fix dashboard prediction

commit 5f58dcf5f5 (parent 07df51cdf4)
Author: Giuseppe Nucifora
Date: 2024-12-18 00:46:28 +01:00

3 changed files with 255 additions and 267 deletions

View File

@@ -48,6 +48,10 @@ class EnvironmentalSimulator:
         results = []
         current_date = datetime.now()

+        # Compute daily rainfall and radiation values
+        daily_rainfall = rainfall / 30  # Spread the monthly rainfall over the days
+        daily_radiation = radiation  # Use the radiation value as provided
+
         for day in range(days):
             # Compute the current phase
             day_of_year = (current_date + timedelta(days=day)).timetuple().tm_yday
@@ -56,6 +60,12 @@ class EnvironmentalSimulator:
             # Simulate the daily temperature
             temp = np.random.uniform(temp_range[0], temp_range[1])

+            # Simulate the daily rainfall variation (±20% of the mean value)
+            daily_rain = daily_rainfall * np.random.uniform(0.8, 1.2)
+
+            # Simulate the daily radiation variation (±10% of the base value)
+            daily_rad = daily_radiation * np.random.uniform(0.9, 1.1)
+
             # Compute the daily stress index
             stress = self.calculate_stress_index(temp_range, humidity, rainfall, radiation)
@@ -66,6 +76,9 @@ class EnvironmentalSimulator:
                 'date': current_date + timedelta(days=day),
                 'phase': phase,
                 'temperature': temp,
+                'rainfall': daily_rain,
+                'radiation': daily_rad,
+                'humidity': humidity,
                 'stress_index': stress,
                 'growth_rate': growth_rate
             })
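Review note: the added code spreads the monthly rainfall total over the simulated days and jitters rainfall (±20%) and radiation (±10%) per day; note that calculate_stress_index is still called with the monthly rainfall and radiation rather than the new daily values. A minimal standalone sketch of the added logic, runnable outside the simulator (the function name and defaults are illustrative, not the project's API):

import numpy as np

def simulate_daily_weather(monthly_rainfall, radiation, days=30, seed=None):
    """Spread a monthly rainfall total across days and add daily noise,
    mirroring the logic added in the hunks above."""
    rng = np.random.default_rng(seed)
    daily_rainfall = monthly_rainfall / 30  # mean rainfall per day
    return [
        {
            'rainfall': daily_rainfall * rng.uniform(0.8, 1.2),   # ±20% jitter
            'radiation': radiation * rng.uniform(0.9, 1.1),       # ±10% jitter
        }
        for _ in range(days)
    ]

# Example: 60 mm of rain in the month, radiation value 220
for rec in simulate_daily_weather(60, 220, days=3, seed=0):
    print(rec)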

View File

@@ -9,11 +9,6 @@ import json
 from utils.helpers import clean_column_name
 from dashboard.environmental_simulator import *
 from dotenv import load_dotenv
-import sagemaker
-from sagemaker.tensorflow import TensorFlowModel
-from sagemaker.serverless import ServerlessInferenceConfig
-from sagemaker import Session
-import boto3

 CONFIG_FILE = 'olive_config.json'
@@ -23,6 +18,13 @@ os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 # Set global precision policy
 tf.keras.mixed_precision.set_global_policy('float32')

+DEV_MODE = True
+model = None
+scaler_temporal = None
+scaler_static = None
+scaler_y = None
+MODEL_LOADING = False
+
 def load_config():
     default_config = {
@@ -54,6 +56,10 @@ def load_config():
             'etichettatura': 0.30
         },
         'selling_price': 12.00
+        },
+        'inference': {
+            'debug_mode': True,
+            'model_path': './sources/olive_oil_transformer/olive_oil_transformer_model.keras'
         }
     }
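The new 'inference' block gives the dashboard a persisted switch between mock and real predictions. A hedged sketch of how such a block can be consumed at startup (this read_inference_config helper is hypothetical; the real code merges defaults inside load_config and reads the flag with a .get(...) chain, as shown further down):

import json
import os

def read_inference_config(path='olive_config.json'):
    """Hypothetical reader: fall back to safe defaults when the file or the
    'inference' section is missing."""
    defaults = {
        'debug_mode': True,
        'model_path': './sources/olive_oil_transformer/olive_oil_transformer_model.keras'
    }
    if os.path.exists(path):
        with open(path) as f:
            defaults.update(json.load(f).get('inference', {}))
    return defaults

cfg = read_inference_config()
DEV_MODE = cfg['debug_mode']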
@@ -82,150 +88,12 @@ try:
     simulated_data = pd.read_parquet("./sources/olive_training_dataset.parquet")
     weather_data = pd.read_parquet("./sources/weather_data_solarenergy.parquet")
     olive_varieties = pd.read_parquet("./sources/olive_varieties.parquet")

-    if not True:
-        # Print versions and system information
-        print(f"Keras version: {keras.__version__}")
-        print(f"TensorFlow version: {tf.__version__}")
-        print(f"CUDA available: {tf.test.is_built_with_cuda()}")
-        print(f"GPU devices: {tf.config.list_physical_devices('GPU')}")
-
-        # GPU memory configuration
-        gpus = tf.config.experimental.list_physical_devices('GPU')
-        if gpus:
-            try:
-                for gpu in gpus:
-                    tf.config.experimental.set_memory_growth(gpu, True)
-                logical_gpus = tf.config.experimental.list_logical_devices('GPU')
-                print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
-            except RuntimeError as e:
-                print(e)
-
-        @keras.saving.register_keras_serializable()
-        class DataAugmentation(tf.keras.layers.Layer):
-            """Custom layer for data augmentation"""
-
-            def __init__(self, noise_stddev=0.03, **kwargs):
-                super().__init__(**kwargs)
-                self.noise_stddev = noise_stddev
-
-            def call(self, inputs, training=None):
-                if training:
-                    return inputs + tf.random.normal(
-                        shape=tf.shape(inputs),
-                        mean=0.0,
-                        stddev=self.noise_stddev
-                    )
-                return inputs
-
-            def get_config(self):
-                config = super().get_config()
-                config.update({"noise_stddev": self.noise_stddev})
-                return config
-
-        @keras.saving.register_keras_serializable()
-        class PositionalEncoding(tf.keras.layers.Layer):
-            """Custom layer for positional encoding"""
-
-            def __init__(self, d_model, **kwargs):
-                super().__init__(**kwargs)
-                self.d_model = d_model
-
-            def build(self, input_shape):
-                _, seq_length, _ = input_shape
-
-                # Build the positional encoding matrix
-                position = tf.range(seq_length, dtype=tf.float32)[:, tf.newaxis]
-                div_term = tf.exp(
-                    tf.range(0, self.d_model, 2, dtype=tf.float32) *
-                    (-tf.math.log(10000.0) / self.d_model)
-                )
-
-                # Compute sin and cos
-                pos_encoding = tf.zeros((1, seq_length, self.d_model))
-                pos_encoding_even = tf.sin(position * div_term)
-                pos_encoding_odd = tf.cos(position * div_term)
-
-                # Assign the values to even and odd positions
-                pos_encoding = tf.concat(
-                    [tf.expand_dims(pos_encoding_even, -1),
-                     tf.expand_dims(pos_encoding_odd, -1)],
-                    axis=-1
-                )
-                pos_encoding = tf.reshape(pos_encoding, (1, seq_length, -1))
-                pos_encoding = pos_encoding[:, :, :self.d_model]
-
-                # Store the encoding as a non-trainable weight
-                self.pos_encoding = self.add_weight(
-                    shape=(1, seq_length, self.d_model),
-                    initializer=tf.keras.initializers.Constant(pos_encoding),
-                    trainable=False,
-                    name='positional_encoding'
-                )
-                super().build(input_shape)
-
-            def call(self, inputs):
-                # Broadcast the positional encoding over the batch
-                batch_size = tf.shape(inputs)[0]
-                pos_encoding_tiled = tf.tile(self.pos_encoding, [batch_size, 1, 1])
-                return inputs + pos_encoding_tiled
-
-            def get_config(self):
-                config = super().get_config()
-                config.update({"d_model": self.d_model})
-                return config
-
-        @keras.saving.register_keras_serializable()
-        class WarmUpLearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
-            """Custom learning rate schedule with linear warmup and exponential decay."""
-
-            def __init__(self, initial_learning_rate=1e-3, warmup_steps=500, decay_steps=5000):
-                super().__init__()
-                self.initial_learning_rate = initial_learning_rate
-                self.warmup_steps = warmup_steps
-                self.decay_steps = decay_steps
-
-            def __call__(self, step):
-                warmup_pct = tf.cast(step, tf.float32) / self.warmup_steps
-                warmup_lr = self.initial_learning_rate * warmup_pct
-                decay_factor = tf.pow(0.1, tf.cast(step, tf.float32) / self.decay_steps)
-                decayed_lr = self.initial_learning_rate * decay_factor
-                return tf.where(step < self.warmup_steps, warmup_lr, decayed_lr)
-
-            def get_config(self):
-                return {
-                    'initial_learning_rate': self.initial_learning_rate,
-                    'warmup_steps': self.warmup_steps,
-                    'decay_steps': self.decay_steps
-                }
-
-        @keras.saving.register_keras_serializable()
-        def weighted_huber_loss(y_true, y_pred):
-            # Weights for the different outputs
-            weights = tf.constant([1.0, 0.8, 0.8, 1.0, 0.6], dtype=tf.float32)
-            huber = tf.keras.losses.Huber(delta=1.0)
-            loss = huber(y_true, y_pred)
-            weighted_loss = tf.reduce_mean(loss * weights)
-            return weighted_loss
-
-        print("Loading model and scalers...")
-        model = tf.keras.models.load_model('./sources/olive_oil_transformer/olive_oil_transformer_model.keras')
-        # model.save('./sources/olive_oil_transformer/olive_oil_transformer_model', save_format='tf')
-        scaler_temporal = joblib.load('./sources/olive_oil_transformer/olive_oil_transformer_scaler_temporal.joblib')
-        scaler_static = joblib.load('./sources/olive_oil_transformer/olive_oil_transformer_scaler_static.joblib')
-        scaler_y = joblib.load('./sources/olive_oil_transformer/olive_oil_transformer_scaler_y.joblib')
-    else:
-        print("Development mode active - models not loaded")
+    scaler_temporal = joblib.load('./sources/olive_oil_transformer/olive_oil_transformer_scaler_temporal.joblib')
+    scaler_static = joblib.load('./sources/olive_oil_transformer/olive_oil_transformer_scaler_static.joblib')
+    scaler_y = joblib.load('./sources/olive_oil_transformer/olive_oil_transformer_scaler_y.joblib')
+
+    config = load_config()
+    DEV_MODE = config.get('inference', {}).get('debug_mode', True)
 except Exception as e:
     print(f"Error while loading: {str(e)}")
     raise e
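With this hunk the model is no longer loaded at import time: only the scalers are loaded eagerly, model stays None, and DEV_MODE is read from the saved config. A sketch of the resulting lazy-load pattern (get_model is a hypothetical helper; in this commit the actual loading happens inside the debug-switch callback shown further down):

import os
import tensorflow as tf

model = None  # module-level cache, mirroring the `model = None` added earlier

def get_model(model_path, custom_objects=None):
    """Return the cached model, loading it from disk on first use and
    failing fast with a clear error if the file is missing."""
    global model
    if model is None:
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"Model not found at: {model_path}")
        model = tf.keras.models.load_model(model_path, custom_objects=custom_objects)
    return model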
@@ -460,7 +328,6 @@ def mock_make_prediction(weather_data, varieties_info, percentages, hectares, simulation_data=None):

 def make_prediction(weather_data, varieties_info, percentages, hectares, simulation_data=None):
-    DEV_MODE = True
     if DEV_MODE:
         return mock_make_prediction(weather_data, varieties_info, percentages, hectares, simulation_data)
     try:
@@ -564,12 +431,6 @@ def make_prediction(weather_data, varieties_info, percentages, hectares, simulation_data=None):
         print(f"Temporal data shape: {temporal_data.shape}")
         print(f"Static data shape: {static_data.shape}")

-        # Debug info
-        print("Static data:")
-        print(static_data)
-        print("\nTemporal data:")
-        print(temporal_data)
-
         # Standardize the data
         temporal_data = scaler_temporal.transform(temporal_data.reshape(1, -1)).reshape(1, 1, -1)
         static_data = scaler_static.transform(static_data)
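The two reshapes around scaler_temporal are load-bearing: the scaler expects a 2-D (1, n_features) array, while the transformer expects a 3-D (batch, seq_len, n_features) tensor. A small shape-only illustration (the feature count of 4 is made up; the real one is fixed by the fitted scaler loaded from joblib):

import numpy as np
from sklearn.preprocessing import StandardScaler

n_features = 4  # illustrative; the real count comes from the fitted scaler
scaler = StandardScaler().fit(np.random.rand(100, n_features))

temporal_data = np.random.rand(n_features)               # raw 1-D feature vector
scaled = scaler.transform(temporal_data.reshape(1, -1))  # (1, 4) for the scaler
model_input = scaled.reshape(1, 1, -1)                   # (1, 1, 4) for the model
print(model_input.shape)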
@@ -989,10 +850,9 @@ def create_configuration_tab():
                 ])
             ], className="mb-4")
         ], md=6),
-        # SageMaker section
         dbc.Row([
             dbc.Col([
-                create_sagemaker_config_section()
+                create_inference_config_section()
             ], md=12)
         ]),
         # Costs configuration
@@ -1013,51 +873,207 @@
 @app.callback(
-    [Output('sagemaker-status', 'children'),
-     Output('sagemaker-endpoint', 'children'),
-     Output('sagemaker-latency', 'children'),
-     Output('sagemaker-requests', 'children')],
-    [Input('sagemaker-switch', 'value')],
-    [State('sagemaker-memory', 'value'),
-     State('sagemaker-concurrency', 'value'),
-     State('sagemaker-model-uri', 'value'),
-     State('sagemaker-role', 'value')]
+    [Output('inference-status', 'children'),
+     Output('inference-mode', 'children'),
+     Output('inference-latency', 'children'),
+     Output('inference-requests', 'children')],
+    [Input('debug-switch', 'value')]
 )
-def toggle_sagemaker(enabled, memory, concurrency, model_uri, role):
-    if not enabled:
-        return (
-            dbc.Alert("Service not active", color="warning"),
-            "N/A",
-            "N/A",
-            "N/A"
-        )
-    else:
-        try:
-            boto3.setup_default_session(profile_name="giuseppenucifora", region_name="eu-west-1")
-            session = Session()
-
-            # Initialize SageMaker
-            tf_model = TensorFlowModel(
-                model_data=model_uri,
-                role=role,
-                framework_version="2.14"
-            )
-            serverless_config = ServerlessInferenceConfig(
-                memory_size_in_mb=memory,
-                max_concurrency=concurrency,
-            )
-
-            # Deploy the model
-            predictor = tf_model.deploy(serverless_inference_config=serverless_config)
-            return (
-                dbc.Alert("Service active", color="success"),
-                predictor.endpoint_name,
-                "< 100ms",
-                "0"
-            )
-        except Exception as e:
-            print(f"Error activating SageMaker: {str(e)}")
+def toggle_inference_mode(debug_mode):
+    global DEV_MODE
+    global model
+
+    try:
+        config = load_config()
+
+        # Update the debug mode in the configuration
+        config['inference'] = config.get('inference', {})  # Create the section if it does not exist
+        config['inference']['debug_mode'] = debug_mode
+
+        # Save the updated configuration
+        try:
+            with open(CONFIG_FILE, 'w') as f:
+                json.dump(config, f, indent=4)
+        except Exception as e:
+            print(f"Error saving the configuration: {e}")
+
+        DEV_MODE = debug_mode
+
+        if debug_mode:
+            return (
+                dbc.Alert("Debug mode active - using mock predictions", color="info"),
+                "Debug (Mock)",
+                "< 1ms",
+                "N/A"
+            )
+        else:
+            try:
+                print(f"Keras version: {keras.__version__}")
+                print(f"TensorFlow version: {tf.__version__}")
+                print(f"CUDA available: {tf.test.is_built_with_cuda()}")
+                print(f"GPU devices: {tf.config.list_physical_devices('GPU')}")
+
+                # GPU memory configuration
+                gpus = tf.config.experimental.list_physical_devices('GPU')
+                if gpus:
+                    try:
+                        for gpu in gpus:
+                            tf.config.experimental.set_memory_growth(gpu, True)
+                        logical_gpus = tf.config.experimental.list_logical_devices('GPU')
+                        print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
+                    except RuntimeError as e:
+                        print(e)
+
+                @keras.saving.register_keras_serializable()
+                class DataAugmentation(tf.keras.layers.Layer):
+                    """Custom layer for data augmentation"""
+
+                    def __init__(self, noise_stddev=0.03, **kwargs):
+                        super().__init__(**kwargs)
+                        self.noise_stddev = noise_stddev
+
+                    def call(self, inputs, training=None):
+                        if training:
+                            return inputs + tf.random.normal(
+                                shape=tf.shape(inputs),
+                                mean=0.0,
+                                stddev=self.noise_stddev
+                            )
+                        return inputs
+
+                    def get_config(self):
+                        config = super().get_config()
+                        config.update({"noise_stddev": self.noise_stddev})
+                        return config
+
+                @keras.saving.register_keras_serializable()
+                class PositionalEncoding(tf.keras.layers.Layer):
+                    """Custom layer for positional encoding"""
+
+                    def __init__(self, d_model, **kwargs):
+                        super().__init__(**kwargs)
+                        self.d_model = d_model
+
+                    def build(self, input_shape):
+                        _, seq_length, _ = input_shape
+
+                        # Build the positional encoding matrix
+                        position = tf.range(seq_length, dtype=tf.float32)[:, tf.newaxis]
+                        div_term = tf.exp(
+                            tf.range(0, self.d_model, 2, dtype=tf.float32) *
+                            (-tf.math.log(10000.0) / self.d_model)
+                        )
+
+                        # Compute sin and cos
+                        pos_encoding = tf.zeros((1, seq_length, self.d_model))
+                        pos_encoding_even = tf.sin(position * div_term)
+                        pos_encoding_odd = tf.cos(position * div_term)
+
+                        # Assign the values to even and odd positions
+                        pos_encoding = tf.concat(
+                            [tf.expand_dims(pos_encoding_even, -1),
+                             tf.expand_dims(pos_encoding_odd, -1)],
+                            axis=-1
+                        )
+                        pos_encoding = tf.reshape(pos_encoding, (1, seq_length, -1))
+                        pos_encoding = pos_encoding[:, :, :self.d_model]
+
+                        # Store the encoding as a non-trainable weight
+                        self.pos_encoding = self.add_weight(
+                            shape=(1, seq_length, self.d_model),
+                            initializer=tf.keras.initializers.Constant(pos_encoding),
+                            trainable=False,
+                            name='positional_encoding'
+                        )
+                        super().build(input_shape)
+
+                    def call(self, inputs):
+                        # Broadcast the positional encoding over the batch
+                        batch_size = tf.shape(inputs)[0]
+                        pos_encoding_tiled = tf.tile(self.pos_encoding, [batch_size, 1, 1])
+                        return inputs + pos_encoding_tiled
+
+                    def get_config(self):
+                        config = super().get_config()
+                        config.update({"d_model": self.d_model})
+                        return config
+
+                @keras.saving.register_keras_serializable()
+                class WarmUpLearningRateSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
+                    """Custom learning rate schedule with linear warmup and exponential decay."""
+
+                    def __init__(self, initial_learning_rate=1e-3, warmup_steps=500, decay_steps=5000):
+                        super().__init__()
+                        self.initial_learning_rate = initial_learning_rate
+                        self.warmup_steps = warmup_steps
+                        self.decay_steps = decay_steps
+
+                    def __call__(self, step):
+                        warmup_pct = tf.cast(step, tf.float32) / self.warmup_steps
+                        warmup_lr = self.initial_learning_rate * warmup_pct
+                        decay_factor = tf.pow(0.1, tf.cast(step, tf.float32) / self.decay_steps)
+                        decayed_lr = self.initial_learning_rate * decay_factor
+                        return tf.where(step < self.warmup_steps, warmup_lr, decayed_lr)
+
+                    def get_config(self):
+                        return {
+                            'initial_learning_rate': self.initial_learning_rate,
+                            'warmup_steps': self.warmup_steps,
+                            'decay_steps': self.decay_steps
+                        }
+
+                @keras.saving.register_keras_serializable()
+                def weighted_huber_loss(y_true, y_pred):
+                    # Weights for the different outputs
+                    weights = tf.constant([1.0, 0.8, 0.8, 1.0, 0.6], dtype=tf.float32)
+                    huber = tf.keras.losses.Huber(delta=1.0)
+                    loss = huber(y_true, y_pred)
+                    weighted_loss = tf.reduce_mean(loss * weights)
+                    return weighted_loss
+
+                print("Loading model and scalers...")
+
+                # Check that the model is available
+                model_path = './sources/olive_oil_transformer/olive_oil_transformer_model.keras'
+                if not os.path.exists(model_path):
+                    raise FileNotFoundError(f"Model not found at: {model_path}")
+
+                # Try to load the model
+                model = tf.keras.models.load_model(model_path, custom_objects={
+                    'DataAugmentation': DataAugmentation,
+                    'PositionalEncoding': PositionalEncoding,
+                    'WarmUpLearningRateSchedule': WarmUpLearningRateSchedule,
+                    'weighted_huber_loss': weighted_huber_loss
+                })
+
+                return (
+                    dbc.Alert("Model loaded successfully", color="success"),
+                    "Production (Local Model)",
+                    "~ 100ms",
+                    "0"
+                )
+            except Exception as e:
+                print(f"Error loading the model: {str(e)}")
+
+                # If the model fails to load, fall back to debug mode
+                DEV_MODE = True
+
+                # Update the configuration to reflect the fallback
+                config['inference']['debug_mode'] = True
+                try:
+                    with open(CONFIG_FILE, 'w') as f:
+                        json.dump(config, f, indent=4)
+                except Exception as save_error:
+                    print(f"Error saving the fallback configuration: {save_error}")
+
+                return (
+                    dbc.Alert(f"Error loading the model: {str(e)}", color="danger"),
+                    "Debug (Mock) - Fallback",
+                    "N/A",
+                    "N/A"
+                )
+    except Exception as e:
+        print(f"Error in the inference configuration: {str(e)}")
         return (
             dbc.Alert(f"Error: {str(e)}", color="danger"),
             "Error",
@@ -1829,14 +1845,18 @@ def create_costs_config_section():
     ])

-def create_sagemaker_config_section():
+def create_inference_config_section():
+    config = load_config()
+    debug_mode = config.get('inference', {}).get('debug_mode', True)
+
     return dbc.Card([
         dbc.CardHeader([
-            html.H4("SageMaker Configuration", className="text-primary mb-0"),
+            html.H4("Inference Configuration", className="text-primary mb-0"),
             dbc.Switch(
-                id='sagemaker-switch',
-                label="Enable SageMaker",
-                value=False,
+                id='debug-switch',
+                label="Debug Mode",
+                value=debug_mode,
                 className="mt-2"
             ),
         ], className="bg-light"),
@@ -1846,7 +1866,7 @@ def create_sagemaker_config_section():
             dbc.Col([
                 html.Div([
                     html.H5("Service Status", className="mb-3"),
-                    html.Div(id='sagemaker-status', className="mb-3"),
+                    html.Div(id='inference-status', className="mb-3"),
                 ])
             ])
         ], className="mb-4"),
@@ -1858,59 +1878,15 @@ def create_sagemaker_config_section():
         dbc.Form([
             dbc.Row([
                 dbc.Col([
-                    dbc.Label("Memory (MB):", className="fw-bold"),
-                    dbc.Input(
-                        id='sagemaker-memory',
-                        type='number',
-                        min=512,
-                        max=6144,
-                        step=512,
-                        value=2048,
-                        className="mb-2"
-                    )
-                ], md=6),
-                dbc.Col([
-                    dbc.Label("Maximum concurrency:", className="fw-bold"),
-                    dbc.Input(
-                        id='sagemaker-concurrency',
-                        type='number',
-                        min=1,
-                        max=10,
-                        value=5,
-                        className="mb-2"
-                    )
-                ], md=6),
-            ]),
-            dbc.Row([
-                dbc.Col([
-                    dbc.Label("Model URI:", className="fw-bold"),
-                    dbc.Input(
-                        id='sagemaker-model-uri',
-                        type='text',
-                        value="s3://sagemaker-oil-transformer/model/saved_model.pb",
-                        className="mb-2",
-                        disabled=True,
-                        style={
-                            "background-color": "#f8f9fa",
-                            "opacity": "1",
-                            "cursor": "not-allowed"
-                        }
-                    )
-                ], md=12),
-            ]),
-            dbc.Row([
-                dbc.Col([
-                    dbc.Label("IAM Role:", className="fw-bold"),
-                    dbc.Input(
-                        id='sagemaker-role',
-                        type='text',
-                        value="arn:aws:iam::906312666576:role/sagemaker-olive-oil",
-                        className="mb-2",
-                        disabled=True,
-                        style={
-                            "background-color": "#f8f9fa",
-                            "opacity": "1",
-                            "cursor": "not-allowed"
-                        }
-                    )
+                    dbc.Label("Model:", className="fw-bold"),
+                    # Use html.Div instead of dbc.Input for the read-only path
+                    html.Div(
+                        "./sources/olive_oil_transformer/olive_oil_transformer_model.keras",
+                        id='model-path',
+                        className="mb-2 p-2 bg-light border rounded",
+                        style={
+                            "font-family": "monospace",
+                            "font-size": "0.9rem"
+                        }
+                    )
                 ], md=12),
@@ -1925,16 +1901,16 @@ def create_sagemaker_config_section():
                     html.H5("Metrics", className="mb-3"),
                     dbc.ListGroup([
                         dbc.ListGroupItem([
-                            html.Strong("Endpoint: "),
-                            html.Span(id='sagemaker-endpoint')
+                            html.Strong("Mode: "),
+                            html.Span(id='inference-mode')
                         ]),
                         dbc.ListGroupItem([
                             html.Strong("Average latency: "),
-                            html.Span(id='sagemaker-latency')
+                            html.Span(id='inference-latency')
                         ]),
                         dbc.ListGroupItem([
                             html.Strong("Total requests: "),
-                            html.Span(id='sagemaker-requests')
+                            html.Span(id='inference-requests')
                         ])
                     ], flush=True)
                 ])
@@ -2593,8 +2569,7 @@ def update_simulation(n_clicks, temp_range, humidity, rainfall, radiation):
             varieties_info.append(variety_data.iloc[0])
             percentages.append(variety_config['percentage'])

-        print(config['oliveto']['varieties'])
-        print(olive_varieties)
+        print(sim_data)
         prediction = make_prediction(weather_data, varieties_info, percentages, hectares, sim_data)
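Smoke-testing note: with debug_mode enabled, make_prediction short-circuits to mock_make_prediction, so the call above can be exercised without the parquet files or the trained model. A hedged sketch of that short-circuit in isolation (the argument placeholders and the mock's return payload are illustrative only):

DEV_MODE = True

def mock_make_prediction(weather_data, varieties_info, percentages, hectares, simulation_data=None):
    return {'status': 'mock'}  # illustrative payload, not the app's real result schema

def make_prediction(weather_data, varieties_info, percentages, hectares, simulation_data=None):
    if DEV_MODE:
        return mock_make_prediction(weather_data, varieties_info, percentages, hectares, simulation_data)
    raise RuntimeError("production path requires the loaded model and scalers")

print(make_prediction(weather_data=None, varieties_info=[], percentages=[], hectares=10))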