{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "8adcbe0819b88578", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "Hit:1 http://security.ubuntu.com/ubuntu jammy-security InRelease\n", "Hit:2 https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64 InRelease\n", "Hit:3 http://archive.ubuntu.com/ubuntu jammy InRelease \n", "Hit:4 http://archive.ubuntu.com/ubuntu jammy-updates InRelease\n", "Hit:5 http://archive.ubuntu.com/ubuntu jammy-backports InRelease\n", "Reading package lists... Done\n", "Reading package lists... Done\n", "Building dependency tree... Done\n", "Reading state information... Done\n", "graphviz is already the newest version (2.42.2-6ubuntu0.1).\n", "0 upgraded, 0 newly installed, 0 to remove and 121 not upgraded.\n", "Requirement already satisfied: tensorflow in /usr/local/lib/python3.11/dist-packages (2.14.0)\n", "Requirement already satisfied: absl-py>=1.0.0 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (2.0.0)\n", "Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (1.6.3)\n", "Requirement already satisfied: flatbuffers>=23.5.26 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (23.5.26)\n", "Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (0.5.4)\n", "Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (0.2.0)\n", "Requirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (3.9.0)\n", "Requirement already satisfied: libclang>=13.0.0 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (16.0.6)\n", "Requirement already satisfied: ml-dtypes==0.2.0 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (0.2.0)\n", "Requirement already satisfied: numpy>=1.23.5 in 
/usr/local/lib/python3.11/dist-packages (from tensorflow) (1.26.0)\n", "Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (3.3.0)\n", "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from tensorflow) (23.1)\n", "Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (4.24.3)\n", "Requirement already satisfied: setuptools in /usr/local/lib/python3.11/dist-packages (from tensorflow) (68.2.2)\n", "Requirement already satisfied: six>=1.12.0 in /usr/lib/python3/dist-packages (from tensorflow) (1.16.0)\n", "Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (2.3.0)\n", "Requirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (4.8.0)\n", "Requirement already satisfied: wrapt<1.15,>=1.11.0 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (1.14.1)\n", "Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (0.37.1)\n", "Requirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (1.58.0)\n", "Requirement already satisfied: tensorboard<2.15,>=2.14 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (2.14.0)\n", "Requirement already satisfied: tensorflow-estimator<2.15,>=2.14.0 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (2.14.0)\n", "Requirement already satisfied: keras<2.15,>=2.14.0 in /usr/local/lib/python3.11/dist-packages (from tensorflow) (2.14.0)\n", "Requirement already satisfied: wheel<1.0,>=0.23.0 in /usr/local/lib/python3.11/dist-packages (from astunparse>=1.6.0->tensorflow) (0.41.2)\n", "Requirement already satisfied: google-auth<3,>=1.6.3 in 
/usr/local/lib/python3.11/dist-packages (from tensorboard<2.15,>=2.14->tensorflow) (2.23.1)\n", "Requirement already satisfied: google-auth-oauthlib<1.1,>=0.5 in /usr/local/lib/python3.11/dist-packages (from tensorboard<2.15,>=2.14->tensorflow) (1.0.0)\n", "Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.11/dist-packages (from tensorboard<2.15,>=2.14->tensorflow) (3.4.4)\n", "Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.11/dist-packages (from tensorboard<2.15,>=2.14->tensorflow) (2.31.0)\n", "Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.11/dist-packages (from tensorboard<2.15,>=2.14->tensorflow) (0.7.1)\n", "Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from tensorboard<2.15,>=2.14->tensorflow) (2.3.7)\n", "Requirement already satisfied: cachetools<6.0,>=2.0.0 in /usr/local/lib/python3.11/dist-packages (from google-auth<3,>=1.6.3->tensorboard<2.15,>=2.14->tensorflow) (5.3.1)\n", "Requirement already satisfied: pyasn1-modules>=0.2.1 in /usr/local/lib/python3.11/dist-packages (from google-auth<3,>=1.6.3->tensorboard<2.15,>=2.14->tensorflow) (0.3.0)\n", "Requirement already satisfied: rsa<5,>=3.1.4 in /usr/local/lib/python3.11/dist-packages (from google-auth<3,>=1.6.3->tensorboard<2.15,>=2.14->tensorflow) (4.9)\n", "Requirement already satisfied: urllib3>=2.0.5 in /usr/local/lib/python3.11/dist-packages (from google-auth<3,>=1.6.3->tensorboard<2.15,>=2.14->tensorflow) (2.0.5)\n", "Requirement already satisfied: requests-oauthlib>=0.7.0 in /usr/local/lib/python3.11/dist-packages (from google-auth-oauthlib<1.1,>=0.5->tensorboard<2.15,>=2.14->tensorflow) (1.3.1)\n", "Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.11/dist-packages (from requests<3,>=2.21.0->tensorboard<2.15,>=2.14->tensorflow) (3.2.0)\n", "Requirement already satisfied: idna<4,>=2.5 in 
/usr/local/lib/python3.11/dist-packages (from requests<3,>=2.21.0->tensorboard<2.15,>=2.14->tensorflow) (3.4)\n", "Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.11/dist-packages (from requests<3,>=2.21.0->tensorboard<2.15,>=2.14->tensorflow) (2023.7.22)\n", "Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.11/dist-packages (from werkzeug>=1.0.1->tensorboard<2.15,>=2.14->tensorflow) (2.1.3)\n", "Requirement already satisfied: pyasn1<0.6.0,>=0.4.6 in /usr/local/lib/python3.11/dist-packages (from pyasn1-modules>=0.2.1->google-auth<3,>=1.6.3->tensorboard<2.15,>=2.14->tensorflow) (0.5.0)\n", "Requirement already satisfied: oauthlib>=3.0.0 in /usr/lib/python3/dist-packages (from requests-oauthlib>=0.7.0->google-auth-oauthlib<1.1,>=0.5->tensorboard<2.15,>=2.14->tensorflow) (3.2.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (1.26.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: pandas in /usr/local/lib/python3.11/dist-packages (2.2.3)\n", "Requirement already satisfied: numpy>=1.23.2 in /usr/local/lib/python3.11/dist-packages (from pandas) (1.26.0)\n", "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas) (2.8.2)\n", "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas) (2024.2)\n", "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas) (2024.2)\n", "Requirement already satisfied: six>=1.5 in /usr/lib/python3/dist-packages (from python-dateutil>=2.8.2->pandas) (1.16.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: keras in /usr/local/lib/python3.11/dist-packages (2.14.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: scikit-learn in /usr/local/lib/python3.11/dist-packages (1.5.2)\n", "Requirement already satisfied: numpy>=1.19.5 in /usr/local/lib/python3.11/dist-packages (from scikit-learn) (1.26.0)\n", "Requirement already satisfied: scipy>=1.6.0 in /usr/local/lib/python3.11/dist-packages (from scikit-learn) (1.14.1)\n", "Requirement already satisfied: joblib>=1.2.0 in /usr/local/lib/python3.11/dist-packages (from scikit-learn) (1.4.2)\n", "Requirement already satisfied: threadpoolctl>=3.1.0 in /usr/local/lib/python3.11/dist-packages (from scikit-learn) (3.5.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system 
package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: matplotlib in /usr/local/lib/python3.11/dist-packages (3.8.0)\n", "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (1.1.1)\n", "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (0.11.0)\n", "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (4.42.1)\n", "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (1.4.5)\n", "Requirement already satisfied: numpy<2,>=1.21 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (1.26.0)\n", "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (23.1)\n", "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (10.0.1)\n", "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (3.2.0)\n", "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.11/dist-packages (from matplotlib) (2.8.2)\n", "Requirement already satisfied: six>=1.5 in /usr/lib/python3/dist-packages (from python-dateutil>=2.7->matplotlib) (1.16.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package 
manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: joblib in /usr/local/lib/python3.11/dist-packages (1.4.2)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: pyarrow in /usr/local/lib/python3.11/dist-packages (18.0.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: fastparquet in /usr/local/lib/python3.11/dist-packages (2024.11.0)\n", "Requirement already satisfied: pandas>=1.5.0 in /usr/local/lib/python3.11/dist-packages (from fastparquet) (2.2.3)\n", "Requirement already satisfied: numpy in /usr/local/lib/python3.11/dist-packages (from fastparquet) (1.26.0)\n", "Requirement already satisfied: cramjam>=2.3 in /usr/local/lib/python3.11/dist-packages (from fastparquet) (2.9.0)\n", "Requirement already satisfied: fsspec in /usr/local/lib/python3.11/dist-packages (from fastparquet) (2024.10.0)\n", "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from fastparquet) (23.1)\n", "Requirement already satisfied: python-dateutil>=2.8.2 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.5.0->fastparquet) (2.8.2)\n", "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.5.0->fastparquet) (2024.2)\n", "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.5.0->fastparquet) (2024.2)\n", "Requirement already satisfied: six>=1.5 in /usr/lib/python3/dist-packages (from python-dateutil>=2.8.2->pandas>=1.5.0->fastparquet) (1.16.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: scipy in /usr/local/lib/python3.11/dist-packages (1.14.1)\n", "Requirement already satisfied: numpy<2.3,>=1.23.5 in /usr/local/lib/python3.11/dist-packages (from scipy) (1.26.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: seaborn in /usr/local/lib/python3.11/dist-packages (0.13.2)\n", "Requirement already satisfied: numpy!=1.24.0,>=1.20 in /usr/local/lib/python3.11/dist-packages (from seaborn) (1.26.0)\n", "Requirement already satisfied: pandas>=1.2 in /usr/local/lib/python3.11/dist-packages (from seaborn) (2.2.3)\n", "Requirement already satisfied: matplotlib!=3.6.1,>=3.4 in /usr/local/lib/python3.11/dist-packages (from seaborn) (3.8.0)\n", "Requirement already satisfied: contourpy>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib!=3.6.1,>=3.4->seaborn) 
(1.1.1)\n", "Requirement already satisfied: cycler>=0.10 in /usr/local/lib/python3.11/dist-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (0.11.0)\n", "Requirement already satisfied: fonttools>=4.22.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (4.42.1)\n", "Requirement already satisfied: kiwisolver>=1.0.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (1.4.5)\n", "Requirement already satisfied: packaging>=20.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (23.1)\n", "Requirement already satisfied: pillow>=6.2.0 in /usr/local/lib/python3.11/dist-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (10.0.1)\n", "Requirement already satisfied: pyparsing>=2.3.1 in /usr/local/lib/python3.11/dist-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (3.2.0)\n", "Requirement already satisfied: python-dateutil>=2.7 in /usr/local/lib/python3.11/dist-packages (from matplotlib!=3.6.1,>=3.4->seaborn) (2.8.2)\n", "Requirement already satisfied: pytz>=2020.1 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.2->seaborn) (2024.2)\n", "Requirement already satisfied: tzdata>=2022.7 in /usr/local/lib/python3.11/dist-packages (from pandas>=1.2->seaborn) (2024.2)\n", "Requirement already satisfied: six>=1.5 in /usr/lib/python3/dist-packages (from python-dateutil>=2.7->matplotlib!=3.6.1,>=3.4->seaborn) (1.16.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: tqdm in /usr/local/lib/python3.11/dist-packages (4.67.1)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: pydot in /usr/local/lib/python3.11/dist-packages (3.0.2)\n", "Requirement already satisfied: pyparsing>=3.0.9 in /usr/local/lib/python3.11/dist-packages (from pydot) (3.2.0)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: tensorflow-io in /usr/local/lib/python3.11/dist-packages (0.37.1)\n", "Requirement already satisfied: tensorflow-io-gcs-filesystem==0.37.1 in /usr/local/lib/python3.11/dist-packages (from tensorflow-io) (0.37.1)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n", "Requirement already satisfied: tensorflow-addons in /usr/local/lib/python3.11/dist-packages (0.23.0)\n", "Requirement already satisfied: packaging in /usr/local/lib/python3.11/dist-packages (from tensorflow-addons) (23.1)\n", "Requirement already satisfied: typeguard<3.0.0,>=2.7 in /usr/local/lib/python3.11/dist-packages (from tensorflow-addons) (2.13.3)\n", "\u001B[33mWARNING: Running pip as the 'root' user can result in broken permissions and conflicting behaviour with the system package manager. 
It is recommended to use a virtual environment instead: https://pip.pypa.io/warnings/venv\u001B[0m\u001B[33m\n", "\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m A new release of pip is available: \u001B[0m\u001B[31;49m23.2.1\u001B[0m\u001B[39;49m -> \u001B[0m\u001B[32;49m24.3.1\u001B[0m\n", "\u001B[1m[\u001B[0m\u001B[34;49mnotice\u001B[0m\u001B[1;39;49m]\u001B[0m\u001B[39;49m To update, run: \u001B[0m\u001B[32;49mpython3 -m pip install --upgrade pip\u001B[0m\n" ] } ], "source": [ "from opt_einsum.paths import branch_1\n", "!apt-get update\n", "!apt-get install graphviz -y\n", "\n", "!pip install tensorflow\n", "!pip install numpy\n", "!pip install pandas\n", "\n", "!pip install keras\n", "!pip install scikit-learn\n", "!pip install matplotlib\n", "!pip install joblib\n", "!pip install pyarrow\n", "!pip install fastparquet\n", "!pip install scipy\n", "!pip install seaborn\n", "!pip install tqdm\n", "!pip install pydot\n", "!pip install tensorflow-io\n", "!pip install tensorflow-addons" ] }, { "cell_type": "code", "execution_count": 2, "id": "e6fe6bb613168a8a", "metadata": {}, "outputs": [ { "name": "stderr", "output_type": "stream", "text": [ "2024-11-25 21:39:40.365457: E tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:9342] Unable to register cuDNN factory: Attempting to register factory for plugin cuDNN when one has already been registered\n", "2024-11-25 21:39:40.365505: E tensorflow/compiler/xla/stream_executor/cuda/cuda_fft.cc:609] Unable to register cuFFT factory: Attempting to register factory for plugin cuFFT when one has already been registered\n", "2024-11-25 21:39:40.365547: E tensorflow/compiler/xla/stream_executor/cuda/cuda_blas.cc:1518] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n", "2024-11-25 21:39:40.374255: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use 
available CPU instructions in performance-critical operations.\n", "To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\n", "/usr/local/lib/python3.11/dist-packages/tensorflow_addons/utils/tfa_eol_msg.py:23: UserWarning: \n", "\n", "TensorFlow Addons (TFA) has ended development and introduction of new features.\n", "TFA has entered a minimal maintenance and release mode until a planned end of life in May 2024.\n", "Please modify downstream libraries to take dependencies from other repositories in our TensorFlow community (e.g. Keras, Keras-CV, and Keras-NLP). \n", "\n", "For more information see: https://github.com/tensorflow/addons/issues/2807 \n", "\n", " warnings.warn(\n" ] } ], "source": [ "import tensorflow as tf\n", "from tensorflow.keras.layers import Dense, LSTM, MultiHeadAttention, Dropout, BatchNormalization, LayerNormalization, Input, Activation, Lambda, Bidirectional, Add, MaxPooling1D, SpatialDropout1D, GlobalAveragePooling1D, \\\n", " GlobalMaxPooling1D, Concatenate, ThresholdedReLU, Average\n", "from tensorflow.keras import regularizers\n", "from tensorflow.keras.models import Model\n", "import pandas as pd\n", "import numpy as np\n", "from sklearn.model_selection import train_test_split\n", "from sklearn.preprocessing import RobustScaler\n", "from tensorflow.keras.callbacks import EarlyStopping, ReduceLROnPlateau\n", "from tensorflow.keras.optimizers import AdamW\n", "import json\n", "from datetime import datetime\n", "import matplotlib.pyplot as plt\n", "from tensorflow.keras.utils import plot_model\n", "import tensorflow_addons as tfa\n", "import os\n", "import joblib\n", "import seaborn as sns\n", "from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score, confusion_matrix, classification_report, roc_auc_score\n", "from tensorflow.keras.metrics import AUC\n", "from scipy import stats\n", "\n", "folder_name = datetime.now().strftime(\"%Y-%m-%d_%H-%M\")\n", 
"\n", "random_state_value = None" ] }, { "cell_type": "code", "execution_count": 3, "id": "3da8b15c7eb9833f", "metadata": {}, "outputs": [], "source": [ "def get_season(date):\n", " month = date.month\n", " day = date.day\n", " if (month == 12 and day >= 21) or (month <= 3 and day < 20):\n", " return 'Winter'\n", " elif (month == 3 and day >= 20) or (month <= 6 and day < 21):\n", " return 'Spring'\n", " elif (month == 6 and day >= 21) or (month <= 9 and day < 23):\n", " return 'Summer'\n", " elif (month == 9 and day >= 23) or (month <= 12 and day < 21):\n", " return 'Autumn'\n", " else:\n", " return 'Unknown'\n", "\n", "\n", "def get_time_period(hour):\n", " if 5 <= hour < 12:\n", " return 'Morning'\n", " elif 12 <= hour < 17:\n", " return 'Afternoon'\n", " elif 17 <= hour < 21:\n", " return 'Evening'\n", " else:\n", " return 'Night'\n", "\n", "\n", "def add_time_features(df):\n", " \"\"\"\n", " Add time-based features to the DataFrame.\n", " Works with both 'datetime' as column or index.\n", " \"\"\"\n", " # Se datetime è l'indice, lo usiamo direttamente\n", " if isinstance(df.index, pd.DatetimeIndex):\n", " datetime_col = df.index\n", " else:\n", " # Se datetime è una colonna, la convertiamo\n", " if 'datetime' in df.columns:\n", " datetime_col = pd.to_datetime(df['datetime'])\n", " else:\n", " raise ValueError(\"No datetime column or index found in DataFrame\")\n", "\n", " # Creazione delle feature temporali\n", " df['timestamp'] = datetime_col.astype(np.int64) // 10 ** 9\n", " df['year'] = datetime_col.year\n", " df['month'] = datetime_col.month\n", " df['day'] = datetime_col.day\n", " df['hour'] = datetime_col.hour\n", " df['minute'] = datetime_col.minute\n", " df['hour_sin'] = np.sin(datetime_col.hour * (2 * np.pi / 24))\n", " df['hour_cos'] = np.cos(datetime_col.hour * (2 * np.pi / 24))\n", " df['day_of_week'] = datetime_col.dayofweek\n", " df['day_of_year'] = datetime_col.dayofyear\n", " df['week_of_year'] = datetime_col.isocalendar().week.astype(int)\n", " 
df['quarter'] = datetime_col.quarter\n", " df['is_month_end'] = datetime_col.is_month_end.astype(int)\n", " df['is_quarter_end'] = datetime_col.is_quarter_end.astype(int)\n", " df['is_year_end'] = datetime_col.is_year_end.astype(int)\n", " df['month_sin'] = np.sin(datetime_col.month * (2 * np.pi / 12))\n", " df['month_cos'] = np.cos(datetime_col.month * (2 * np.pi / 12))\n", " df['day_of_year_sin'] = np.sin(datetime_col.dayofyear * (2 * np.pi / 365.25))\n", " df['day_of_year_cos'] = np.cos(datetime_col.dayofyear * (2 * np.pi / 365.25))\n", " df['season'] = datetime_col.map(get_season)\n", " df['time_period'] = datetime_col.hour.map(get_time_period)\n", "\n", " return df\n", "\n", "\n", "def add_solar_features(df):\n", " # Solar angle calculation\n", " df['solar_angle'] = np.sin(df['day_of_year'] * (2 * np.pi / 365.25)) * np.sin(df['hour'] * (2 * np.pi / 24))\n", "\n", " # Interactions between relevant features\n", " df['cloud_temp_interaction'] = df['cloudcover'] * df['temp']\n", " df['visibility_cloud_interaction'] = df['visibility'] * (100 - df['cloudcover'])\n", "\n", " # Derived features\n", " df['clear_sky_index'] = (100 - df['cloudcover']) / 100\n", " df['temp_gradient'] = df['temp'] - df['tempmin']\n", "\n", " return df\n", "\n", "\n", "def add_solar_specific_features(df):\n", " \"\"\"\n", " Aggiunge feature specifiche per la predizione della radiazione solare\n", " combinando caratteristiche astronomiche e meteorologiche\n", " \"\"\"\n", " # Caratteristiche astronomiche\n", " df['day_length'] = 12 + 3 * np.sin(2 * np.pi * (df['day_of_year'] - 81) / 365.25)\n", " df['solar_noon'] = 12 - df['hour']\n", " df['solar_elevation'] = np.sin(2 * np.pi * df['day_of_year'] / 365.25) * np.cos(2 * np.pi * df['solar_noon'] / 24)\n", "\n", " # Angolo solare teorico\n", " df['solar_angle'] = np.sin(df['hour_sin']) * np.sin(df['day_of_year_sin'])\n", "\n", " # Interazioni con condizioni atmosferiche\n", " df['cloud_elevation'] = df['cloudcover'] * df['solar_elevation']\n", 
" df['visibility_elevation'] = df['visibility'] * df['solar_elevation']\n", " df['uv_cloud_interaction'] = df['uvindex'] * (100 - df['cloudcover']) / 100\n", "\n", " # Indici di chiarezza e trasmissione\n", " df['clearness_index'] = (100 - df['cloudcover']) * df['visibility'] / 10000\n", " df['atmospheric_attenuation'] = (df['pressure'] / 1013.25) * (1 - (df['humidity'] / 100) * 0.6)\n", "\n", " # Radiazione teorica e attenuazione\n", " df['theoretical_radiation'] = df['solar_angle'].clip(0, 1) * 1000\n", " df['expected_radiation'] = df['theoretical_radiation'] * df['clearness_index']\n", "\n", " # Rolling features\n", " df['cloud_rolling_12h'] = df['cloudcover'].rolling(window=12).mean()\n", " df['temp_rolling_12h'] = df['temp'].rolling(window=12).mean()\n", " df['uv_rolling_12h'] = df['uvindex'].rolling(window=12).mean()\n", "\n", " # Interazioni temperatura-radiazione\n", " df['temp_radiation_potential'] = df['temp'] * df['solar_elevation']\n", "\n", " return df\n", "\n", "\n", "def add_radiation_energy_features(df):\n", " \"\"\"Adds specific features based on solarenergy and uvindex\"\"\"\n", "\n", " # Assicuriamoci che l'indice sia di tipo datetime\n", " if not isinstance(df.index, pd.DatetimeIndex):\n", " df.index = pd.to_datetime(df['datetime'])\n", "\n", " # Solar energy to UV ratio (independent from solarradiation)\n", " df['energy_uv_ratio'] = df['solarenergy'] / (df['uvindex'] + 1e-6)\n", "\n", " # Time aggregations\n", " # Moving averages\n", " windows = [3, 6, 12, 24] # hours\n", " for w in windows:\n", " df[f'energy_rolling_mean_{w}h'] = df['solarenergy'].rolling(window=w).mean()\n", " df[f'uv_rolling_mean_{w}h'] = df['uvindex'].rolling(window=w).mean()\n", "\n", " # Daily aggregations utilizzando datetime\n", " df['energy_daily_sum'] = df.groupby(df.index.date)['solarenergy'].transform('sum')\n", " df['uv_daily_max'] = df.groupby(df.index.date)['uvindex'].transform('max')\n", "\n", " # Changes\n", " df['energy_change'] = df['solarenergy'].diff()\n", 
def add_advanced_features(df):
    """
    Add all advanced features to the DataFrame.

    Runs the individual feature-engineering passes (time, solar,
    solar-specific and radiation/energy features), then adds weather
    interaction terms, 6h rolling means, 1-hour lags, an extreme-conditions
    flag, and one-hot encodes the 'season' and 'time_period' columns
    (presumably created by add_time_features — confirm upstream).

    Assumes df has a DatetimeIndex; raises ValueError otherwise.
    Returns a new DataFrame (get_dummies produces a copy); the helper
    passes may also mutate the input in place.
    """
    # The downstream helpers rely on a time-aware index.
    if not isinstance(df.index, pd.DatetimeIndex):
        raise ValueError("DataFrame must have a DatetimeIndex")

    # Existing feature-engineering passes defined elsewhere in the notebook.
    df = add_time_features(df)
    df = add_solar_features(df)
    df = add_solar_specific_features(df)
    df = add_radiation_energy_features(df)

    # Weather variable interactions
    df['temp_humidity'] = df['temp'] * df['humidity']
    df['temp_cloudcover'] = df['temp'] * df['cloudcover']
    df['visibility_cloudcover'] = df['visibility'] * df['cloudcover']

    # Derived features.
    # NOTE(review): 'temp_humidity_interaction' is just 'temp_humidity' / 100 —
    # the two are perfectly correlated; possibly intentional for scaling.
    df['clear_sky_factor'] = (100 - df['cloudcover']) / 100
    df['temp_humidity_interaction'] = df['temp'] * df['humidity'] / 100
    df['atmospheric_transparency'] = (100 - df['cloudcover']) * (df['visibility'] / 10)

    # Rolling means
    df['temp_rolling_mean_6h'] = df['temp'].rolling(window=6).mean()
    df['cloudcover_rolling_mean_6h'] = df['cloudcover'].rolling(window=6).mean()

    # Lag features
    df['temp_1h_lag'] = df['temp'].shift(1)
    df['cloudcover_1h_lag'] = df['cloudcover'].shift(1)
    df['humidity_1h_lag'] = df['humidity'].shift(1)

    # Extreme conditions indicator: hot (top quartile) AND dry (bottom quartile).
    df['extreme_conditions'] = ((df['temp'] > df['temp'].quantile(0.75)) &
                                (df['humidity'] < df['humidity'].quantile(0.25))).astype(int)

    # One-hot encoding for categorical features
    df = pd.get_dummies(df, columns=['season', 'time_period'])

    return df
def prepare_advanced_data(df):
    """
    Prepare data for advanced modeling with proper datetime handling.

    Pipeline: copy the input, ensure a sorted DatetimeIndex, run the
    feature-engineering passes, select a fixed feature set, time-interpolate
    missing values, split chronologically at 2010 (pre-2010 rows become the
    set to predict on), and scale X and y with RobustScaler fitted on the
    training portion only.

    Returns:
        (X_train_scaled, X_test_scaled, y_train_scaled, y_test_scaled,
         scaler_X, scaler_y, final_features, X_to_predict_scaled)

    NOTE(review): relies on notebook globals `train_test_split`,
    `RobustScaler` and `random_state_value`, and on a 'year' column
    presumably created by add_time_features — confirm upstream.
    """
    # Work on a copy so the caller's DataFrame is not modified here.
    df = df.copy()

    # Promote the 'datetime' column to the index if needed.
    if not isinstance(df.index, pd.DatetimeIndex):
        if 'datetime' in df.columns:
            df['datetime'] = pd.to_datetime(df['datetime'])
            df.set_index('datetime', inplace=True)
        else:
            raise ValueError("No datetime column or index found in DataFrame")

    # Chronological order is required by rolling/lag features and the split.
    df = df.sort_index()

    # Apply feature engineering functions
    df = add_advanced_features(df)

    features = {
        # Primary Features (strong direct correlation)
        'primary_features': [
            'uvindex',  # Direct radiation indicator
            'cloudcover',  # Cloud coverage
            'visibility',  # Atmospheric transparency
            'temp',  # Temperature
            'pressure',  # Atmospheric pressure
            'humidity',  # Humidity
        ],

        # Astronomical and Temporal Features
        'astronomical_features': [
            'solar_elevation',  # Solar elevation
            'solar_angle',  # Solar angle
            'day_length',  # Day length
            'hour_sin',  # Daily cycle
            'hour_cos',
            'day_of_year_sin',  # Annual cycle
            'day_of_year_cos',
            'month_sin',  # Monthly cycle
            'month_cos',
        ],

        # Key Indices and Interactions
        # NOTE(review): 'clear_sky_index' is presumably created by
        # add_solar_features (not visible here); the visible helper creates
        # 'clearness_index' instead — confirm it exists, otherwise the
        # column selection below raises KeyError.
        'key_interactions': [
            'clear_sky_index',  # Clear sky index
            'atmospheric_attenuation',  # Atmospheric attenuation
            'theoretical_radiation',  # Theoretical radiation
            'expected_radiation',  # Expected radiation
            'cloud_elevation',  # Cloud-elevation interaction
            'visibility_elevation',  # Visibility-elevation interaction
            'uv_cloud_interaction',  # UV-cloud interaction
            'temp_radiation_potential',  # Temperature-radiation potential
        ],

        # Rolling Features (temporal trends)
        'rolling_features': [
            'cloud_rolling_12h',  # Cloud coverage moving average
            'temp_rolling_12h',  # Temperature moving average
            'uv_rolling_12h',  # UV moving average
            'cloudcover_rolling_mean_6h',
            'temp_rolling_mean_6h',
        ],

        # Lag Features (most recent)
        'lag_features': [
            'temp_1h_lag',  # 1-hour temperature lag
            'cloudcover_1h_lag',  # 1-hour cloud coverage lag
            'humidity_1h_lag',  # 1-hour humidity lag
            'uv_lag_1h',  # 1-hour UV lag
        ],

        # Categorical Features (one-hot columns from add_advanced_features)
        'categorical_features': [
            'season_Spring',  # Seasons
            'season_Summer',
            'season_Autumn',
            'season_Winter',
            'time_period_Morning',  # Time periods
            'time_period_Afternoon',
            'time_period_Evening',
            'time_period_Night',
        ]
    }

    # Flatten the grouped feature lists into one ordered list.
    final_features = [feature for group in features.values() for feature in group]

    # Handle missing values: time-based interpolation for features and
    # targets, then zero-fill what remains (e.g. leading rolling/lag NaNs).
    target_variables = ['solarradiation', 'solarenergy', 'uvindex']
    for column in final_features + target_variables:
        if column in df.columns:
            df[column] = df[column].interpolate(method='time')

    df.fillna(0, inplace=True)

    # Temporal split: post-2010 rows are used for training/testing,
    # pre-2010 rows are predicted later.
    data_after_2010 = df[df['year'] >= 2010].copy()
    data_before_2010 = df[df['year'] < 2010].copy()

    X = data_after_2010[final_features]
    y = data_after_2010['solarradiation']
    X_to_predict = data_before_2010[final_features]

    # Train-test split. With shuffle=False this is a chronological 80/20 cut;
    # random_state therefore has no effect here.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=random_state_value, shuffle=False
    )

    # Scaling: fit on the training portion only to avoid leakage.
    scaler_X = RobustScaler()
    X_train_scaled = scaler_X.fit_transform(X_train)
    X_test_scaled = scaler_X.transform(X_test)
    X_to_predict_scaled = scaler_X.transform(X_to_predict)

    scaler_y = RobustScaler()
    y_train_scaled = scaler_y.fit_transform(y_train.values.reshape(-1, 1))
    y_test_scaled = scaler_y.transform(y_test.values.reshape(-1, 1))

    # Print info about selected features
    print("\nSelected features:")
    print(f"Number of features: {len(final_features)}")
    print("Features list:", final_features)

    return X_train_scaled, X_test_scaled, y_train_scaled, y_test_scaled, scaler_X, scaler_y, final_features, X_to_predict_scaled
def create_sequence_data(X, sequence_length=24):
    """
    Turn a 2-D feature matrix into overlapping windows for LSTM input.

    Each output element is a slice of `sequence_length` consecutive rows;
    with N input rows the result holds N - sequence_length + 1 windows
    (an empty array when N < sequence_length).
    """
    window_count = len(X) - sequence_length + 1
    windows = [X[start:start + sequence_length] for start in range(window_count)]
    return np.array(windows)


def prepare_hybrid_data(df):
    """
    Run the tabular preparation pipeline, then reshape the scaled matrices
    into 24-hour sequences suitable for the hybrid LSTM model.
    """
    (X_train_scaled, X_test_scaled, y_train_scaled, y_test_scaled,
     scaler_X, scaler_y, features, X_to_predict_scaled) = prepare_advanced_data(df)

    sequence_length = 24  # hours of history per sample

    X_train_seq = create_sequence_data(X_train_scaled, sequence_length)
    X_test_seq = create_sequence_data(X_test_scaled, sequence_length)

    # Each sequence predicts the target at its final timestep, so the first
    # (sequence_length - 1) targets have no matching sequence and are dropped.
    y_train = y_train_scaled[sequence_length - 1:]
    y_test = y_test_scaled[sequence_length - 1:]

    X_to_predict_seq = create_sequence_data(X_to_predict_scaled, sequence_length)

    return X_train_seq, X_test_seq, y_train, y_test, scaler_X, scaler_y, features, X_to_predict_seq
def create_residual_lstm_layer(x, units, dropout_rate, l2_reg=0.01, return_sequences=True, survival_probability=0.8):
    """
    Creates a bidirectional LSTM layer with residual connections and regularization.

    Parameters:
    x: Input tensor
    units: Number of LSTM units
    dropout_rate: Dropout rate for regularization
    l2_reg: L2 regularization factor
    return_sequences: Whether to return sequences or just the last output
    survival_probability: Probability of layer survival for stochastic depth
    """
    residual = x
    # Bidirectional doubles the channel dimension: the output carries 2*units features.
    x = Bidirectional(LSTM(units, return_sequences=return_sequences, kernel_regularizer=regularizers.l2(l2_reg)))(x)
    x = LayerNormalization()(x)
    x = Dropout(dropout_rate)(x)

    if return_sequences:
        # Project the shortcut to 2*units channels so shapes match for the merge.
        if int(residual.shape[-1]) != 2 * units:
            residual = Dense(2 * units, activation='linear')(residual)
        # Stochastic depth (tensorflow_addons): during training the transformed
        # branch is randomly dropped with probability 1 - survival_probability.
        x = tfa.layers.StochasticDepth(survival_probability)([x, residual])
    # NOTE(review): when return_sequences=False the residual/stochastic-depth
    # path is skipped entirely (the shortcut's time dimension would not match
    # the collapsed output) — confirm this asymmetry is intended.
    return x


def attention_block(x, units, num_heads=8, survival_probability=0.8):
    """
    Creates a multi-head attention block with residual connections.

    Parameters:
    x: Input tensor
    units: Dimension of the key space
    num_heads: Number of attention heads
    survival_probability: Probability of layer survival for stochastic depth
    """
    # Self-attention: the same tensor is used as query and key/value.
    attention = MultiHeadAttention(num_heads=num_heads, key_dim=units)(x, x)
    # Residual merge with stochastic depth, then post-norm.
    x = tfa.layers.StochasticDepth(survival_probability)([x, attention])
    x = LayerNormalization()(x)
    return x
def create_regression_branch(shared_features, l2_lambda=0.005, name_suffix=''):
    """
    Create a single regression branch with an improved architecture.

    Builds four residual dense blocks (256 -> 128 -> 64 -> 32 units, each block
    being Dense-BN-swish-Dropout twice plus a skip connection) and ends with a
    single linear output unit.

    Parameters:
    shared_features: Input tensor from the shared backbone
    l2_lambda: L2 regularization factor for all Dense layers
    name_suffix: Suffix appended to every layer name (keeps ensemble
        branches uniquely named)
    """
    regression_x = shared_features
    dense_units = [256, 128, 64, 32]
    dense_dropout = [0.3, 0.2, 0.15, 0.1]
    
    # One residual block per (units, dropout) pair.
    for i, (units, dropout) in enumerate(zip(dense_units, dense_dropout)):
        # Save the block input for the skip connection.
        residual = regression_x
        
        # First dense layer of the block
        regression_x = Dense(
            units, 
            kernel_regularizer=regularizers.l2(l2_lambda),
            name=f'reg_dense1_{units}_{name_suffix}'
        )(regression_x)
        regression_x = BatchNormalization(name=f'reg_bn1_{units}_{name_suffix}')(regression_x)
        regression_x = Activation('swish', name=f'reg_swish1_{units}_{name_suffix}')(regression_x)
        regression_x = Dropout(dropout, name=f'reg_drop1_{units}_{name_suffix}')(regression_x)
        
        # Second dense layer of the block
        regression_x = Dense(
            units,
            kernel_regularizer=regularizers.l2(l2_lambda),
            name=f'reg_dense2_{units}_{name_suffix}'
        )(regression_x)
        regression_x = BatchNormalization(name=f'reg_bn2_{units}_{name_suffix}')(regression_x)
        regression_x = Activation('swish', name=f'reg_swish2_{units}_{name_suffix}')(regression_x)
        regression_x = Dropout(dropout, name=f'reg_drop2_{units}_{name_suffix}')(regression_x)
        
        # Residual projection when widths differ.
        # NOTE(review): for i == 0 the shortcut is added un-projected, so the
        # incoming shared_features must already have dense_units[0] (=256)
        # channels — true for the current caller, but fragile; confirm.
        if i > 0 and int(residual.shape[-1]) != units:
            residual = Dense(
                units,
                kernel_regularizer=regularizers.l2(l2_lambda),
                name=f'reg_residual_proj_{units}_{name_suffix}'
            )(residual)
        
        # Skip connection
        regression_x = Add(name=f'reg_skip_{units}_{name_suffix}')([regression_x, residual])
    
    # Output layer: single linear unit (scalar regression).
    regression_output = Dense(1, name=f'regression_output_{name_suffix}')(regression_x)
    
    return regression_output
def create_solarradiation_model(input_shape, folder_name, l2_lambda=0.005, min_output=0, max_output=1):
    """
    Creates a hybrid model with ensemble regression.

    Architecture: a shared LSTM+attention backbone feeding
      - a binary classification head (is radiation non-zero?),
      - an ensemble of 3 regression branches averaged and clipped to
        [min_output, max_output],
      - a final output that multiplies the regression value by a hard 0/1
        gate derived from the classification head.

    Parameters:
    input_shape: Shape of one input sample (timesteps, features)
    folder_name: Prefix used for the architecture PNG saved by plot_model
    l2_lambda: L2 regularization factor used throughout
    min_output, max_output: Clipping range for the regression output

    Returns the compiled Keras Model with three named outputs.
    """
    inputs = Input(shape=input_shape)

    # Shared backbone hyperparameters, one entry per block.
    survival_probs = [0.9, 0.8, 0.7, 0.6]
    attention_survival_probs = [0.85, 0.75, 0.65, 0.55]
    lstm_units = [256, 128, 64, 32]
    dropout_rates = [0.4, 0.3, 0.2, 0.2]
    attention_heads = [32, 24, 16, 8]

    # Shared backbone: residual BiLSTM + attention, pooling between blocks.
    x = inputs
    lstm_blocks = 4
    for i in range(lstm_blocks):
        x = create_residual_lstm_layer(
            x,
            units=lstm_units[i],
            dropout_rate=dropout_rates[i],
            l2_reg=l2_lambda,
            return_sequences=True,
            survival_probability=survival_probs[i]
        )
        x = attention_block(
            x,
            units=lstm_units[i],
            num_heads=attention_heads[i],
            survival_probability=attention_survival_probs[i]
        )
        # Halve the time dimension between blocks (not after the last one).
        if i < lstm_blocks - 1:
            x = MaxPooling1D()(x)

    # Final shared LSTM layer — collapses the sequence to a feature vector.
    shared_features = create_residual_lstm_layer(
        x,
        units=32,
        dropout_rate=0.1,
        l2_reg=l2_lambda,
        return_sequences=False,
        survival_probability=0.6
    )

    # Classification branch: predicts probability that radiation is non-zero.
    classification_x = Dense(64, kernel_regularizer=regularizers.l2(l2_lambda))(shared_features)
    classification_x = BatchNormalization()(classification_x)
    classification_x = Activation('swish')(classification_x)
    classification_x = Dropout(0.2)(classification_x)
    classification_x = Dense(32, kernel_regularizer=regularizers.l2(l2_lambda))(classification_x)
    classification_x = BatchNormalization()(classification_x)
    classification_x = Activation('swish')(classification_x)
    classification_output = Dense(1, activation='sigmoid', name='classification_output')(classification_x)

    # Ensemble of regression branches, each fed a different projection of the
    # shared features.
    n_ensemble = 3
    regression_outputs = []
    
    for i in range(n_ensemble):
        # Per-branch variation of the shared features.
        features_variation = Dense(
            256,
            activation='swish',
            kernel_regularizer=regularizers.l2(l2_lambda),
            name=f'ensemble_features_{i}'
        )(shared_features)
        
        # Build one regression branch.
        reg_output = create_regression_branch(
            features_variation,
            l2_lambda=l2_lambda,
            name_suffix=f'ensemble_{i}'
        )
        regression_outputs.append(reg_output)

    # Combine the ensemble outputs by simple averaging.
    if n_ensemble > 1:
        regression_output = Average(name='regression_ensemble')(regression_outputs)
    else:
        regression_output = regression_outputs[0]
    
    # Clip the regression values to the expected output range.
    # NOTE(review): Lambda layers closing over Python variables may not
    # reload from the saved .h5 without custom_objects — confirm.
    regression_output = Lambda(
        lambda x: tf.clip_by_value(x, min_output, max_output),
        name='regression_output'
    )(regression_output)

    # Combine outputs using threshold activation: sigmoid > 0.5 becomes a hard
    # 0/1 gate that zeroes the regression value.
    # NOTE(review): the cast-based gate has zero gradient, so the
    # classification head learns only through its own loss — confirm intended.
    thresholded_classification = ThresholdedReLU(theta=0.5)(classification_output)
    normalized_classification = Lambda(lambda x: tf.cast(x > 0, tf.float32))(thresholded_classification)
    final_output = Lambda(
        lambda inputs: inputs[0] * inputs[1],
        name='final_output'
    )([regression_output, normalized_classification])

    # Create model with all outputs
    model = Model(
        inputs=inputs,
        outputs=[
            classification_output,
            regression_output,
            final_output
        ],
        name="SolarRadiationModel"
    )

    # Custom loss functions
    def hybrid_focal_loss(y_true, y_pred):
        # MSE weighted by squared relative error (focuses on badly-missed
        # samples), blended 70/30 with plain MAE.
        mse = tf.square(y_true - y_pred)
        error_ratio = tf.abs(y_true - y_pred) / (tf.abs(y_true) + 1.0)
        focal_weight = tf.pow(error_ratio, 2)
        weighted_mse = focal_weight * mse
        mae = tf.abs(y_true - y_pred)
        return tf.reduce_mean(0.7 * weighted_mse + 0.3 * mae)

    def masked_regression_loss(y_true, y_pred):
        # Score the regression head only where the true value is non-zero.
        mask = tf.cast(tf.not_equal(y_true, 0), tf.float32)
        return hybrid_focal_loss(y_true * mask, y_pred * mask)

    # Metrics
    def rmse(y_true, y_pred):
        return tf.sqrt(tf.reduce_mean(tf.square(y_true - y_pred)))

    def custom_mape(y_true, y_pred):
        # Relative error clipped to 100% so zero targets cannot blow it up.
        epsilon = 1e-7
        diff = tf.abs((y_true - y_pred) / (y_true + epsilon))
        diff = tf.clip_by_value(diff, 0, 1)
        return tf.reduce_mean(diff) * 100

    # Optimizer
    optimizer = AdamW(
        learning_rate=0.0003,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        weight_decay=0.001,
        amsgrad=True
    )

    # Compile model: per-head losses with a weighted sum (0.2/0.5/0.3).
    model.compile(
        optimizer=optimizer,
        loss={
            'classification_output': 'binary_crossentropy',
            'regression_output': masked_regression_loss,
            'final_output': hybrid_focal_loss
        },
        loss_weights={
            'classification_output': 0.2,
            'regression_output': 0.5,
            'final_output': 0.3
        },
        metrics={
            'classification_output': ['accuracy', AUC()],
            'regression_output': ['mse', 'mae', rmse, custom_mape],
            'final_output': ['mse', 'mae', rmse, custom_mape]
        }
    )

    model.summary()

    # Save model architecture visualization (requires graphviz/pydot).
    plot_model(
        model,
        to_file=f'{folder_name}_model_architecture.png',
        show_shapes=True,
        show_layer_names=True,
        dpi=150,
        show_layer_activations=True
    )

    return model
def evaluate_solarradiation_predictions(y_true, y_pred, hour=None, folder_name=None):
    """
    Comprehensive evaluation of solar radiation predictions with detailed analysis and visualizations.

    Parameters:
    -----------
    y_true : array-like
        Actual solar radiation values (W/m²)
    y_pred : array-like
        Predicted solar radiation values (W/m²)
    hour : array-like, optional
        Array of hours corresponding to predictions, for temporal analysis
    folder_name : str, optional
        Directory/prefix to save analysis plots

    Returns:
    --------
    dict
        Dictionary containing all calculated metrics
    """

    # Data preparation
    y_true = np.array(y_true).ravel()
    y_pred = np.array(y_pred).ravel()
    errors = y_pred - y_true

    # Basic metrics calculation
    mae_raw = mean_absolute_error(y_true, y_pred)
    rmse_raw = np.sqrt(mean_squared_error(y_true, y_pred))
    r2_raw = r2_score(y_true, y_pred)

    # MAPE only over values above 10 W/m² (avoids division blow-up near zero)
    mask = y_true > 10
    if np.any(mask):
        mape = np.mean(np.abs((y_true[mask] - y_pred[mask]) / y_true[mask])) * 100
    else:
        mape = np.nan

    # Absolute error-margin accuracy (W/m² bands)
    within_5_percent = np.mean(np.abs(errors) <= 5) * 100  # Within 5 W/m²
    within_10_percent = np.mean(np.abs(errors) <= 10) * 100  # Within 10 W/m²
    within_20_percent = np.mean(np.abs(errors) <= 20) * 100  # Within 20 W/m²

    # Radiation level classification (fixed W/m² bands)
    def get_radiation_level(value):
        if value <= 200:
            return 'Very Low'
        elif value <= 400:
            return 'Low'
        elif value <= 600:
            return 'Moderate'
        elif value <= 800:
            return 'High'
        elif value <= 1000:
            return 'Very High'
        else:
            return 'Extreme'

    # Calculate radiation levels
    y_true_levels = [get_radiation_level(v) for v in y_true]
    y_pred_levels = [get_radiation_level(v) for v in y_pred]
    level_accuracy = np.mean([t == p for t, p in zip(y_true_levels, y_pred_levels)])

    # Print main metrics
    print("\nSolar Radiation Prediction Metrics:")
    print("\nAbsolute Metrics:")
    print(f"MAE: {mae_raw:.2f} W/m²")
    print(f"RMSE: {rmse_raw:.2f} W/m²")
    print(f"R² Score: {r2_raw:.3f}")
    print(f"MAPE: {mape:.2f}%" if not np.isnan(mape) else "MAPE: N/A (insufficient data)")

    print("\nAccuracy Metrics:")
    print(f"Within ±5 W/m²: {within_5_percent:.1f}%")
    print(f"Within ±10 W/m²: {within_10_percent:.1f}%")
    print(f"Within ±20 W/m²: {within_20_percent:.1f}%")

    print("\nLevel Accuracy:")
    print(f"Level Accuracy: {level_accuracy * 100:.1f}%")

    # Confusion matrix for radiation levels.
    # BUG FIX: without labels=, sklearn orders rows/columns alphabetically and
    # only includes levels that actually occur, so the hardcoded semantic
    # labels below were wrong (or the DataFrame constructor failed when fewer
    # than 6 levels were present). Passing labels= pins both order and size.
    level_order = ['Very Low', 'Low', 'Moderate', 'High', 'Very High', 'Extreme']
    cm = confusion_matrix(y_true_levels, y_pred_levels, labels=level_order)
    print("\nConfusion Matrix for Radiation Levels:")
    cm_df = pd.DataFrame(
        cm,
        columns=level_order,
        index=level_order
    )
    print(cm_df)

    # Time period analysis
    if hour is not None:
        day_periods = {
            'Morning (5-11)': (5, 11),
            'Noon (11-13)': (11, 13),
            'Afternoon (13-17)': (13, 17),
            'Evening (17-21)': (17, 21),
            'Night (21-5)': (21, 5)
        }

        print("\nAnalysis by Time Period:")
        for period, (start, end) in day_periods.items():
            # Night wraps around midnight, hence the OR mask.
            if start < end:
                mask = (hour >= start) & (hour < end)
            else:
                mask = (hour >= start) | (hour < end)

            if np.any(mask):
                period_mae = mean_absolute_error(y_true[mask], y_pred[mask])

                # Period MAPE restricted to values above 10 W/m²
                period_mask = mask & (y_true > 10)
                if np.any(period_mask):
                    period_mape = np.mean(np.abs((y_true[period_mask] - y_pred[period_mask]) / y_true[period_mask])) * 100
                    print(f"\n{period}:")
                    print(f"MAE: {period_mae:.2f} W/m²")
                    print(f"MAPE: {period_mape:.2f}%")
                else:
                    print(f"\n{period}:")
                    print(f"MAE: {period_mae:.2f} W/m²")
                    print("MAPE: N/A (insufficient data)")

    # Visualizations
    if folder_name is not None:
        try:
            # Figure 1: Main analysis plots
            plt.figure(figsize=(20, 15))

            # Plot 1: Scatter plot of actual vs predicted values
            plt.subplot(3, 2, 1)
            plt.scatter(y_true, y_pred, alpha=0.5)
            plt.plot([y_true.min(), y_true.max()], [y_true.min(), y_true.max()], 'r--', lw=2)
            plt.xlabel('Actual Radiation (W/m²)')
            plt.ylabel('Predicted Radiation (W/m²)')
            plt.title('Actual vs Predicted Values')
            plt.grid(True)

            # Plot 2: Absolute error distribution
            plt.subplot(3, 2, 2)
            plt.hist(errors, bins=50, alpha=0.7)
            plt.xlabel('Prediction Error (W/m²)')
            plt.ylabel('Frequency')
            plt.title('Error Distribution')
            plt.grid(True)

            # Plot 3: Percentage error distribution (only for values > 10 W/m²)
            plt.subplot(3, 2, 3)
            mask = y_true > 10
            if np.any(mask):
                percentage_errors = ((y_pred[mask] - y_true[mask]) / y_true[mask]) * 100
                plt.hist(np.clip(percentage_errors, -100, 100), bins=50, alpha=0.7)
                plt.xlabel('Percentage Error (%)')
                plt.ylabel('Frequency')
                plt.title('Percentage Error Distribution (for values > 10 W/m²)')
                plt.grid(True)

            # Plot 4: Errors vs actual values
            plt.subplot(3, 2, 4)
            plt.scatter(y_true, errors, alpha=0.5)
            plt.axhline(y=0, color='r', linestyle='--')
            plt.xlabel('Actual Radiation (W/m²)')
            plt.ylabel('Error (W/m²)')
            plt.title('Errors vs Actual Values')
            plt.grid(True)

            # Plot 5: Error boxplot by radiation level
            plt.subplot(3, 2, 5)
            sns.boxplot(x=[get_radiation_level(v) for v in y_true], y=errors)
            plt.xticks(rotation=45)
            plt.xlabel('Radiation Level')
            plt.ylabel('Error (W/m²)')
            plt.title('Error Distribution by Level')

            # Plot 6: Confusion matrix heatmap
            plt.subplot(3, 2, 6)
            sns.heatmap(cm_df, annot=True, fmt='d', cmap='Blues')
            plt.title('Confusion Matrix')
            plt.xticks(rotation=45)
            plt.yticks(rotation=45)

            plt.tight_layout()
            filename = f'{folder_name}_radiation_analysis.png'
            plt.savefig(filename, dpi=300, bbox_inches='tight')
            # BUG FIX: report the actual file name instead of a placeholder.
            print(f"\nPlot saved as: {filename}")
            plt.close()

        except Exception as e:
            print(f"\nError saving plots: {str(e)}")

    # Additional error statistics
    print("\nError Statistics:")
    print(f"Mean error: {np.mean(errors):.3f}")
    print(f"Error standard deviation: {np.std(errors):.3f}")
    print(f"Median error: {np.median(errors):.3f}")
    print(f"95th percentile absolute error: {np.percentile(np.abs(errors), 95):.3f}")

    # Return structured metrics
    metrics = {
        'absolute': {
            'mae': mae_raw,
            'rmse': rmse_raw,
            'r2': r2_raw,
            'mape': float(mape) if not np.isnan(mape) else None
        },
        'accuracy': {
            'within_5_wm2': float(within_5_percent),
            'within_10_wm2': float(within_10_percent),
            'within_20_wm2': float(within_20_percent)
        },
        'categorical': {
            'level_accuracy': float(level_accuracy)
        },
        'error_stats': {
            'mean': float(np.mean(errors)),
            'std': float(np.std(errors)),
            'median': float(np.median(errors)),
            'p95_abs': float(np.percentile(np.abs(errors), 95))
        }
    }

    return metrics
def plot_training_history(history, folder_name=None):
    """
    Visualize and save training history for the hybrid model.

    Plots the per-head losses, classification accuracy/AUC and the MAE of the
    regression and final heads; optionally saves the figure as PNG and the raw
    history dict as JSON using `folder_name` as a file prefix.

    NOTE(review): assumes Keras named the AUC metric 'auc' (keys like
    'classification_output_auc'); with multiple AUC instances Keras appends a
    counter ('auc_1', ...) — confirm against the compiled model.
    """
    plt.figure(figsize=(15, 10))

    # Loss plots
    plt.subplot(2, 2, 1)
    plt.plot(history.history['classification_output_loss'], label='Class Loss')
    plt.plot(history.history['regression_output_loss'], label='Reg Loss')
    plt.plot(history.history['final_output_loss'], label='Final Loss')
    plt.plot(history.history['val_classification_output_loss'], label='Val Class Loss')
    plt.plot(history.history['val_regression_output_loss'], label='Val Reg Loss')
    plt.plot(history.history['val_final_output_loss'], label='Val Final Loss')
    plt.title('Model Losses')
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.legend()
    plt.grid(True)

    # Classification metrics
    plt.subplot(2, 2, 2)
    plt.plot(history.history['classification_output_accuracy'], label='Class Acc')
    plt.plot(history.history['val_classification_output_accuracy'], label='Val Class Acc')
    plt.plot(history.history['classification_output_auc'], label='Class AUC')
    plt.plot(history.history['val_classification_output_auc'], label='Val Class AUC')
    plt.title('Classification Metrics')
    plt.xlabel('Epoch')
    plt.ylabel('Metric Value')
    plt.legend()
    plt.grid(True)

    # Regression metrics
    plt.subplot(2, 2, 3)
    plt.plot(history.history['regression_output_mae'], label='Reg MAE')
    plt.plot(history.history['val_regression_output_mae'], label='Val Reg MAE')
    plt.title('Regression MAE')
    plt.xlabel('Epoch')
    plt.ylabel('MAE')
    plt.legend()
    plt.grid(True)

    # Final output metrics
    plt.subplot(2, 2, 4)
    plt.plot(history.history['final_output_mae'], label='Final MAE')
    plt.plot(history.history['val_final_output_mae'], label='Val Final MAE')
    plt.title('Final Output MAE')
    plt.xlabel('Epoch')
    plt.ylabel('MAE')
    plt.legend()
    plt.grid(True)

    plt.tight_layout()

    if folder_name is not None:
        filename = f'{folder_name}_training_history.png'
        plt.savefig(filename, dpi=300, bbox_inches='tight')
        # BUG FIX: report the actual file name instead of a placeholder.
        print(f"\nTraining history plot saved as: {filename}")

        # Save history to JSON.
        # FIX: history values can be numpy floats, which json cannot
        # serialize natively — default=float coerces them safely.
        history_dict = history.history
        json_filename = f'{folder_name}_training_history.json'
        with open(json_filename, 'w') as f:
            json.dump(history_dict, f, default=float)
        print(f"Training history saved as: {json_filename}")

    plt.show()
def calculate_metrics(y_true, y_class, y_reg, y_final, min_output, max_output):
    """
    Print a full evaluation report for the three model heads.

    Parameters:
    - y_true: true target values
    - y_class: sigmoid probabilities from the classification head
    - y_reg: raw regression-head predictions
    - y_final: combined (gated) predictions
    - min_output, max_output: expected prediction range, used to count
      out-of-range predictions
    """
    from sklearn.metrics import roc_auc_score, classification_report, confusion_matrix

    y_true = np.array(y_true).flatten()
    y_class = np.array(y_class).flatten()
    y_reg = np.array(y_reg).flatten()
    y_final = np.array(y_final).flatten()

    def report_value_metrics(actual, predicted):
        # Shared value-metrics report: out-of-range count, clipped MAPE,
        # share of samples within 10% relative error, MAE and RMSE.
        outside = np.sum((predicted < min_output) | (predicted > max_output))
        rel_err = np.clip(np.abs((actual - predicted) / (actual + 1e-7)), 0, 1)
        print(f"Out of range: {outside} predictions")
        print(f"MAPE: {np.mean(rel_err) * 100:.2f}%")
        print(f"Within ±10%: {np.mean(rel_err <= 0.10) * 100:.2f}%")
        print(f"MAE: {np.mean(np.abs(actual - predicted)):.2f}")
        print(f"RMSE: {np.sqrt(np.mean(np.square(actual - predicted))):.2f}")

    # --- Classification head ---
    print("\nClassification Metrics:")
    actual_nonzero = y_true > 0
    y_true_binary = actual_nonzero.astype(int)
    y_pred_binary = (y_class > 0.5).astype(int)

    print(f"Accuracy: {np.mean((y_class > 0.5) == actual_nonzero) * 100:.2f}%")
    print(f"AUC-ROC: {roc_auc_score(actual_nonzero, y_class):.4f}")

    print("\nConfusion Matrix:")
    print(confusion_matrix(y_true_binary, y_pred_binary))

    print("\nClassification Report:")
    print(classification_report(y_true_binary, y_pred_binary,
                                target_names=['Zero', 'Non-Zero'],
                                digits=4))

    # --- Regression head, scored only where the target is non-zero ---
    print("\nRegression Metrics (non-zero values):")
    if actual_nonzero.any():
        report_value_metrics(y_true[actual_nonzero], y_reg[actual_nonzero])
    else:
        print("No non-zero values in this batch")

    # --- Gated final output, scored on every sample ---
    print("\nFinal Combined Output Metrics:")
    report_value_metrics(y_true, y_final)
def train_hybrid_model(model, X_train, y_train, X_test, y_test, epochs=100, batch_size=32, folder_name='solarradiation', min_output=0, max_output=1):
    """
    Advanced training function for the hybrid solar radiation model.

    Trains the three-headed model (classification / regression / final) with
    early stopping, LR reduction, checkpointing, TensorBoard logging and a
    periodic detailed-metrics report, then prints a final evaluation.

    Returns the Keras History object; re-raises any training exception after
    printing diagnostic information.
    """
    # Binary targets for the classification head: 1 where radiation is non-zero.
    y_train_binary = (y_train > 0).astype(float)
    y_test_binary = (y_test > 0).astype(float)

    # Target dicts keyed by the model's output layer names.
    train_targets = {
        'classification_output': y_train_binary,
        'regression_output': y_train,
        'final_output': y_train
    }

    # Validation targets dictionary
    test_targets = {
        'classification_output': y_test_binary,
        'regression_output': y_test,
        'final_output': y_test
    }

    def report_detailed_metrics(epoch, logs):
        # BUG FIX: the original lambda used
        # `print(...) and calculate_metrics(...)`; print() returns None, so
        # the `and` short-circuited and calculate_metrics never ran.
        if epoch % 10 == 0:
            print(f"\nEpoch {epoch + 1} Detailed Metrics:")
            calculate_metrics(y_test, *model.predict(X_test, verbose=0), min_output, max_output)

    callbacks = [
        EarlyStopping(
            monitor='val_final_output_loss',
            patience=15,
            restore_best_weights=True,
            mode='min',
            verbose=1,
            min_delta=1e-4
        ),
        ReduceLROnPlateau(
            monitor='val_final_output_loss',
            factor=0.5,
            patience=7,
            verbose=1,
            mode='min',
            min_delta=1e-4,
            cooldown=2,
            min_lr=1e-7
        ),
        tf.keras.callbacks.ModelCheckpoint(
            filepath=f'{folder_name}_best_model.h5',
            monitor='val_final_output_loss',
            save_best_only=True,
            mode='min',
            save_weights_only=False
        ),
        tf.keras.callbacks.TensorBoard(
            log_dir=f'./{folder_name}_logs',
            histogram_freq=1,
            write_graph=True,
            update_freq='epoch'
        ),
        tf.keras.callbacks.LambdaCallback(on_epoch_end=report_detailed_metrics)
    ]

    try:
        history = model.fit(
            X_train,
            train_targets,
            validation_data=(X_test, test_targets),
            epochs=epochs,
            batch_size=batch_size,
            callbacks=callbacks,
            verbose=1,
            shuffle=False  # chronological data — do not shuffle sequences
        )

        print("\nTraining completed successfully!")

        # Final evaluation
        predictions = model.predict(X_test, verbose=0)
        calculate_metrics(y_test, *predictions, min_output, max_output)

        return history

    except Exception as e:
        print(f"\nError during training: {str(e)}")
        print("\nModel output names:", [output.name for output in model.outputs])
        print("Training targets keys:", train_targets.keys())
        raise

    finally:
        # NOTE(review): clear_session() runs on the success path too, right
        # after model.predict — confirm downstream code does not continue
        # training this model instance afterwards.
        tf.keras.backend.clear_session()
nome corrisponde a quello nel modello\n", " 'final_output': y_test\n", " }\n", "\n", " callbacks = [\n", " EarlyStopping(\n", " monitor='val_final_output_loss',\n", " patience=15,\n", " restore_best_weights=True,\n", " mode='min',\n", " verbose=1,\n", " min_delta=1e-4\n", " ),\n", " ReduceLROnPlateau(\n", " monitor='val_final_output_loss',\n", " factor=0.5,\n", " patience=7,\n", " verbose=1,\n", " mode='min',\n", " min_delta=1e-4,\n", " cooldown=2,\n", " min_lr=1e-7\n", " ),\n", " tf.keras.callbacks.ModelCheckpoint(\n", " filepath=f'{folder_name}_best_model.h5',\n", " monitor='val_final_output_loss',\n", " save_best_only=True,\n", " mode='min',\n", " save_weights_only=False\n", " ),\n", " tf.keras.callbacks.TensorBoard(\n", " log_dir=f'./{folder_name}_logs',\n", " histogram_freq=1,\n", " write_graph=True,\n", " update_freq='epoch'\n", " ),\n", " tf.keras.callbacks.LambdaCallback(\n", " on_epoch_end=lambda epoch, logs: (\n", " print(f\"\\nEpoch {epoch + 1} Detailed Metrics:\") and\n", " calculate_metrics(y_test, *model.predict(X_test, verbose=0), min_output, max_output)\n", " ) if epoch % 10 == 0 else None\n", " )\n", " ]\n", "\n", " try:\n", " history = model.fit(\n", " X_train,\n", " train_targets,\n", " validation_data=(X_test, test_targets),\n", " epochs=epochs,\n", " batch_size=batch_size,\n", " callbacks=callbacks,\n", " verbose=1,\n", " shuffle=False\n", " )\n", "\n", " print(\"\\nTraining completed successfully!\")\n", "\n", " # Final evaluation\n", " predictions = model.predict(X_test, verbose=0)\n", " calculate_metrics(y_test, *predictions, min_output, max_output)\n", "\n", " return history\n", "\n", " except Exception as e:\n", " print(f\"\\nError during training: {str(e)}\")\n", " print(\"\\nModel output names:\", [output.name for output in model.outputs])\n", " print(\"Training targets keys:\", train_targets.keys())\n", " raise\n", "\n", " finally:\n", " tf.keras.backend.clear_session()\n", "\n", "\n", "def integrate_predictions(df, predictions, 
def integrate_predictions(df, predictions, sequence_length=24):
    """
    Integrates solar radiation predictions into the original dataset for pre-2010 data.

    Parameters
    ----------
    df : pandas.DataFrame
        Original dataset; must contain 'datetime' and 'solarradiation' columns.
        Note: the 'datetime' column is converted in place on the passed frame.
    predictions : tuple
        Tuple containing (classification_pred, regression_pred, final_pred)
        - classification_pred: probability of non-zero values
        - regression_pred: predicted values (used for non-zero cases)
        - final_pred: final combined predictions
    sequence_length : int
        Sequence length used for predictions; the first sequence_length - 1
        pre-2010 rows have no prediction.

    Returns
    -------
    pandas.DataFrame
        Updated dataset with missing pre-2010 solar radiation values filled in
        (the intermediate prediction columns are dropped before returning).
    """
    # Ensure the datetime column has a proper datetime dtype
    df['datetime'] = pd.to_datetime(df['datetime'])

    # Rows that the model was asked to back-fill
    mask_pre_2010 = df['datetime'].dt.year < 2010

    classification_pred, regression_pred, final_pred = predictions

    # .loc instead of chained indexing (df[mask]['col']) — same result, no
    # SettingWithCopy ambiguity. Predictions start after the first window.
    dates_pre_2010 = df.loc[mask_pre_2010, 'datetime'].iloc[sequence_length - 1:]
    predictions_df = pd.DataFrame({
        'datetime': dates_pre_2010,
        'solarradiation_predicted': final_pred.flatten(),
        'solarradiation_classification': classification_pred.flatten(),
        'solarradiation_regression': regression_pred.flatten()
    })

    # Attach predictions by timestamp; rows without a prediction get NaN
    df = df.merge(predictions_df, on='datetime', how='left')

    # Only fill where solar radiation is missing
    df['solarradiation'] = df['solarradiation'].fillna(df['solarradiation_predicted'])

    print("\nPrediction Integration Statistics:")
    print(f"Added {len(final_pred)} predictions to dataset")
    print(f"Rows with solar radiation after integration: {df['solarradiation'].notna().sum()}")

    # Rows whose final value came from (or equals) the prediction
    mask_filled = df['solarradiation'] == df['solarradiation_predicted']
    if mask_filled.any():
        filled_data = df[mask_filled]

        print("\nFilled Values Analysis:")
        print(f"Zero predictions (classification < 0.5): {(filled_data['solarradiation_classification'] < 0.5).sum()}")
        print(f"Non-zero predictions (classification >= 0.5): {(filled_data['solarradiation_classification'] >= 0.5).sum()}")

        # Distribution of the non-zero predicted values
        non_zero_pred = filled_data[filled_data['solarradiation_predicted'] > 0]
        if len(non_zero_pred) > 0:
            print(f"\nNon-zero predictions statistics:")
            print(f"Mean: {non_zero_pred['solarradiation_predicted'].mean():.2f}")
            print(f"Median: {non_zero_pred['solarradiation_predicted'].median():.2f}")
            print(f"Std: {non_zero_pred['solarradiation_predicted'].std():.2f}")

    # Drop the intermediate prediction columns before returning
    columns_to_drop = ['solarradiation_predicted', 'solarradiation_classification',
                       'solarradiation_regression']
    df = df.drop(columns=columns_to_drop)

    return df
def analyze_distribution(data, predictions=None, sequence_length=24, name='Solar Radiation'):
    """
    Analyze in detail the distribution of actual and predicted values.

    Parameters
    ----------
    data : pandas.DataFrame
        DataFrame with the original data ('datetime' and 'solarradiation' columns).
    predictions : tuple
        Tuple containing (classification_pred, regression_pred, final_pred).
        BUG FIX: this was documented but missing from the original signature,
        so the function relied on a leaked global `predictions`.
    sequence_length : int
        Sequence length used for the predictions.
    name : str
        Display name of the analyzed variable.

    Returns
    -------
    pandas.DataFrame
        Per-sample frame with 'actual', 'classification', 'regression', 'final'.
    """
    if predictions is None:
        raise ValueError("predictions tuple (classification, regression, final) is required")

    classification_pred, regression_pred, final_pred = predictions

    # Align actual values with the prediction window (pre-2010 rows only)
    mask_pre_2010 = data['datetime'].dt.year < 2010
    actual_values = data.loc[mask_pre_2010, 'solarradiation'].iloc[sequence_length - 1:]

    analysis_df = pd.DataFrame({
        'actual': actual_values,
        'classification': classification_pred.flatten(),
        'regression': regression_pred.flatten(),
        'final': final_pred.flatten()
    })

    # Analyze each component in turn
    components = {
        'Actual Values': 'actual',
        'Classification Predictions': 'classification',
        'Regression Predictions': 'regression',
        'Final Combined Predictions': 'final'
    }

    for title, column in components.items():
        print(f"\n{'-'*20} {title} {'-'*20}")

        # Basic statistics
        stats_dict = {
            'count': len(analysis_df[column]),
            'missing': analysis_df[column].isnull().sum(),
            'zeros': (analysis_df[column] == 0).sum(),
            'mean': analysis_df[column].mean(),
            'median': analysis_df[column].median(),
            'std': analysis_df[column].std(),
            'min': analysis_df[column].min(),
            'max': analysis_df[column].max(),
            'skewness': stats.skew(analysis_df[column].dropna()),
            'kurtosis': stats.kurtosis(analysis_df[column].dropna())
        }

        # Percentiles
        percentiles = [1, 5, 10, 25, 50, 75, 90, 95, 99]
        for p in percentiles:
            stats_dict[f'percentile_{p}'] = np.percentile(analysis_df[column].dropna(), p)

        # Distribution plots
        fig, axes = plt.subplots(2, 2, figsize=(20, 12))
        fig.suptitle(f'Distribution Analysis - {title}')

        # Histogram
        sns.histplot(data=analysis_df, x=column, kde=True, ax=axes[0,0])
        axes[0,0].set_title('Distribution')
        axes[0,0].set_xlabel(title)
        axes[0,0].set_ylabel('Frequency')

        # Box plot
        sns.boxplot(y=analysis_df[column], ax=axes[0,1])
        axes[0,1].set_title('Box Plot')

        # Q-Q plot. BUG FIX: probplot has no `ax` kwarg — passing plot=plt
        # together with ax=... raised TypeError; the axes object goes in `plot=`.
        stats.probplot(analysis_df[column].dropna(), dist="norm", plot=axes[1,0])
        axes[1,0].set_title('Q-Q Plot')

        # Log-transformed distribution (except for classification probabilities)
        if column != 'classification':
            sns.histplot(data=np.log1p(analysis_df[column]), kde=True, ax=axes[1,1])
            axes[1,1].set_title('Log-transformed Distribution')
            axes[1,1].set_xlabel(f'Log({title} + 1)')
            axes[1,1].set_ylabel('Frequency')
        else:
            sns.histplot(data=analysis_df[column], kde=True, ax=axes[1,1])
            axes[1,1].set_title('Classification Distribution')

        plt.tight_layout()
        plt.show()

        # Print the main statistics
        print("\nStatistiche principali:")
        print("-" * 50)
        for key, value in stats_dict.items():
            print(f"{key:15}: {value:,.4f}")

        # Output-type-specific analysis
        if column == 'classification':
            # Classification analysis at the 0.5 decision threshold
            threshold = 0.5
            predicted_zeros = (analysis_df[column] < threshold).sum()
            predicted_nonzeros = (analysis_df[column] >= threshold).sum()
            actual_zeros = (analysis_df['actual'] == 0).sum()

            print("\nAnalisi Classificazione:")
            print(f"Predicted Zeros: {predicted_zeros} ({predicted_zeros/len(analysis_df)*100:.2f}%)")
            print(f"Predicted Non-zeros: {predicted_nonzeros} ({predicted_nonzeros/len(analysis_df)*100:.2f}%)")
            print(f"Actual Zeros: {actual_zeros} ({actual_zeros/len(analysis_df)*100:.2f}%)")

            # Confusion matrix and per-class report
            y_true = (analysis_df['actual'] > 0).astype(int)
            y_pred = (analysis_df[column] >= threshold).astype(int)
            cm = confusion_matrix(y_true, y_pred)
            print("\nConfusion Matrix:")
            print(cm)
            print("\nClassification Report:")
            print(classification_report(y_true, y_pred))

        elif column in ['regression', 'final']:
            # Error metrics against the actual values
            errors = analysis_df['actual'] - analysis_df[column]
            mae = np.mean(np.abs(errors))
            rmse = np.sqrt(np.mean(errors**2))
            mape = np.mean(np.abs(errors / (analysis_df['actual'] + 1e-7))) * 100

            print("\nMetriche di Errore:")
            print(f"MAE: {mae:.4f}")
            print(f"RMSE: {rmse:.4f}")
            print(f"MAPE: {mape:.4f}%")

    # Final side-by-side comparison plot
    plt.figure(figsize=(15, 6))
    plt.plot(analysis_df['actual'], label='Actual', alpha=0.5)
    plt.plot(analysis_df['final'], label='Predicted', alpha=0.5)
    plt.title(f'Comparison of Actual vs Predicted {name}')
    plt.xlabel('Sample')
    plt.ylabel(name)
    plt.legend()
    plt.show()

    return analysis_df
df = pd.read_parquet('../../sources/weather_data_uvindex.parquet')

print("Initializing solar radiation model training...")

# Data preparation
print("\n1. Preparing data...")
X_train_seq, X_test_seq, y_train, y_test, scaler_X, scaler_y, features, X_to_predict_seq = prepare_hybrid_data(df)

print(f"Training data shape: {X_train_seq.shape}")
print(f"Test data shape: {X_test_seq.shape}")

# Artifact paths (folder_name is expected to be defined in an earlier cell)
scaler_X_path = f'{folder_name}_scale_X.joblib'
scaler_y_path = f'{folder_name}_scale_y.joblib'
features_path = f'{folder_name}_features.json'
model_path = f'{folder_name}_best_model.h5'
history_path = f'{folder_name}_training_history.json'

# BUG FIX: the original printed "scaler X" messages in the scaler-y branch
# (copy-paste) and loaded both scalers into the same throwaway variable
# `scaler`, so the second load silently overwrote the first and neither was
# ever used. Loaded scalers are kept in distinct names here.
# NOTE(review): the loaded scalers are still unused downstream in this cell —
# confirm whether they should replace scaler_X / scaler_y from
# prepare_hybrid_data (the data was already scaled with the fresh ones).
if os.path.exists(scaler_X_path):
    print(f"Loading existing scaler X from: {scaler_X_path}")
    scaler_X_loaded = joblib.load(scaler_X_path)
else:
    print(f"Saving scaler X to: {scaler_X_path}")
    joblib.dump(scaler_X, scaler_X_path)

if os.path.exists(scaler_y_path):
    print(f"Loading existing scaler y from: {scaler_y_path}")
    scaler_y_loaded = joblib.load(scaler_y_path)
else:
    print(f"Saving scaler y to: {scaler_y_path}")
    joblib.dump(scaler_y, scaler_y_path)

if os.path.exists(features_path):
    print(f"Loading existing features from: {features_path}")
    with open(features_path, 'r') as f:
        features = json.load(f)
else:
    print(f"Saving features to: {features_path}")
    with open(features_path, 'w') as f:
        json.dump(features, f)

# Data quality verification: fail fast on NaNs before training starts
if np.isnan(X_train_seq).any() or np.isnan(y_train).any():
    raise ValueError("Found NaN values in training data")
Creating model...\n", "\n", "Max dataset solar radiation : 1113.0 - Scaled Version : 3.2535460992907805\n", "Max dataset solar radiation increased by 15% : 1279.9499999999998 - Scaled Version : 3.7415780141843973\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2024-11-25 21:39:47.411609: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1886] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 43404 MB memory: -> device: 0, name: NVIDIA L40, pci bus id: 0000:01:00.0, compute capability: 8.9\n", "2024-11-25 21:39:48.280823: I tensorflow/tsl/platform/default/subprocess.cc:304] Start cannot spawn child process: No such file or directory\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "Model: \"SolarRadiationModel\"\n", "__________________________________________________________________________________________________\n", " Layer (type) Output Shape Param # Connected to \n", "==================================================================================================\n", " input_1 (InputLayer) [(None, 24, 40)] 0 [] \n", " \n", " bidirectional (Bidirection (None, 24, 512) 608256 ['input_1[0][0]'] \n", " al) \n", " \n", " layer_normalization (Layer (None, 24, 512) 1024 ['bidirectional[0][0]'] \n", " Normalization) \n", " \n", " dropout (Dropout) (None, 24, 512) 0 ['layer_normalization[0][0]'] \n", " \n", " dense (Dense) (None, 24, 512) 20992 ['input_1[0][0]'] \n", " \n", " stochastic_depth (Stochast (None, 24, 512) 0 ['dropout[0][0]', \n", " icDepth) 'dense[0][0]'] \n", " \n", " multi_head_attention (Mult (None, 24, 512) 1680230 ['stochastic_depth[0][0]', \n", " iHeadAttention) 4 'stochastic_depth[0][0]'] \n", " \n", " stochastic_depth_1 (Stocha (None, 24, 512) 0 ['stochastic_depth[0][0]', \n", " sticDepth) 'multi_head_attention[0][0]']\n", " \n", " layer_normalization_1 (Lay (None, 24, 512) 1024 ['stochastic_depth_1[0][0]'] \n", " erNormalization) \n", " \n", " max_pooling1d (MaxPooling1 (None, 12, 512) 0 
['layer_normalization_1[0][0]'\n", " D) ] \n", " \n", " bidirectional_1 (Bidirecti (None, 12, 256) 656384 ['max_pooling1d[0][0]'] \n", " onal) \n", " \n", " layer_normalization_2 (Lay (None, 12, 256) 512 ['bidirectional_1[0][0]'] \n", " erNormalization) \n", " \n", " dropout_1 (Dropout) (None, 12, 256) 0 ['layer_normalization_2[0][0]'\n", " ] \n", " \n", " dense_1 (Dense) (None, 12, 256) 131328 ['max_pooling1d[0][0]'] \n", " \n", " stochastic_depth_2 (Stocha (None, 12, 256) 0 ['dropout_1[0][0]', \n", " sticDepth) 'dense_1[0][0]'] \n", " \n", " multi_head_attention_1 (Mu (None, 12, 256) 3155200 ['stochastic_depth_2[0][0]', \n", " ltiHeadAttention) 'stochastic_depth_2[0][0]'] \n", " \n", " stochastic_depth_3 (Stocha (None, 12, 256) 0 ['stochastic_depth_2[0][0]', \n", " sticDepth) 'multi_head_attention_1[0][0]\n", " '] \n", " \n", " layer_normalization_3 (Lay (None, 12, 256) 512 ['stochastic_depth_3[0][0]'] \n", " erNormalization) \n", " \n", " max_pooling1d_1 (MaxPoolin (None, 6, 256) 0 ['layer_normalization_3[0][0]'\n", " g1D) ] \n", " \n", " bidirectional_2 (Bidirecti (None, 6, 128) 164352 ['max_pooling1d_1[0][0]'] \n", " onal) \n", " \n", " layer_normalization_4 (Lay (None, 6, 128) 256 ['bidirectional_2[0][0]'] \n", " erNormalization) \n", " \n", " dropout_2 (Dropout) (None, 6, 128) 0 ['layer_normalization_4[0][0]'\n", " ] \n", " \n", " dense_2 (Dense) (None, 6, 128) 32896 ['max_pooling1d_1[0][0]'] \n", " \n", " stochastic_depth_4 (Stocha (None, 6, 128) 0 ['dropout_2[0][0]', \n", " sticDepth) 'dense_2[0][0]'] \n", " \n", " multi_head_attention_2 (Mu (None, 6, 128) 527488 ['stochastic_depth_4[0][0]', \n", " ltiHeadAttention) 'stochastic_depth_4[0][0]'] \n", " \n", " stochastic_depth_5 (Stocha (None, 6, 128) 0 ['stochastic_depth_4[0][0]', \n", " sticDepth) 'multi_head_attention_2[0][0]\n", " '] \n", " \n", " layer_normalization_5 (Lay (None, 6, 128) 256 ['stochastic_depth_5[0][0]'] \n", " erNormalization) \n", " \n", " max_pooling1d_2 (MaxPoolin (None, 3, 128) 0 
['layer_normalization_5[0][0]'\n", " g1D) ] \n", " \n", " bidirectional_3 (Bidirecti (None, 3, 64) 41216 ['max_pooling1d_2[0][0]'] \n", " onal) \n", " \n", " layer_normalization_6 (Lay (None, 3, 64) 128 ['bidirectional_3[0][0]'] \n", " erNormalization) \n", " \n", " dropout_3 (Dropout) (None, 3, 64) 0 ['layer_normalization_6[0][0]'\n", " ] \n", " \n", " dense_3 (Dense) (None, 3, 64) 8256 ['max_pooling1d_2[0][0]'] \n", " \n", " stochastic_depth_6 (Stocha (None, 3, 64) 0 ['dropout_3[0][0]', \n", " sticDepth) 'dense_3[0][0]'] \n", " \n", " multi_head_attention_3 (Mu (None, 3, 64) 66368 ['stochastic_depth_6[0][0]', \n", " ltiHeadAttention) 'stochastic_depth_6[0][0]'] \n", " \n", " stochastic_depth_7 (Stocha (None, 3, 64) 0 ['stochastic_depth_6[0][0]', \n", " sticDepth) 'multi_head_attention_3[0][0]\n", " '] \n", " \n", " layer_normalization_7 (Lay (None, 3, 64) 128 ['stochastic_depth_7[0][0]'] \n", " erNormalization) \n", " \n", " bidirectional_4 (Bidirecti (None, 64) 24832 ['layer_normalization_7[0][0]'\n", " onal) ] \n", " \n", " layer_normalization_8 (Lay (None, 64) 128 ['bidirectional_4[0][0]'] \n", " erNormalization) \n", " \n", " dropout_4 (Dropout) (None, 64) 0 ['layer_normalization_8[0][0]'\n", " ] \n", " \n", " ensemble_features_0 (Dense (None, 256) 16640 ['dropout_4[0][0]'] \n", " ) \n", " \n", " ensemble_features_1 (Dense (None, 256) 16640 ['dropout_4[0][0]'] \n", " ) \n", " \n", " ensemble_features_2 (Dense (None, 256) 16640 ['dropout_4[0][0]'] \n", " ) \n", " \n", " reg_dense1_256_ensemble_0 (None, 256) 65792 ['ensemble_features_0[0][0]'] \n", " (Dense) \n", " \n", " reg_dense1_256_ensemble_1 (None, 256) 65792 ['ensemble_features_1[0][0]'] \n", " (Dense) \n", " \n", " reg_dense1_256_ensemble_2 (None, 256) 65792 ['ensemble_features_2[0][0]'] \n", " (Dense) \n", " \n", " reg_bn1_256_ensemble_0 (Ba (None, 256) 1024 ['reg_dense1_256_ensemble_0[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_bn1_256_ensemble_1 (Ba (None, 256) 1024 
['reg_dense1_256_ensemble_1[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_bn1_256_ensemble_2 (Ba (None, 256) 1024 ['reg_dense1_256_ensemble_2[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_swish1_256_ensemble_0 (None, 256) 0 ['reg_bn1_256_ensemble_0[0][0]\n", " (Activation) '] \n", " \n", " reg_swish1_256_ensemble_1 (None, 256) 0 ['reg_bn1_256_ensemble_1[0][0]\n", " (Activation) '] \n", " \n", " reg_swish1_256_ensemble_2 (None, 256) 0 ['reg_bn1_256_ensemble_2[0][0]\n", " (Activation) '] \n", " \n", " reg_drop1_256_ensemble_0 ( (None, 256) 0 ['reg_swish1_256_ensemble_0[0]\n", " Dropout) [0]'] \n", " \n", " reg_drop1_256_ensemble_1 ( (None, 256) 0 ['reg_swish1_256_ensemble_1[0]\n", " Dropout) [0]'] \n", " \n", " reg_drop1_256_ensemble_2 ( (None, 256) 0 ['reg_swish1_256_ensemble_2[0]\n", " Dropout) [0]'] \n", " \n", " reg_dense2_256_ensemble_0 (None, 256) 65792 ['reg_drop1_256_ensemble_0[0][\n", " (Dense) 0]'] \n", " \n", " reg_dense2_256_ensemble_1 (None, 256) 65792 ['reg_drop1_256_ensemble_1[0][\n", " (Dense) 0]'] \n", " \n", " reg_dense2_256_ensemble_2 (None, 256) 65792 ['reg_drop1_256_ensemble_2[0][\n", " (Dense) 0]'] \n", " \n", " reg_bn2_256_ensemble_0 (Ba (None, 256) 1024 ['reg_dense2_256_ensemble_0[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_bn2_256_ensemble_1 (Ba (None, 256) 1024 ['reg_dense2_256_ensemble_1[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_bn2_256_ensemble_2 (Ba (None, 256) 1024 ['reg_dense2_256_ensemble_2[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_swish2_256_ensemble_0 (None, 256) 0 ['reg_bn2_256_ensemble_0[0][0]\n", " (Activation) '] \n", " \n", " reg_swish2_256_ensemble_1 (None, 256) 0 ['reg_bn2_256_ensemble_1[0][0]\n", " (Activation) '] \n", " \n", " reg_swish2_256_ensemble_2 (None, 256) 0 ['reg_bn2_256_ensemble_2[0][0]\n", " (Activation) '] \n", " \n", " reg_drop2_256_ensemble_0 ( (None, 256) 0 ['reg_swish2_256_ensemble_0[0]\n", " Dropout) [0]'] \n", " \n", " reg_drop2_256_ensemble_1 ( (None, 256) 0 
['reg_swish2_256_ensemble_1[0]\n", " Dropout) [0]'] \n", " \n", " reg_drop2_256_ensemble_2 ( (None, 256) 0 ['reg_swish2_256_ensemble_2[0]\n", " Dropout) [0]'] \n", " \n", " reg_skip_256_ensemble_0 (A (None, 256) 0 ['reg_drop2_256_ensemble_0[0][\n", " dd) 0]', \n", " 'ensemble_features_0[0][0]'] \n", " \n", " reg_skip_256_ensemble_1 (A (None, 256) 0 ['reg_drop2_256_ensemble_1[0][\n", " dd) 0]', \n", " 'ensemble_features_1[0][0]'] \n", " \n", " reg_skip_256_ensemble_2 (A (None, 256) 0 ['reg_drop2_256_ensemble_2[0][\n", " dd) 0]', \n", " 'ensemble_features_2[0][0]'] \n", " \n", " reg_dense1_128_ensemble_0 (None, 128) 32896 ['reg_skip_256_ensemble_0[0][0\n", " (Dense) ]'] \n", " \n", " reg_dense1_128_ensemble_1 (None, 128) 32896 ['reg_skip_256_ensemble_1[0][0\n", " (Dense) ]'] \n", " \n", " reg_dense1_128_ensemble_2 (None, 128) 32896 ['reg_skip_256_ensemble_2[0][0\n", " (Dense) ]'] \n", " \n", " reg_bn1_128_ensemble_0 (Ba (None, 128) 512 ['reg_dense1_128_ensemble_0[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_bn1_128_ensemble_1 (Ba (None, 128) 512 ['reg_dense1_128_ensemble_1[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_bn1_128_ensemble_2 (Ba (None, 128) 512 ['reg_dense1_128_ensemble_2[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_swish1_128_ensemble_0 (None, 128) 0 ['reg_bn1_128_ensemble_0[0][0]\n", " (Activation) '] \n", " \n", " reg_swish1_128_ensemble_1 (None, 128) 0 ['reg_bn1_128_ensemble_1[0][0]\n", " (Activation) '] \n", " \n", " reg_swish1_128_ensemble_2 (None, 128) 0 ['reg_bn1_128_ensemble_2[0][0]\n", " (Activation) '] \n", " \n", " reg_drop1_128_ensemble_0 ( (None, 128) 0 ['reg_swish1_128_ensemble_0[0]\n", " Dropout) [0]'] \n", " \n", " reg_drop1_128_ensemble_1 ( (None, 128) 0 ['reg_swish1_128_ensemble_1[0]\n", " Dropout) [0]'] \n", " \n", " reg_drop1_128_ensemble_2 ( (None, 128) 0 ['reg_swish1_128_ensemble_2[0]\n", " Dropout) [0]'] \n", " \n", " reg_dense2_128_ensemble_0 (None, 128) 16512 ['reg_drop1_128_ensemble_0[0][\n", " (Dense) 0]'] \n", 
" \n", " reg_dense2_128_ensemble_1 (None, 128) 16512 ['reg_drop1_128_ensemble_1[0][\n", " (Dense) 0]'] \n", " \n", " reg_dense2_128_ensemble_2 (None, 128) 16512 ['reg_drop1_128_ensemble_2[0][\n", " (Dense) 0]'] \n", " \n", " reg_bn2_128_ensemble_0 (Ba (None, 128) 512 ['reg_dense2_128_ensemble_0[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_bn2_128_ensemble_1 (Ba (None, 128) 512 ['reg_dense2_128_ensemble_1[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_bn2_128_ensemble_2 (Ba (None, 128) 512 ['reg_dense2_128_ensemble_2[0]\n", " tchNormalization) [0]'] \n", " \n", " reg_swish2_128_ensemble_0 (None, 128) 0 ['reg_bn2_128_ensemble_0[0][0]\n", " (Activation) '] \n", " \n", " reg_swish2_128_ensemble_1 (None, 128) 0 ['reg_bn2_128_ensemble_1[0][0]\n", " (Activation) '] \n", " \n", " reg_swish2_128_ensemble_2 (None, 128) 0 ['reg_bn2_128_ensemble_2[0][0]\n", " (Activation) '] \n", " \n", " reg_drop2_128_ensemble_0 ( (None, 128) 0 ['reg_swish2_128_ensemble_0[0]\n", " Dropout) [0]'] \n", " \n", " reg_residual_proj_128_ense (None, 128) 32896 ['reg_skip_256_ensemble_0[0][0\n", " mble_0 (Dense) ]'] \n", " \n", " reg_drop2_128_ensemble_1 ( (None, 128) 0 ['reg_swish2_128_ensemble_1[0]\n", " Dropout) [0]'] \n", " \n", " reg_residual_proj_128_ense (None, 128) 32896 ['reg_skip_256_ensemble_1[0][0\n", " mble_1 (Dense) ]'] \n", " \n", " reg_drop2_128_ensemble_2 ( (None, 128) 0 ['reg_swish2_128_ensemble_2[0]\n", " Dropout) [0]'] \n", " \n", " reg_residual_proj_128_ense (None, 128) 32896 ['reg_skip_256_ensemble_2[0][0\n", " mble_2 (Dense) ]'] \n", " \n", " reg_skip_128_ensemble_0 (A (None, 128) 0 ['reg_drop2_128_ensemble_0[0][\n", " dd) 0]', \n", " 'reg_residual_proj_128_ensemb\n", " le_0[0][0]'] \n", " \n", " reg_skip_128_ensemble_1 (A (None, 128) 0 ['reg_drop2_128_ensemble_1[0][\n", " dd) 0]', \n", " 'reg_residual_proj_128_ensemb\n", " le_1[0][0]'] \n", " \n", " reg_skip_128_ensemble_2 (A (None, 128) 0 ['reg_drop2_128_ensemble_2[0][\n", " dd) 0]', \n", " 
'reg_residual_proj_128_ensemb\n", " le_2[0][0]'] \n", " \n", " reg_dense1_64_ensemble_0 ( (None, 64) 8256 ['reg_skip_128_ensemble_0[0][0\n", " Dense) ]'] \n", " \n", " reg_dense1_64_ensemble_1 ( (None, 64) 8256 ['reg_skip_128_ensemble_1[0][0\n", " Dense) ]'] \n", " \n", " reg_dense1_64_ensemble_2 ( (None, 64) 8256 ['reg_skip_128_ensemble_2[0][0\n", " Dense) ]'] \n", " \n", " reg_bn1_64_ensemble_0 (Bat (None, 64) 256 ['reg_dense1_64_ensemble_0[0][\n", " chNormalization) 0]'] \n", " \n", " reg_bn1_64_ensemble_1 (Bat (None, 64) 256 ['reg_dense1_64_ensemble_1[0][\n", " chNormalization) 0]'] \n", " \n", " reg_bn1_64_ensemble_2 (Bat (None, 64) 256 ['reg_dense1_64_ensemble_2[0][\n", " chNormalization) 0]'] \n", " \n", " reg_swish1_64_ensemble_0 ( (None, 64) 0 ['reg_bn1_64_ensemble_0[0][0]'\n", " Activation) ] \n", " \n", " reg_swish1_64_ensemble_1 ( (None, 64) 0 ['reg_bn1_64_ensemble_1[0][0]'\n", " Activation) ] \n", " \n", " reg_swish1_64_ensemble_2 ( (None, 64) 0 ['reg_bn1_64_ensemble_2[0][0]'\n", " Activation) ] \n", " \n", " reg_drop1_64_ensemble_0 (D (None, 64) 0 ['reg_swish1_64_ensemble_0[0][\n", " ropout) 0]'] \n", " \n", " reg_drop1_64_ensemble_1 (D (None, 64) 0 ['reg_swish1_64_ensemble_1[0][\n", " ropout) 0]'] \n", " \n", " reg_drop1_64_ensemble_2 (D (None, 64) 0 ['reg_swish1_64_ensemble_2[0][\n", " ropout) 0]'] \n", " \n", " reg_dense2_64_ensemble_0 ( (None, 64) 4160 ['reg_drop1_64_ensemble_0[0][0\n", " Dense) ]'] \n", " \n", " reg_dense2_64_ensemble_1 ( (None, 64) 4160 ['reg_drop1_64_ensemble_1[0][0\n", " Dense) ]'] \n", " \n", " reg_dense2_64_ensemble_2 ( (None, 64) 4160 ['reg_drop1_64_ensemble_2[0][0\n", " Dense) ]'] \n", " \n", " reg_bn2_64_ensemble_0 (Bat (None, 64) 256 ['reg_dense2_64_ensemble_0[0][\n", " chNormalization) 0]'] \n", " \n", " reg_bn2_64_ensemble_1 (Bat (None, 64) 256 ['reg_dense2_64_ensemble_1[0][\n", " chNormalization) 0]'] \n", " \n", " reg_bn2_64_ensemble_2 (Bat (None, 64) 256 ['reg_dense2_64_ensemble_2[0][\n", " chNormalization) 0]'] 
\n", " \n", " reg_swish2_64_ensemble_0 ( (None, 64) 0 ['reg_bn2_64_ensemble_0[0][0]'\n", " Activation) ] \n", " \n", " reg_swish2_64_ensemble_1 ( (None, 64) 0 ['reg_bn2_64_ensemble_1[0][0]'\n", " Activation) ] \n", " \n", " reg_swish2_64_ensemble_2 ( (None, 64) 0 ['reg_bn2_64_ensemble_2[0][0]'\n", " Activation) ] \n", " \n", " reg_drop2_64_ensemble_0 (D (None, 64) 0 ['reg_swish2_64_ensemble_0[0][\n", " ropout) 0]'] \n", " \n", " reg_residual_proj_64_ensem (None, 64) 8256 ['reg_skip_128_ensemble_0[0][0\n", " ble_0 (Dense) ]'] \n", " \n", " reg_drop2_64_ensemble_1 (D (None, 64) 0 ['reg_swish2_64_ensemble_1[0][\n", " ropout) 0]'] \n", " \n", " reg_residual_proj_64_ensem (None, 64) 8256 ['reg_skip_128_ensemble_1[0][0\n", " ble_1 (Dense) ]'] \n", " \n", " reg_drop2_64_ensemble_2 (D (None, 64) 0 ['reg_swish2_64_ensemble_2[0][\n", " ropout) 0]'] \n", " \n", " reg_residual_proj_64_ensem (None, 64) 8256 ['reg_skip_128_ensemble_2[0][0\n", " ble_2 (Dense) ]'] \n", " \n", " reg_skip_64_ensemble_0 (Ad (None, 64) 0 ['reg_drop2_64_ensemble_0[0][0\n", " d) ]', \n", " 'reg_residual_proj_64_ensembl\n", " e_0[0][0]'] \n", " \n", " reg_skip_64_ensemble_1 (Ad (None, 64) 0 ['reg_drop2_64_ensemble_1[0][0\n", " d) ]', \n", " 'reg_residual_proj_64_ensembl\n", " e_1[0][0]'] \n", " \n", " reg_skip_64_ensemble_2 (Ad (None, 64) 0 ['reg_drop2_64_ensemble_2[0][0\n", " d) ]', \n", " 'reg_residual_proj_64_ensembl\n", " e_2[0][0]'] \n", " \n", " reg_dense1_32_ensemble_0 ( (None, 32) 2080 ['reg_skip_64_ensemble_0[0][0]\n", " Dense) '] \n", " \n", " reg_dense1_32_ensemble_1 ( (None, 32) 2080 ['reg_skip_64_ensemble_1[0][0]\n", " Dense) '] \n", " \n", " reg_dense1_32_ensemble_2 ( (None, 32) 2080 ['reg_skip_64_ensemble_2[0][0]\n", " Dense) '] \n", " \n", " reg_bn1_32_ensemble_0 (Bat (None, 32) 128 ['reg_dense1_32_ensemble_0[0][\n", " chNormalization) 0]'] \n", " \n", " reg_bn1_32_ensemble_1 (Bat (None, 32) 128 ['reg_dense1_32_ensemble_1[0][\n", " chNormalization) 0]'] \n", " \n", " reg_bn1_32_ensemble_2 
(Bat (None, 32) 128 ['reg_dense1_32_ensemble_2[0][\n", " chNormalization) 0]'] \n", " \n", " dense_4 (Dense) (None, 64) 4160 ['dropout_4[0][0]'] \n", " \n", " reg_swish1_32_ensemble_0 ( (None, 32) 0 ['reg_bn1_32_ensemble_0[0][0]'\n", " Activation) ] \n", " \n", " reg_swish1_32_ensemble_1 ( (None, 32) 0 ['reg_bn1_32_ensemble_1[0][0]'\n", " Activation) ] \n", " \n", " reg_swish1_32_ensemble_2 ( (None, 32) 0 ['reg_bn1_32_ensemble_2[0][0]'\n", " Activation) ] \n", " \n", " batch_normalization (Batch (None, 64) 256 ['dense_4[0][0]'] \n", " Normalization) \n", " \n", " reg_drop1_32_ensemble_0 (D (None, 32) 0 ['reg_swish1_32_ensemble_0[0][\n", " ropout) 0]'] \n", " \n", " reg_drop1_32_ensemble_1 (D (None, 32) 0 ['reg_swish1_32_ensemble_1[0][\n", " ropout) 0]'] \n", " \n", " reg_drop1_32_ensemble_2 (D (None, 32) 0 ['reg_swish1_32_ensemble_2[0][\n", " ropout) 0]'] \n", " \n", " activation (Activation) (None, 64) 0 ['batch_normalization[0][0]'] \n", " \n", " reg_dense2_32_ensemble_0 ( (None, 32) 1056 ['reg_drop1_32_ensemble_0[0][0\n", " Dense) ]'] \n", " \n", " reg_dense2_32_ensemble_1 ( (None, 32) 1056 ['reg_drop1_32_ensemble_1[0][0\n", " Dense) ]'] \n", " \n", " reg_dense2_32_ensemble_2 ( (None, 32) 1056 ['reg_drop1_32_ensemble_2[0][0\n", " Dense) ]'] \n", " \n", " dropout_5 (Dropout) (None, 64) 0 ['activation[0][0]'] \n", " \n", " reg_bn2_32_ensemble_0 (Bat (None, 32) 128 ['reg_dense2_32_ensemble_0[0][\n", " chNormalization) 0]'] \n", " \n", " reg_bn2_32_ensemble_1 (Bat (None, 32) 128 ['reg_dense2_32_ensemble_1[0][\n", " chNormalization) 0]'] \n", " \n", " reg_bn2_32_ensemble_2 (Bat (None, 32) 128 ['reg_dense2_32_ensemble_2[0][\n", " chNormalization) 0]'] \n", " \n", " dense_5 (Dense) (None, 32) 2080 ['dropout_5[0][0]'] \n", " \n", " reg_swish2_32_ensemble_0 ( (None, 32) 0 ['reg_bn2_32_ensemble_0[0][0]'\n", " Activation) ] \n", " \n", " reg_swish2_32_ensemble_1 ( (None, 32) 0 ['reg_bn2_32_ensemble_1[0][0]'\n", " Activation) ] \n", " \n", " reg_swish2_32_ensemble_2 ( 
(None, 32) 0 ['reg_bn2_32_ensemble_2[0][0]'\n", " Activation) ] \n", " \n", " batch_normalization_1 (Bat (None, 32) 128 ['dense_5[0][0]'] \n", " chNormalization) \n", " \n", " reg_drop2_32_ensemble_0 (D (None, 32) 0 ['reg_swish2_32_ensemble_0[0][\n", " ropout) 0]'] \n", " \n", " reg_residual_proj_32_ensem (None, 32) 2080 ['reg_skip_64_ensemble_0[0][0]\n", " ble_0 (Dense) '] \n", " \n", " reg_drop2_32_ensemble_1 (D (None, 32) 0 ['reg_swish2_32_ensemble_1[0][\n", " ropout) 0]'] \n", " \n", " reg_residual_proj_32_ensem (None, 32) 2080 ['reg_skip_64_ensemble_1[0][0]\n", " ble_1 (Dense) '] \n", " \n", " reg_drop2_32_ensemble_2 (D (None, 32) 0 ['reg_swish2_32_ensemble_2[0][\n", " ropout) 0]'] \n", " \n", " reg_residual_proj_32_ensem (None, 32) 2080 ['reg_skip_64_ensemble_2[0][0]\n", " ble_2 (Dense) '] \n", " \n", " activation_1 (Activation) (None, 32) 0 ['batch_normalization_1[0][0]'\n", " ] \n", " \n", " reg_skip_32_ensemble_0 (Ad (None, 32) 0 ['reg_drop2_32_ensemble_0[0][0\n", " d) ]', \n", " 'reg_residual_proj_32_ensembl\n", " e_0[0][0]'] \n", " \n", " reg_skip_32_ensemble_1 (Ad (None, 32) 0 ['reg_drop2_32_ensemble_1[0][0\n", " d) ]', \n", " 'reg_residual_proj_32_ensembl\n", " e_1[0][0]'] \n", " \n", " reg_skip_32_ensemble_2 (Ad (None, 32) 0 ['reg_drop2_32_ensemble_2[0][0\n", " d) ]', \n", " 'reg_residual_proj_32_ensembl\n", " e_2[0][0]'] \n", " \n", " classification_output (Den (None, 1) 33 ['activation_1[0][0]'] \n", " se) \n", " \n", " regression_output_ensemble (None, 1) 33 ['reg_skip_32_ensemble_0[0][0]\n", " _0 (Dense) '] \n", " \n", " regression_output_ensemble (None, 1) 33 ['reg_skip_32_ensemble_1[0][0]\n", " _1 (Dense) '] \n", " \n", " regression_output_ensemble (None, 1) 33 ['reg_skip_32_ensemble_2[0][0]\n", " _2 (Dense) '] \n", " \n", " regression_ensemble (Avera (None, 1) 0 ['regression_output_ensemble_0\n", " ge) [0][0]', \n", " 'regression_output_ensemble_1\n", " [0][0]', \n", " 'regression_output_ensemble_2\n", " [0][0]'] \n", " \n", " thresholded_re_lu 
(Thresho (None, 1) 0 ['classification_output[0][0]'\n", " ldedReLU) ] \n", " \n", " regression_output (Lambda) (None, 1) 0 ['regression_ensemble[0][0]'] \n", " \n", " lambda (Lambda) (None, 1) 0 ['thresholded_re_lu[0][0]'] \n", " \n", " final_output (Lambda) (None, 1) 0 ['regression_output[0][0]', \n", " 'lambda[0][0]'] \n", " \n", "==================================================================================================\n", "Total params: 23031364 (87.86 MB)\n", "Trainable params: 23025412 (87.83 MB)\n", "Non-trainable params: 5952 (23.25 KB)\n", "__________________________________________________________________________________________________\n", "\n", "Class distribution in training set:\n", "Zeros: 52022 (50.12%)\n", "Non-zeros: 51776 (49.88%)\n", "\n", "Class distribution in test set:\n", "Zeros: 13007 (50.16%)\n", "Non-zeros: 12926 (49.84%)\n", "\n", "Model output names: ['classification_output', 'regression_output', 'final_output']\n", "\n", "4. Starting training...\n", "Epoch 1/100\n" ] }, { "name": "stderr", "output_type": "stream", "text": [ "2024-11-25 21:40:23.511608: I tensorflow/compiler/xla/stream_executor/cuda/cuda_dnn.cc:442] Loaded cuDNN version 8905\n", "2024-11-25 21:40:25.325940: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x73010c02aa90 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:\n", "2024-11-25 21:40:25.325975: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): NVIDIA L40, Compute Capability 8.9\n", "2024-11-25 21:40:25.331376: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable.\n", "2024-11-25 21:40:25.470956: I ./tensorflow/compiler/jit/device_compiler.h:186] Compiled cluster using XLA! 
This line is logged at most once for the lifetime of the process.\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "541/541 [==============================] - ETA: 0s - loss: 21.7614 - classification_output_loss: 0.5756 - regression_output_loss: 0.4676 - final_output_loss: 0.4591 - classification_output_accuracy: 0.6628 - classification_output_auc: 0.7471 - regression_output_mse: 0.9288 - regression_output_mae: 0.5546 - regression_output_rmse: 0.8998 - regression_output_custom_mape: 52.2010 - final_output_mse: 0.9184 - final_output_mae: 0.5587 - final_output_rmse: 0.8940 - final_output_custom_mape: 81.9168" ] }, { "name": "stderr", "output_type": "stream", "text": [ "/usr/local/lib/python3.11/dist-packages/keras/src/engine/training.py:3079: UserWarning: You are saving your model as an HDF5 file via `model.save()`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')`.\n", " saving_api.save_model(\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ "\n", "Epoch 1 Detailed Metrics:\n", "541/541 [==============================] - 106s 106ms/step - loss: 21.7614 - classification_output_loss: 0.5756 - regression_output_loss: 0.4676 - final_output_loss: 0.4591 - classification_output_accuracy: 0.6628 - classification_output_auc: 0.7471 - regression_output_mse: 0.9288 - regression_output_mae: 0.5546 - regression_output_rmse: 0.8998 - regression_output_custom_mape: 52.2010 - final_output_mse: 0.9184 - final_output_mae: 0.5587 - final_output_rmse: 0.8940 - final_output_custom_mape: 81.9168 - val_loss: 12.6098 - val_classification_output_loss: 0.3514 - val_regression_output_loss: 0.4318 - val_final_output_loss: 0.4355 - val_classification_output_accuracy: 0.8638 - val_classification_output_auc: 0.9544 - val_regression_output_mse: 0.8895 - val_regression_output_mae: 0.5327 - val_regression_output_rmse: 0.8780 - val_regression_output_custom_mape: 52.5583 - val_final_output_mse: 
0.8893 - val_final_output_mae: 0.5451 - val_final_output_rmse: 0.8777 - val_final_output_custom_mape: 91.4040 - lr: 3.0000e-04\n", "Epoch 2/100\n", "541/541 [==============================] - 55s 101ms/step - loss: 8.2372 - classification_output_loss: 0.1856 - regression_output_loss: 0.2360 - final_output_loss: 0.2405 - classification_output_accuracy: 0.9278 - classification_output_auc: 0.9800 - regression_output_mse: 0.4706 - regression_output_mae: 0.3565 - regression_output_rmse: 0.5927 - regression_output_custom_mape: 38.9142 - final_output_mse: 0.4705 - final_output_mae: 0.3702 - final_output_rmse: 0.5925 - final_output_custom_mape: 83.6743 - val_loss: 5.2106 - val_classification_output_loss: 0.3927 - val_regression_output_loss: 0.0851 - val_final_output_loss: 0.0890 - val_classification_output_accuracy: 0.8532 - val_classification_output_auc: 0.9356 - val_regression_output_mse: 0.1465 - val_regression_output_mae: 0.2230 - val_regression_output_rmse: 0.3681 - val_regression_output_custom_mape: 31.6631 - val_final_output_mse: 0.1460 - val_final_output_mae: 0.2351 - val_final_output_rmse: 0.3671 - val_final_output_custom_mape: 71.4497 - lr: 3.0000e-04\n", "Epoch 3/100\n", "541/541 [==============================] - 56s 103ms/step - loss: 3.6460 - classification_output_loss: 0.1600 - regression_output_loss: 0.0630 - final_output_loss: 0.0671 - classification_output_accuracy: 0.9351 - classification_output_auc: 0.9845 - regression_output_mse: 0.1030 - regression_output_mae: 0.1770 - regression_output_rmse: 0.3038 - regression_output_custom_mape: 26.8426 - final_output_mse: 0.1031 - final_output_mae: 0.1908 - final_output_rmse: 0.3039 - final_output_custom_mape: 71.6244 - val_loss: 2.5998 - val_classification_output_loss: 0.1993 - val_regression_output_loss: 0.1267 - val_final_output_loss: 0.1317 - val_classification_output_accuracy: 0.9146 - val_classification_output_auc: 0.9830 - val_regression_output_mse: 0.1888 - val_regression_output_mae: 0.2526 - 
val_regression_output_rmse: 0.4112 - val_regression_output_custom_mape: 32.5356 - val_final_output_mse: 0.1889 - val_final_output_mae: 0.2664 - val_final_output_rmse: 0.4115 - val_final_output_custom_mape: 74.5863 - lr: 3.0000e-04\n", "Epoch 4/100\n", "541/541 [==============================] - 52s 97ms/step - loss: 1.9158 - classification_output_loss: 0.1380 - regression_output_loss: 0.0553 - final_output_loss: 0.0589 - classification_output_accuracy: 0.9442 - classification_output_auc: 0.9884 - regression_output_mse: 0.0876 - regression_output_mae: 0.1633 - regression_output_rmse: 0.2807 - regression_output_custom_mape: 26.0265 - final_output_mse: 0.0871 - final_output_mae: 0.1756 - final_output_rmse: 0.2795 - final_output_custom_mape: 70.3726 - val_loss: 1.4454 - val_classification_output_loss: 0.2029 - val_regression_output_loss: 0.0481 - val_final_output_loss: 0.0525 - val_classification_output_accuracy: 0.9374 - val_classification_output_auc: 0.9791 - val_regression_output_mse: 0.0630 - val_regression_output_mae: 0.1436 - val_regression_output_rmse: 0.2442 - val_regression_output_custom_mape: 25.8998 - val_final_output_mse: 0.0633 - val_final_output_mae: 0.1582 - val_final_output_rmse: 0.2447 - val_final_output_custom_mape: 71.9146 - lr: 3.0000e-04\n", "Epoch 5/100\n", "541/541 [==============================] - 54s 99ms/step - loss: 1.1300 - classification_output_loss: 0.1199 - regression_output_loss: 0.0437 - final_output_loss: 0.0472 - classification_output_accuracy: 0.9514 - classification_output_auc: 0.9912 - regression_output_mse: 0.0597 - regression_output_mae: 0.1336 - regression_output_rmse: 0.2314 - regression_output_custom_mape: 23.3383 - final_output_mse: 0.0591 - final_output_mae: 0.1457 - final_output_rmse: 0.2294 - final_output_custom_mape: 68.0596 - val_loss: 0.9264 - val_classification_output_loss: 0.2782 - val_regression_output_loss: 0.0444 - val_final_output_loss: 0.0487 - val_classification_output_accuracy: 0.8822 - 
val_classification_output_auc: 0.9807 - val_regression_output_mse: 0.0539 - val_regression_output_mae: 0.1356 - val_regression_output_rmse: 0.2185 - val_regression_output_custom_mape: 27.1526 - val_final_output_mse: 0.0543 - val_final_output_mae: 0.1488 - val_final_output_rmse: 0.2198 - val_final_output_custom_mape: 65.6245 - lr: 3.0000e-04\n", "Epoch 6/100\n", "541/541 [==============================] - 51s 94ms/step - loss: 0.7342 - classification_output_loss: 0.1012 - regression_output_loss: 0.0414 - final_output_loss: 0.0450 - classification_output_accuracy: 0.9590 - classification_output_auc: 0.9938 - regression_output_mse: 0.0545 - regression_output_mae: 0.1274 - regression_output_rmse: 0.2209 - regression_output_custom_mape: 23.0598 - final_output_mse: 0.0542 - final_output_mae: 0.1402 - final_output_rmse: 0.2197 - final_output_custom_mape: 67.9279 - val_loss: 0.8505 - val_classification_output_loss: 0.5649 - val_regression_output_loss: 0.2350 - val_final_output_loss: 0.2400 - val_classification_output_accuracy: 0.8290 - val_classification_output_auc: 0.9356 - val_regression_output_mse: 0.3572 - val_regression_output_mae: 0.3788 - val_regression_output_rmse: 0.5206 - val_regression_output_custom_mape: 43.1358 - val_final_output_mse: 0.3575 - val_final_output_mae: 0.3900 - val_final_output_rmse: 0.5210 - val_final_output_custom_mape: 77.3503 - lr: 3.0000e-04\n", "Epoch 7/100\n", "541/541 [==============================] - 52s 95ms/step - loss: 0.5133 - classification_output_loss: 0.1021 - regression_output_loss: 0.0422 - final_output_loss: 0.0457 - classification_output_accuracy: 0.9594 - classification_output_auc: 0.9934 - regression_output_mse: 0.0556 - regression_output_mae: 0.1304 - regression_output_rmse: 0.2240 - regression_output_custom_mape: 23.6281 - final_output_mse: 0.0552 - final_output_mae: 0.1423 - final_output_rmse: 0.2220 - final_output_custom_mape: 68.1228 - val_loss: 0.4582 - val_classification_output_loss: 0.1588 - 
val_regression_output_loss: 0.0590 - val_final_output_loss: 0.0526 - val_classification_output_accuracy: 0.9454 - val_classification_output_auc: 0.9904 - val_regression_output_mse: 0.0843 - val_regression_output_mae: 0.1796 - val_regression_output_rmse: 0.2742 - val_regression_output_custom_mape: 36.2912 - val_final_output_mse: 0.0737 - val_final_output_mae: 0.1608 - val_final_output_rmse: 0.2501 - val_final_output_custom_mape: 71.0505 - lr: 3.0000e-04\n", "Epoch 8/100\n", "541/541 [==============================] - 51s 94ms/step - loss: 0.3853 - classification_output_loss: 0.1100 - regression_output_loss: 0.0427 - final_output_loss: 0.0459 - classification_output_accuracy: 0.9558 - classification_output_auc: 0.9924 - regression_output_mse: 0.0552 - regression_output_mae: 0.1290 - regression_output_rmse: 0.2218 - regression_output_custom_mape: 23.6954 - final_output_mse: 0.0551 - final_output_mae: 0.1417 - final_output_rmse: 0.2212 - final_output_custom_mape: 68.4358 - val_loss: 0.3914 - val_classification_output_loss: 0.1860 - val_regression_output_loss: 0.0915 - val_final_output_loss: 0.0959 - val_classification_output_accuracy: 0.9378 - val_classification_output_auc: 0.9824 - val_regression_output_mse: 0.2065 - val_regression_output_mae: 0.2504 - val_regression_output_rmse: 0.4286 - val_regression_output_custom_mape: 29.7654 - val_final_output_mse: 0.2066 - val_final_output_mae: 0.2649 - val_final_output_rmse: 0.4288 - val_final_output_custom_mape: 76.1419 - lr: 3.0000e-04\n", "Epoch 9/100\n", "541/541 [==============================] - 52s 96ms/step - loss: 0.3199 - classification_output_loss: 0.1101 - regression_output_loss: 0.0616 - final_output_loss: 0.0650 - classification_output_accuracy: 0.9545 - classification_output_auc: 0.9925 - regression_output_mse: 0.1038 - regression_output_mae: 0.1762 - regression_output_rmse: 0.3045 - regression_output_custom_mape: 27.8291 - final_output_mse: 0.1033 - final_output_mae: 0.1887 - final_output_rmse: 0.3037 - 
final_output_custom_mape: 72.4254 - val_loss: 0.3449 - val_classification_output_loss: 0.3869 - val_regression_output_loss: 0.0618 - val_final_output_loss: 0.0662 - val_classification_output_accuracy: 0.9008 - val_classification_output_auc: 0.9561 - val_regression_output_mse: 0.0833 - val_regression_output_mae: 0.1741 - val_regression_output_rmse: 0.2749 - val_regression_output_custom_mape: 29.8195 - val_final_output_mse: 0.0835 - val_final_output_mae: 0.1877 - val_final_output_rmse: 0.2752 - val_final_output_custom_mape: 72.2675 - lr: 3.0000e-04\n", "Epoch 10/100\n", "541/541 [==============================] - 56s 104ms/step - loss: 0.2505 - classification_output_loss: 0.1019 - regression_output_loss: 0.0518 - final_output_loss: 0.0553 - classification_output_accuracy: 0.9581 - classification_output_auc: 0.9936 - regression_output_mse: 0.0792 - regression_output_mae: 0.1561 - regression_output_rmse: 0.2681 - regression_output_custom_mape: 26.8968 - final_output_mse: 0.0789 - final_output_mae: 0.1677 - final_output_rmse: 0.2668 - final_output_custom_mape: 70.8997 - val_loss: 0.2645 - val_classification_output_loss: 0.1521 - val_regression_output_loss: 0.0882 - val_final_output_loss: 0.0927 - val_classification_output_accuracy: 0.9399 - val_classification_output_auc: 0.9882 - val_regression_output_mse: 0.1400 - val_regression_output_mae: 0.2075 - val_regression_output_rmse: 0.3452 - val_regression_output_custom_mape: 32.3501 - val_final_output_mse: 0.1404 - val_final_output_mae: 0.2225 - val_final_output_rmse: 0.3459 - val_final_output_custom_mape: 77.4425 - lr: 3.0000e-04\n", "Epoch 11/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.1874 - classification_output_loss: 0.0730 - regression_output_loss: 0.0375 - final_output_loss: 0.0411 - classification_output_accuracy: 0.9704 - classification_output_auc: 0.9968 - regression_output_mse: 0.0458 - regression_output_mae: 0.1184 - regression_output_rmse: 0.2036 - regression_output_custom_mape: 
23.5087 - final_output_mse: 0.0455 - final_output_mae: 0.1305 - final_output_rmse: 0.2021 - final_output_custom_mape: 67.9603\n", "Epoch 11 Detailed Metrics:\n", "541/541 [==============================] - 59s 108ms/step - loss: 0.1874 - classification_output_loss: 0.0730 - regression_output_loss: 0.0375 - final_output_loss: 0.0411 - classification_output_accuracy: 0.9704 - classification_output_auc: 0.9968 - regression_output_mse: 0.0458 - regression_output_mae: 0.1184 - regression_output_rmse: 0.2036 - regression_output_custom_mape: 23.5087 - final_output_mse: 0.0455 - final_output_mae: 0.1305 - final_output_rmse: 0.2021 - final_output_custom_mape: 67.9603 - val_loss: 0.1989 - val_classification_output_loss: 0.1751 - val_regression_output_loss: 0.0484 - val_final_output_loss: 0.0531 - val_classification_output_accuracy: 0.9477 - val_classification_output_auc: 0.9842 - val_regression_output_mse: 0.0615 - val_regression_output_mae: 0.1431 - val_regression_output_rmse: 0.2300 - val_regression_output_custom_mape: 25.9657 - val_final_output_mse: 0.0619 - val_final_output_mae: 0.1582 - val_final_output_rmse: 0.2309 - val_final_output_custom_mape: 72.2079 - lr: 3.0000e-04\n", "Epoch 12/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.1606 - classification_output_loss: 0.0842 - regression_output_loss: 0.0386 - final_output_loss: 0.0426 - classification_output_accuracy: 0.9653 - classification_output_auc: 0.9957 - regression_output_mse: 0.0483 - regression_output_mae: 0.1216 - regression_output_rmse: 0.2090 - regression_output_custom_mape: 23.4801 - final_output_mse: 0.0482 - final_output_mae: 0.1347 - final_output_rmse: 0.2086 - final_output_custom_mape: 68.3661\n", "Epoch 12: ReduceLROnPlateau reducing learning rate to 0.0001500000071246177.\n", "541/541 [==============================] - 58s 107ms/step - loss: 0.1606 - classification_output_loss: 0.0842 - regression_output_loss: 0.0386 - final_output_loss: 0.0426 - classification_output_accuracy: 
0.9653 - classification_output_auc: 0.9957 - regression_output_mse: 0.0483 - regression_output_mae: 0.1216 - regression_output_rmse: 0.2090 - regression_output_custom_mape: 23.4801 - final_output_mse: 0.0482 - final_output_mae: 0.1347 - final_output_rmse: 0.2086 - final_output_custom_mape: 68.3661 - val_loss: 0.2972 - val_classification_output_loss: 0.3844 - val_regression_output_loss: 0.1478 - val_final_output_loss: 0.1521 - val_classification_output_accuracy: 0.9027 - val_classification_output_auc: 0.9666 - val_regression_output_mse: 0.3291 - val_regression_output_mae: 0.3247 - val_regression_output_rmse: 0.5009 - val_regression_output_custom_mape: 36.7739 - val_final_output_mse: 0.3295 - val_final_output_mae: 0.3387 - val_final_output_rmse: 0.5017 - val_final_output_custom_mape: 77.1084 - lr: 3.0000e-04\n", "Epoch 13/100\n", "541/541 [==============================] - 58s 108ms/step - loss: 0.1417 - classification_output_loss: 0.0731 - regression_output_loss: 0.0386 - final_output_loss: 0.0425 - classification_output_accuracy: 0.9705 - classification_output_auc: 0.9967 - regression_output_mse: 0.0476 - regression_output_mae: 0.1209 - regression_output_rmse: 0.2074 - regression_output_custom_mape: 23.6777 - final_output_mse: 0.0475 - final_output_mae: 0.1340 - final_output_rmse: 0.2067 - final_output_custom_mape: 68.5557 - val_loss: 0.2201 - val_classification_output_loss: 0.2901 - val_regression_output_loss: 0.0877 - val_final_output_loss: 0.0920 - val_classification_output_accuracy: 0.9203 - val_classification_output_auc: 0.9715 - val_regression_output_mse: 0.1554 - val_regression_output_mae: 0.2386 - val_regression_output_rmse: 0.3845 - val_regression_output_custom_mape: 35.5962 - val_final_output_mse: 0.1558 - val_final_output_mae: 0.2530 - val_final_output_rmse: 0.3851 - val_final_output_custom_mape: 78.4799 - lr: 1.5000e-04\n", "Epoch 14/100\n", "541/541 [==============================] - 57s 105ms/step - loss: 0.1297 - classification_output_loss: 0.0711 - 
regression_output_loss: 0.0365 - final_output_loss: 0.0410 - classification_output_accuracy: 0.9705 - classification_output_auc: 0.9969 - regression_output_mse: 0.0442 - regression_output_mae: 0.1148 - regression_output_rmse: 0.1989 - regression_output_custom_mape: 22.1252 - final_output_mse: 0.0446 - final_output_mae: 0.1295 - final_output_rmse: 0.1999 - final_output_custom_mape: 68.0297 - val_loss: 0.2045 - val_classification_output_loss: 0.3035 - val_regression_output_loss: 0.0776 - val_final_output_loss: 0.0818 - val_classification_output_accuracy: 0.9149 - val_classification_output_auc: 0.9655 - val_regression_output_mse: 0.1226 - val_regression_output_mae: 0.2069 - val_regression_output_rmse: 0.3244 - val_regression_output_custom_mape: 32.3946 - val_final_output_mse: 0.1228 - val_final_output_mae: 0.2210 - val_final_output_rmse: 0.3247 - val_final_output_custom_mape: 76.3908 - lr: 1.5000e-04\n", "Epoch 15/100\n", "541/541 [==============================] - 56s 104ms/step - loss: 0.1146 - classification_output_loss: 0.0645 - regression_output_loss: 0.0307 - final_output_loss: 0.0350 - classification_output_accuracy: 0.9732 - classification_output_auc: 0.9975 - regression_output_mse: 0.0320 - regression_output_mae: 0.0987 - regression_output_rmse: 0.1721 - regression_output_custom_mape: 20.6845 - final_output_mse: 0.0322 - final_output_mae: 0.1131 - final_output_rmse: 0.1725 - final_output_custom_mape: 66.5285 - val_loss: 0.1646 - val_classification_output_loss: 0.2214 - val_regression_output_loss: 0.0593 - val_final_output_loss: 0.0638 - val_classification_output_accuracy: 0.9308 - val_classification_output_auc: 0.9772 - val_regression_output_mse: 0.0818 - val_regression_output_mae: 0.1627 - val_regression_output_rmse: 0.2642 - val_regression_output_custom_mape: 28.9516 - val_final_output_mse: 0.0821 - val_final_output_mae: 0.1773 - val_final_output_rmse: 0.2650 - val_final_output_custom_mape: 73.5214 - lr: 1.5000e-04\n", "Epoch 16/100\n", "541/541 
[==============================] - 53s 98ms/step - loss: 0.1037 - classification_output_loss: 0.0616 - regression_output_loss: 0.0280 - final_output_loss: 0.0325 - classification_output_accuracy: 0.9745 - classification_output_auc: 0.9977 - regression_output_mse: 0.0267 - regression_output_mae: 0.0908 - regression_output_rmse: 0.1581 - regression_output_custom_mape: 19.6992 - final_output_mse: 0.0270 - final_output_mae: 0.1056 - final_output_rmse: 0.1590 - final_output_custom_mape: 65.7434 - val_loss: 0.1887 - val_classification_output_loss: 0.2883 - val_regression_output_loss: 0.0816 - val_final_output_loss: 0.0859 - val_classification_output_accuracy: 0.9193 - val_classification_output_auc: 0.9663 - val_regression_output_mse: 0.1330 - val_regression_output_mae: 0.2139 - val_regression_output_rmse: 0.3380 - val_regression_output_custom_mape: 32.7521 - val_final_output_mse: 0.1332 - val_final_output_mae: 0.2280 - val_final_output_rmse: 0.3383 - val_final_output_custom_mape: 77.1713 - lr: 1.5000e-04\n", "Epoch 17/100\n", "541/541 [==============================] - 54s 100ms/step - loss: 0.0970 - classification_output_loss: 0.0637 - regression_output_loss: 0.0273 - final_output_loss: 0.0316 - classification_output_accuracy: 0.9739 - classification_output_auc: 0.9975 - regression_output_mse: 0.0252 - regression_output_mae: 0.0887 - regression_output_rmse: 0.1533 - regression_output_custom_mape: 19.8560 - final_output_mse: 0.0253 - final_output_mae: 0.1029 - final_output_rmse: 0.1536 - final_output_custom_mape: 65.5673 - val_loss: 0.1351 - val_classification_output_loss: 0.1977 - val_regression_output_loss: 0.0453 - val_final_output_loss: 0.0498 - val_classification_output_accuracy: 0.9382 - val_classification_output_auc: 0.9785 - val_regression_output_mse: 0.0596 - val_regression_output_mae: 0.1399 - val_regression_output_rmse: 0.2281 - val_regression_output_custom_mape: 25.3515 - val_final_output_mse: 0.0599 - val_final_output_mae: 0.1546 - val_final_output_rmse: 
0.2289 - val_final_output_custom_mape: 71.1378 - lr: 1.5000e-04\n", "Epoch 18/100\n", "541/541 [==============================] - 56s 104ms/step - loss: 0.0998 - classification_output_loss: 0.0648 - regression_output_loss: 0.0364 - final_output_loss: 0.0411 - classification_output_accuracy: 0.9724 - classification_output_auc: 0.9975 - regression_output_mse: 0.0448 - regression_output_mae: 0.1148 - regression_output_rmse: 0.1968 - regression_output_custom_mape: 22.1714 - final_output_mse: 0.0461 - final_output_mae: 0.1290 - final_output_rmse: 0.1982 - final_output_custom_mape: 67.7461 - val_loss: 0.1535 - val_classification_output_loss: 0.1355 - val_regression_output_loss: 0.0881 - val_final_output_loss: 0.0928 - val_classification_output_accuracy: 0.9563 - val_classification_output_auc: 0.9891 - val_regression_output_mse: 0.1866 - val_regression_output_mae: 0.2311 - val_regression_output_rmse: 0.3747 - val_regression_output_custom_mape: 30.8233 - val_final_output_mse: 0.1871 - val_final_output_mae: 0.2466 - val_final_output_rmse: 0.3756 - val_final_output_custom_mape: 76.8307 - lr: 1.5000e-04\n", "Epoch 19/100\n", "541/541 [==============================] - 56s 104ms/step - loss: 0.0957 - classification_output_loss: 0.0685 - regression_output_loss: 0.0356 - final_output_loss: 0.0397 - classification_output_accuracy: 0.9717 - classification_output_auc: 0.9970 - regression_output_mse: 0.0417 - regression_output_mae: 0.1129 - regression_output_rmse: 0.1937 - regression_output_custom_mape: 22.1537 - final_output_mse: 0.0418 - final_output_mae: 0.1267 - final_output_rmse: 0.1934 - final_output_custom_mape: 67.7136 - val_loss: 0.2040 - val_classification_output_loss: 0.4776 - val_regression_output_loss: 0.0709 - val_final_output_loss: 0.0749 - val_classification_output_accuracy: 0.8736 - val_classification_output_auc: 0.9469 - val_regression_output_mse: 0.1066 - val_regression_output_mae: 0.1909 - val_regression_output_rmse: 0.2962 - val_regression_output_custom_mape: 
30.8400 - val_final_output_mse: 0.1068 - val_final_output_mae: 0.2036 - val_final_output_rmse: 0.2967 - val_final_output_custom_mape: 70.2057 - lr: 1.5000e-04\n", "Epoch 20/100\n", "541/541 [==============================] - 57s 105ms/step - loss: 0.0885 - classification_output_loss: 0.0706 - regression_output_loss: 0.0308 - final_output_loss: 0.0349 - classification_output_accuracy: 0.9709 - classification_output_auc: 0.9969 - regression_output_mse: 0.0327 - regression_output_mae: 0.0991 - regression_output_rmse: 0.1718 - regression_output_custom_mape: 20.5523 - final_output_mse: 0.0328 - final_output_mae: 0.1127 - final_output_rmse: 0.1716 - final_output_custom_mape: 65.9518 - val_loss: 0.1069 - val_classification_output_loss: 0.1560 - val_regression_output_loss: 0.0350 - val_final_output_loss: 0.0394 - val_classification_output_accuracy: 0.9449 - val_classification_output_auc: 0.9872 - val_regression_output_mse: 0.0383 - val_regression_output_mae: 0.1097 - val_regression_output_rmse: 0.1910 - val_regression_output_custom_mape: 25.4061 - val_final_output_mse: 0.0385 - val_final_output_mae: 0.1244 - val_final_output_rmse: 0.1919 - val_final_output_custom_mape: 71.9942 - lr: 1.5000e-04\n", "Epoch 21/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0849 - classification_output_loss: 0.0647 - regression_output_loss: 0.0324 - final_output_loss: 0.0367 - classification_output_accuracy: 0.9730 - classification_output_auc: 0.9974 - regression_output_mse: 0.0356 - regression_output_mae: 0.1039 - regression_output_rmse: 0.1784 - regression_output_custom_mape: 21.1817 - final_output_mse: 0.0357 - final_output_mae: 0.1182 - final_output_rmse: 0.1785 - final_output_custom_mape: 67.0364\n", "Epoch 21 Detailed Metrics:\n", "541/541 [==============================] - 57s 105ms/step - loss: 0.0849 - classification_output_loss: 0.0647 - regression_output_loss: 0.0324 - final_output_loss: 0.0367 - classification_output_accuracy: 0.9730 - 
classification_output_auc: 0.9974 - regression_output_mse: 0.0356 - regression_output_mae: 0.1039 - regression_output_rmse: 0.1784 - regression_output_custom_mape: 21.1817 - final_output_mse: 0.0357 - final_output_mae: 0.1182 - final_output_rmse: 0.1785 - final_output_custom_mape: 67.0364 - val_loss: 0.1266 - val_classification_output_loss: 0.2513 - val_regression_output_loss: 0.0404 - val_final_output_loss: 0.0448 - val_classification_output_accuracy: 0.9234 - val_classification_output_auc: 0.9729 - val_regression_output_mse: 0.0453 - val_regression_output_mae: 0.1206 - val_regression_output_rmse: 0.1997 - val_regression_output_custom_mape: 25.4902 - val_final_output_mse: 0.0456 - val_final_output_mae: 0.1349 - val_final_output_rmse: 0.2005 - val_final_output_custom_mape: 69.6527 - lr: 1.5000e-04\n", "Epoch 22/100\n", "541/541 [==============================] - 54s 99ms/step - loss: 0.0843 - classification_output_loss: 0.0782 - regression_output_loss: 0.0324 - final_output_loss: 0.0367 - classification_output_accuracy: 0.9668 - classification_output_auc: 0.9963 - regression_output_mse: 0.0351 - regression_output_mae: 0.1035 - regression_output_rmse: 0.1789 - regression_output_custom_mape: 21.2053 - final_output_mse: 0.0355 - final_output_mae: 0.1175 - final_output_rmse: 0.1795 - final_output_custom_mape: 66.7957 - val_loss: 0.2227 - val_classification_output_loss: 0.4580 - val_regression_output_loss: 0.1112 - val_final_output_loss: 0.1152 - val_classification_output_accuracy: 0.8829 - val_classification_output_auc: 0.9413 - val_regression_output_mse: 0.1935 - val_regression_output_mae: 0.2653 - val_regression_output_rmse: 0.3958 - val_regression_output_custom_mape: 35.5581 - val_final_output_mse: 0.1932 - val_final_output_mae: 0.2781 - val_final_output_rmse: 0.3950 - val_final_output_custom_mape: 78.9677 - lr: 1.5000e-04\n", "Epoch 23/100\n", "541/541 [==============================] - 52s 96ms/step - loss: 0.0870 - classification_output_loss: 0.0731 - 
regression_output_loss: 0.0391 - final_output_loss: 0.0435 - classification_output_accuracy: 0.9691 - classification_output_auc: 0.9967 - regression_output_mse: 0.0500 - regression_output_mae: 0.1222 - regression_output_rmse: 0.2072 - regression_output_custom_mape: 23.1067 - final_output_mse: 0.0506 - final_output_mae: 0.1360 - final_output_rmse: 0.2076 - final_output_custom_mape: 68.5077 - val_loss: 0.1775 - val_classification_output_loss: 0.2573 - val_regression_output_loss: 0.1071 - val_final_output_loss: 0.1116 - val_classification_output_accuracy: 0.9199 - val_classification_output_auc: 0.9744 - val_regression_output_mse: 0.1666 - val_regression_output_mae: 0.2438 - val_regression_output_rmse: 0.3710 - val_regression_output_custom_mape: 33.6608 - val_final_output_mse: 0.1669 - val_final_output_mae: 0.2583 - val_final_output_rmse: 0.3716 - val_final_output_custom_mape: 76.9211 - lr: 1.5000e-04\n", "Epoch 24/100\n", "541/541 [==============================] - 54s 100ms/step - loss: 0.0843 - classification_output_loss: 0.0783 - regression_output_loss: 0.0371 - final_output_loss: 0.0412 - classification_output_accuracy: 0.9677 - classification_output_auc: 0.9961 - regression_output_mse: 0.0450 - regression_output_mae: 0.1170 - regression_output_rmse: 0.1988 - regression_output_custom_mape: 22.4883 - final_output_mse: 0.0450 - final_output_mae: 0.1307 - final_output_rmse: 0.1983 - final_output_custom_mape: 67.8036 - val_loss: 0.1307 - val_classification_output_loss: 0.2292 - val_regression_output_loss: 0.0589 - val_final_output_loss: 0.0634 - val_classification_output_accuracy: 0.9306 - val_classification_output_auc: 0.9793 - val_regression_output_mse: 0.0809 - val_regression_output_mae: 0.1633 - val_regression_output_rmse: 0.2541 - val_regression_output_custom_mape: 28.1449 - val_final_output_mse: 0.0813 - val_final_output_mae: 0.1780 - val_final_output_rmse: 0.2552 - val_final_output_custom_mape: 71.9658 - lr: 1.5000e-04\n", "Epoch 25/100\n", "541/541 
[==============================] - 56s 103ms/step - loss: 0.0716 - classification_output_loss: 0.0618 - regression_output_loss: 0.0293 - final_output_loss: 0.0336 - classification_output_accuracy: 0.9747 - classification_output_auc: 0.9976 - regression_output_mse: 0.0296 - regression_output_mae: 0.0948 - regression_output_rmse: 0.1638 - regression_output_custom_mape: 19.9853 - final_output_mse: 0.0298 - final_output_mae: 0.1090 - final_output_rmse: 0.1639 - final_output_custom_mape: 65.7690 - val_loss: 0.1265 - val_classification_output_loss: 0.2870 - val_regression_output_loss: 0.0438 - val_final_output_loss: 0.0481 - val_classification_output_accuracy: 0.9252 - val_classification_output_auc: 0.9695 - val_regression_output_mse: 0.0554 - val_regression_output_mae: 0.1393 - val_regression_output_rmse: 0.2290 - val_regression_output_custom_mape: 25.3975 - val_final_output_mse: 0.0555 - val_final_output_mae: 0.1534 - val_final_output_rmse: 0.2291 - val_final_output_custom_mape: 71.5147 - lr: 1.5000e-04\n", "Epoch 26/100\n", "541/541 [==============================] - 55s 102ms/step - loss: 0.0770 - classification_output_loss: 0.0757 - regression_output_loss: 0.0346 - final_output_loss: 0.0392 - classification_output_accuracy: 0.9682 - classification_output_auc: 0.9964 - regression_output_mse: 0.0404 - regression_output_mae: 0.1096 - regression_output_rmse: 0.1880 - regression_output_custom_mape: 21.4210 - final_output_mse: 0.0412 - final_output_mae: 0.1240 - final_output_rmse: 0.1889 - final_output_custom_mape: 67.1111 - val_loss: 0.1067 - val_classification_output_loss: 0.1242 - val_regression_output_loss: 0.0610 - val_final_output_loss: 0.0657 - val_classification_output_accuracy: 0.9530 - val_classification_output_auc: 0.9917 - val_regression_output_mse: 0.1073 - val_regression_output_mae: 0.1808 - val_regression_output_rmse: 0.2964 - val_regression_output_custom_mape: 27.8969 - val_final_output_mse: 0.1078 - val_final_output_mae: 0.1963 - val_final_output_rmse: 
0.2975 - val_final_output_custom_mape: 73.3188 - lr: 1.5000e-04\n", "Epoch 27/100\n", "541/541 [==============================] - 56s 103ms/step - loss: 0.0711 - classification_output_loss: 0.0661 - regression_output_loss: 0.0324 - final_output_loss: 0.0366 - classification_output_accuracy: 0.9721 - classification_output_auc: 0.9973 - regression_output_mse: 0.0345 - regression_output_mae: 0.1040 - regression_output_rmse: 0.1770 - regression_output_custom_mape: 21.4817 - final_output_mse: 0.0345 - final_output_mae: 0.1179 - final_output_rmse: 0.1769 - final_output_custom_mape: 67.0813 - val_loss: 0.0773 - val_classification_output_loss: 0.1097 - val_regression_output_loss: 0.0307 - val_final_output_loss: 0.0355 - val_classification_output_accuracy: 0.9597 - val_classification_output_auc: 0.9931 - val_regression_output_mse: 0.0301 - val_regression_output_mae: 0.0991 - val_regression_output_rmse: 0.1648 - val_regression_output_custom_mape: 21.1672 - val_final_output_mse: 0.0306 - val_final_output_mae: 0.1148 - val_final_output_rmse: 0.1665 - val_final_output_custom_mape: 67.1104 - lr: 1.5000e-04\n", "Epoch 28/100\n", "541/541 [==============================] - 55s 102ms/step - loss: 0.0659 - classification_output_loss: 0.0620 - regression_output_loss: 0.0295 - final_output_loss: 0.0337 - classification_output_accuracy: 0.9738 - classification_output_auc: 0.9977 - regression_output_mse: 0.0302 - regression_output_mae: 0.0953 - regression_output_rmse: 0.1650 - regression_output_custom_mape: 19.7939 - final_output_mse: 0.0303 - final_output_mae: 0.1094 - final_output_rmse: 0.1651 - final_output_custom_mape: 65.5229 - val_loss: 0.1090 - val_classification_output_loss: 0.2555 - val_regression_output_loss: 0.0365 - val_final_output_loss: 0.0410 - val_classification_output_accuracy: 0.9139 - val_classification_output_auc: 0.9787 - val_regression_output_mse: 0.0390 - val_regression_output_mae: 0.1108 - val_regression_output_rmse: 0.1787 - val_regression_output_custom_mape: 
23.8597 - val_final_output_mse: 0.0395 - val_final_output_mae: 0.1250 - val_final_output_rmse: 0.1804 - val_final_output_custom_mape: 65.6156 - lr: 1.5000e-04\n", "Epoch 29/100\n", "541/541 [==============================] - 57s 106ms/step - loss: 0.0718 - classification_output_loss: 0.0786 - regression_output_loss: 0.0342 - final_output_loss: 0.0389 - classification_output_accuracy: 0.9677 - classification_output_auc: 0.9961 - regression_output_mse: 0.0398 - regression_output_mae: 0.1084 - regression_output_rmse: 0.1867 - regression_output_custom_mape: 21.4302 - final_output_mse: 0.0409 - final_output_mae: 0.1227 - final_output_rmse: 0.1880 - final_output_custom_mape: 67.0664 - val_loss: 0.2156 - val_classification_output_loss: 0.4495 - val_regression_output_loss: 0.1210 - val_final_output_loss: 0.1254 - val_classification_output_accuracy: 0.8694 - val_classification_output_auc: 0.9480 - val_regression_output_mse: 0.1870 - val_regression_output_mae: 0.2650 - val_regression_output_rmse: 0.3836 - val_regression_output_custom_mape: 34.3060 - val_final_output_mse: 0.1872 - val_final_output_mae: 0.2775 - val_final_output_rmse: 0.3839 - val_final_output_custom_mape: 73.4290 - lr: 1.5000e-04\n", "Epoch 30/100\n", "541/541 [==============================] - 58s 106ms/step - loss: 0.0626 - classification_output_loss: 0.0648 - regression_output_loss: 0.0279 - final_output_loss: 0.0321 - classification_output_accuracy: 0.9728 - classification_output_auc: 0.9974 - regression_output_mse: 0.0262 - regression_output_mae: 0.0903 - regression_output_rmse: 0.1564 - regression_output_custom_mape: 19.9212 - final_output_mse: 0.0262 - final_output_mae: 0.1043 - final_output_rmse: 0.1563 - final_output_custom_mape: 65.5329 - val_loss: 0.1270 - val_classification_output_loss: 0.2877 - val_regression_output_loss: 0.0539 - val_final_output_loss: 0.0582 - val_classification_output_accuracy: 0.9202 - val_classification_output_auc: 0.9659 - val_regression_output_mse: 0.0736 - 
val_regression_output_mae: 0.1570 - val_regression_output_rmse: 0.2487 - val_regression_output_custom_mape: 28.4783 - val_final_output_mse: 0.0738 - val_final_output_mae: 0.1711 - val_final_output_rmse: 0.2490 - val_final_output_custom_mape: 73.3610 - lr: 1.5000e-04\n", "Epoch 31/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0616 - classification_output_loss: 0.0661 - regression_output_loss: 0.0285 - final_output_loss: 0.0327 - classification_output_accuracy: 0.9723 - classification_output_auc: 0.9973 - regression_output_mse: 0.0271 - regression_output_mae: 0.0922 - regression_output_rmse: 0.1584 - regression_output_custom_mape: 20.3898 - final_output_mse: 0.0272 - final_output_mae: 0.1062 - final_output_rmse: 0.1586 - final_output_custom_mape: 66.0008\n", "Epoch 31 Detailed Metrics:\n", "541/541 [==============================] - 57s 105ms/step - loss: 0.0616 - classification_output_loss: 0.0661 - regression_output_loss: 0.0285 - final_output_loss: 0.0327 - classification_output_accuracy: 0.9723 - classification_output_auc: 0.9973 - regression_output_mse: 0.0271 - regression_output_mae: 0.0922 - regression_output_rmse: 0.1584 - regression_output_custom_mape: 20.3898 - final_output_mse: 0.0272 - final_output_mae: 0.1062 - final_output_rmse: 0.1586 - final_output_custom_mape: 66.0008 - val_loss: 0.0922 - val_classification_output_loss: 0.1850 - val_regression_output_loss: 0.0376 - val_final_output_loss: 0.0420 - val_classification_output_accuracy: 0.9487 - val_classification_output_auc: 0.9876 - val_regression_output_mse: 0.0436 - val_regression_output_mae: 0.1203 - val_regression_output_rmse: 0.2009 - val_regression_output_custom_mape: 25.2706 - val_final_output_mse: 0.0439 - val_final_output_mae: 0.1349 - val_final_output_rmse: 0.2017 - val_final_output_custom_mape: 72.1134 - lr: 1.5000e-04\n", "Epoch 32/100\n", "541/541 [==============================] - 59s 109ms/step - loss: 0.0650 - classification_output_loss: 0.0672 - 
regression_output_loss: 0.0332 - final_output_loss: 0.0375 - classification_output_accuracy: 0.9716 - classification_output_auc: 0.9971 - regression_output_mse: 0.0365 - regression_output_mae: 0.1059 - regression_output_rmse: 0.1810 - regression_output_custom_mape: 21.4473 - final_output_mse: 0.0367 - final_output_mae: 0.1203 - final_output_rmse: 0.1815 - final_output_custom_mape: 67.2227 - val_loss: 0.1220 - val_classification_output_loss: 0.2766 - val_regression_output_loss: 0.0532 - val_final_output_loss: 0.0576 - val_classification_output_accuracy: 0.9175 - val_classification_output_auc: 0.9696 - val_regression_output_mse: 0.0693 - val_regression_output_mae: 0.1501 - val_regression_output_rmse: 0.2396 - val_regression_output_custom_mape: 27.8170 - val_final_output_mse: 0.0696 - val_final_output_mae: 0.1643 - val_final_output_rmse: 0.2406 - val_final_output_custom_mape: 71.0394 - lr: 1.5000e-04\n", "Epoch 33/100\n", "541/541 [==============================] - 58s 108ms/step - loss: 0.0613 - classification_output_loss: 0.0684 - regression_output_loss: 0.0300 - final_output_loss: 0.0341 - classification_output_accuracy: 0.9717 - classification_output_auc: 0.9970 - regression_output_mse: 0.0302 - regression_output_mae: 0.0967 - regression_output_rmse: 0.1657 - regression_output_custom_mape: 20.6231 - final_output_mse: 0.0303 - final_output_mae: 0.1106 - final_output_rmse: 0.1655 - final_output_custom_mape: 66.1750 - val_loss: 0.0831 - val_classification_output_loss: 0.1589 - val_regression_output_loss: 0.0354 - val_final_output_loss: 0.0399 - val_classification_output_accuracy: 0.9504 - val_classification_output_auc: 0.9857 - val_regression_output_mse: 0.0396 - val_regression_output_mae: 0.1140 - val_regression_output_rmse: 0.1903 - val_regression_output_custom_mape: 23.1021 - val_final_output_mse: 0.0400 - val_final_output_mae: 0.1290 - val_final_output_rmse: 0.1914 - val_final_output_custom_mape: 69.5060 - lr: 1.5000e-04\n", "Epoch 34/100\n", "541/541 
[==============================] - ETA: 0s - loss: 0.0607 - classification_output_loss: 0.0626 - regression_output_loss: 0.0318 - final_output_loss: 0.0360 - classification_output_accuracy: 0.9731 - classification_output_auc: 0.9976 - regression_output_mse: 0.0335 - regression_output_mae: 0.1023 - regression_output_rmse: 0.1740 - regression_output_custom_mape: 21.1940 - final_output_mse: 0.0336 - final_output_mae: 0.1165 - final_output_rmse: 0.1740 - final_output_custom_mape: 66.9985\n", "Epoch 34: ReduceLROnPlateau reducing learning rate to 7.500000356230885e-05.\n", "541/541 [==============================] - 57s 106ms/step - loss: 0.0607 - classification_output_loss: 0.0626 - regression_output_loss: 0.0318 - final_output_loss: 0.0360 - classification_output_accuracy: 0.9731 - classification_output_auc: 0.9976 - regression_output_mse: 0.0335 - regression_output_mae: 0.1023 - regression_output_rmse: 0.1740 - regression_output_custom_mape: 21.1940 - final_output_mse: 0.0336 - final_output_mae: 0.1165 - final_output_rmse: 0.1740 - final_output_custom_mape: 66.9985 - val_loss: 0.1197 - val_classification_output_loss: 0.3105 - val_regression_output_loss: 0.0446 - val_final_output_loss: 0.0488 - val_classification_output_accuracy: 0.9176 - val_classification_output_auc: 0.9637 - val_regression_output_mse: 0.0549 - val_regression_output_mae: 0.1412 - val_regression_output_rmse: 0.2287 - val_regression_output_custom_mape: 25.8096 - val_final_output_mse: 0.0549 - val_final_output_mae: 0.1551 - val_final_output_rmse: 0.2288 - val_final_output_custom_mape: 71.1129 - lr: 1.5000e-04\n", "Epoch 35/100\n", "541/541 [==============================] - 59s 109ms/step - loss: 0.0534 - classification_output_loss: 0.0549 - regression_output_loss: 0.0260 - final_output_loss: 0.0306 - classification_output_accuracy: 0.9770 - classification_output_auc: 0.9981 - regression_output_mse: 0.0233 - regression_output_mae: 0.0849 - regression_output_rmse: 0.1458 - regression_output_custom_mape: 
18.8262 - final_output_mse: 0.0239 - final_output_mae: 0.0996 - final_output_rmse: 0.1470 - final_output_custom_mape: 64.8782 - val_loss: 0.0673 - val_classification_output_loss: 0.1295 - val_regression_output_loss: 0.0258 - val_final_output_loss: 0.0304 - val_classification_output_accuracy: 0.9557 - val_classification_output_auc: 0.9896 - val_regression_output_mse: 0.0212 - val_regression_output_mae: 0.0835 - val_regression_output_rmse: 0.1415 - val_regression_output_custom_mape: 21.2596 - val_final_output_mse: 0.0216 - val_final_output_mae: 0.0989 - val_final_output_rmse: 0.1432 - val_final_output_custom_mape: 67.3351 - lr: 7.5000e-05\n", "Epoch 36/100\n", "541/541 [==============================] - 59s 109ms/step - loss: 0.0508 - classification_output_loss: 0.0519 - regression_output_loss: 0.0250 - final_output_loss: 0.0294 - classification_output_accuracy: 0.9781 - classification_output_auc: 0.9984 - regression_output_mse: 0.0215 - regression_output_mae: 0.0819 - regression_output_rmse: 0.1412 - regression_output_custom_mape: 18.4933 - final_output_mse: 0.0218 - final_output_mae: 0.0965 - final_output_rmse: 0.1419 - final_output_custom_mape: 64.5884 - val_loss: 0.0713 - val_classification_output_loss: 0.1363 - val_regression_output_loss: 0.0302 - val_final_output_loss: 0.0348 - val_classification_output_accuracy: 0.9533 - val_classification_output_auc: 0.9888 - val_regression_output_mse: 0.0276 - val_regression_output_mae: 0.0976 - val_regression_output_rmse: 0.1597 - val_regression_output_custom_mape: 21.3838 - val_final_output_mse: 0.0280 - val_final_output_mae: 0.1127 - val_final_output_rmse: 0.1610 - val_final_output_custom_mape: 67.4136 - lr: 7.5000e-05\n", "Epoch 37/100\n", "541/541 [==============================] - 59s 109ms/step - loss: 0.0517 - classification_output_loss: 0.0540 - regression_output_loss: 0.0264 - final_output_loss: 0.0308 - classification_output_accuracy: 0.9769 - classification_output_auc: 0.9983 - regression_output_mse: 0.0241 - 
regression_output_mae: 0.0859 - regression_output_rmse: 0.1480 - regression_output_custom_mape: 18.8216 - final_output_mse: 0.0243 - final_output_mae: 0.1006 - final_output_rmse: 0.1487 - final_output_custom_mape: 64.9566 - val_loss: 0.0709 - val_classification_output_loss: 0.1296 - val_regression_output_loss: 0.0319 - val_final_output_loss: 0.0366 - val_classification_output_accuracy: 0.9538 - val_classification_output_auc: 0.9924 - val_regression_output_mse: 0.0321 - val_regression_output_mae: 0.0979 - val_regression_output_rmse: 0.1648 - val_regression_output_custom_mape: 22.7165 - val_final_output_mse: 0.0326 - val_final_output_mae: 0.1135 - val_final_output_rmse: 0.1667 - val_final_output_custom_mape: 68.0742 - lr: 7.5000e-05\n", "Epoch 38/100\n", "541/541 [==============================] - 58s 107ms/step - loss: 0.0525 - classification_output_loss: 0.0565 - regression_output_loss: 0.0274 - final_output_loss: 0.0318 - classification_output_accuracy: 0.9763 - classification_output_auc: 0.9981 - regression_output_mse: 0.0255 - regression_output_mae: 0.0891 - regression_output_rmse: 0.1530 - regression_output_custom_mape: 19.1326 - final_output_mse: 0.0258 - final_output_mae: 0.1037 - final_output_rmse: 0.1537 - final_output_custom_mape: 65.2182 - val_loss: 0.0966 - val_classification_output_loss: 0.1959 - val_regression_output_loss: 0.0478 - val_final_output_loss: 0.0524 - val_classification_output_accuracy: 0.9431 - val_classification_output_auc: 0.9826 - val_regression_output_mse: 0.0598 - val_regression_output_mae: 0.1395 - val_regression_output_rmse: 0.2213 - val_regression_output_custom_mape: 26.5063 - val_final_output_mse: 0.0603 - val_final_output_mae: 0.1547 - val_final_output_rmse: 0.2227 - val_final_output_custom_mape: 71.2044 - lr: 7.5000e-05\n", "Epoch 39/100\n", "541/541 [==============================] - 57s 106ms/step - loss: 0.0495 - classification_output_loss: 0.0537 - regression_output_loss: 0.0248 - final_output_loss: 0.0293 - 
classification_output_accuracy: 0.9774 - classification_output_auc: 0.9982 - regression_output_mse: 0.0211 - regression_output_mae: 0.0813 - regression_output_rmse: 0.1401 - regression_output_custom_mape: 18.5320 - final_output_mse: 0.0214 - final_output_mae: 0.0959 - final_output_rmse: 0.1410 - final_output_custom_mape: 64.5566 - val_loss: 0.0905 - val_classification_output_loss: 0.1782 - val_regression_output_loss: 0.0453 - val_final_output_loss: 0.0500 - val_classification_output_accuracy: 0.9458 - val_classification_output_auc: 0.9857 - val_regression_output_mse: 0.0556 - val_regression_output_mae: 0.1355 - val_regression_output_rmse: 0.2128 - val_regression_output_custom_mape: 26.0684 - val_final_output_mse: 0.0560 - val_final_output_mae: 0.1508 - val_final_output_rmse: 0.2142 - val_final_output_custom_mape: 70.9711 - lr: 7.5000e-05\n", "Epoch 40/100\n", "541/541 [==============================] - 57s 105ms/step - loss: 0.0489 - classification_output_loss: 0.0548 - regression_output_loss: 0.0246 - final_output_loss: 0.0290 - classification_output_accuracy: 0.9771 - classification_output_auc: 0.9981 - regression_output_mse: 0.0208 - regression_output_mae: 0.0803 - regression_output_rmse: 0.1387 - regression_output_custom_mape: 18.3416 - final_output_mse: 0.0210 - final_output_mae: 0.0950 - final_output_rmse: 0.1395 - final_output_custom_mape: 64.3632 - val_loss: 0.0903 - val_classification_output_loss: 0.1696 - val_regression_output_loss: 0.0477 - val_final_output_loss: 0.0524 - val_classification_output_accuracy: 0.9471 - val_classification_output_auc: 0.9867 - val_regression_output_mse: 0.0601 - val_regression_output_mae: 0.1440 - val_regression_output_rmse: 0.2217 - val_regression_output_custom_mape: 26.6593 - val_final_output_mse: 0.0606 - val_final_output_mae: 0.1593 - val_final_output_rmse: 0.2231 - val_final_output_custom_mape: 71.6669 - lr: 7.5000e-05\n", "Epoch 41/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0493 - 
classification_output_loss: 0.0555 - regression_output_loss: 0.0254 - final_output_loss: 0.0298 - classification_output_accuracy: 0.9770 - classification_output_auc: 0.9980 - regression_output_mse: 0.0219 - regression_output_mae: 0.0831 - regression_output_rmse: 0.1426 - regression_output_custom_mape: 18.8023 - final_output_mse: 0.0221 - final_output_mae: 0.0976 - final_output_rmse: 0.1432 - final_output_custom_mape: 64.7758\n", "Epoch 41 Detailed Metrics:\n", "541/541 [==============================] - 57s 106ms/step - loss: 0.0493 - classification_output_loss: 0.0555 - regression_output_loss: 0.0254 - final_output_loss: 0.0298 - classification_output_accuracy: 0.9770 - classification_output_auc: 0.9980 - regression_output_mse: 0.0219 - regression_output_mae: 0.0831 - regression_output_rmse: 0.1426 - regression_output_custom_mape: 18.8023 - final_output_mse: 0.0221 - final_output_mae: 0.0976 - final_output_rmse: 0.1432 - final_output_custom_mape: 64.7758 - val_loss: 0.0931 - val_classification_output_loss: 0.1918 - val_regression_output_loss: 0.0461 - val_final_output_loss: 0.0508 - val_classification_output_accuracy: 0.9437 - val_classification_output_auc: 0.9827 - val_regression_output_mse: 0.0558 - val_regression_output_mae: 0.1358 - val_regression_output_rmse: 0.2143 - val_regression_output_custom_mape: 26.0898 - val_final_output_mse: 0.0562 - val_final_output_mae: 0.1510 - val_final_output_rmse: 0.2157 - val_final_output_custom_mape: 70.9012 - lr: 7.5000e-05\n", "Epoch 42/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0489 - classification_output_loss: 0.0559 - regression_output_loss: 0.0251 - final_output_loss: 0.0295 - classification_output_accuracy: 0.9765 - classification_output_auc: 0.9980 - regression_output_mse: 0.0214 - regression_output_mae: 0.0822 - regression_output_rmse: 0.1415 - regression_output_custom_mape: 18.6815 - final_output_mse: 0.0216 - final_output_mae: 0.0968 - final_output_rmse: 0.1421 - 
final_output_custom_mape: 64.6422\n", "Epoch 42: ReduceLROnPlateau reducing learning rate to 3.7500001781154424e-05.\n", "541/541 [==============================] - 60s 110ms/step - loss: 0.0489 - classification_output_loss: 0.0559 - regression_output_loss: 0.0251 - final_output_loss: 0.0295 - classification_output_accuracy: 0.9765 - classification_output_auc: 0.9980 - regression_output_mse: 0.0214 - regression_output_mae: 0.0822 - regression_output_rmse: 0.1415 - regression_output_custom_mape: 18.6815 - final_output_mse: 0.0216 - final_output_mae: 0.0968 - final_output_rmse: 0.1421 - final_output_custom_mape: 64.6422 - val_loss: 0.0907 - val_classification_output_loss: 0.1880 - val_regression_output_loss: 0.0445 - val_final_output_loss: 0.0492 - val_classification_output_accuracy: 0.9440 - val_classification_output_auc: 0.9831 - val_regression_output_mse: 0.0543 - val_regression_output_mae: 0.1345 - val_regression_output_rmse: 0.2114 - val_regression_output_custom_mape: 26.1261 - val_final_output_mse: 0.0547 - val_final_output_mae: 0.1497 - val_final_output_rmse: 0.2128 - val_final_output_custom_mape: 71.0932 - lr: 7.5000e-05\n", "Epoch 43/100\n", "541/541 [==============================] - 58s 108ms/step - loss: 0.0438 - classification_output_loss: 0.0492 - regression_output_loss: 0.0210 - final_output_loss: 0.0255 - classification_output_accuracy: 0.9793 - classification_output_auc: 0.9985 - regression_output_mse: 0.0154 - regression_output_mae: 0.0690 - regression_output_rmse: 0.1200 - regression_output_custom_mape: 16.9526 - final_output_mse: 0.0157 - final_output_mae: 0.0840 - final_output_rmse: 0.1212 - final_output_custom_mape: 63.1727 - val_loss: 0.0623 - val_classification_output_loss: 0.1263 - val_regression_output_loss: 0.0252 - val_final_output_loss: 0.0299 - val_classification_output_accuracy: 0.9579 - val_classification_output_auc: 0.9908 - val_regression_output_mse: 0.0204 - val_regression_output_mae: 0.0816 - val_regression_output_rmse: 0.1371 - 
val_regression_output_custom_mape: 21.3421 - val_final_output_mse: 0.0209 - val_final_output_mae: 0.0972 - val_final_output_rmse: 0.1390 - val_final_output_custom_mape: 67.4464 - lr: 3.7500e-05\n", "Epoch 44/100\n", "541/541 [==============================] - 59s 109ms/step - loss: 0.0419 - classification_output_loss: 0.0470 - regression_output_loss: 0.0198 - final_output_loss: 0.0244 - classification_output_accuracy: 0.9804 - classification_output_auc: 0.9986 - regression_output_mse: 0.0138 - regression_output_mae: 0.0654 - regression_output_rmse: 0.1140 - regression_output_custom_mape: 16.4914 - final_output_mse: 0.0141 - final_output_mae: 0.0804 - final_output_rmse: 0.1155 - final_output_custom_mape: 62.8166 - val_loss: 0.0587 - val_classification_output_loss: 0.1175 - val_regression_output_loss: 0.0235 - val_final_output_loss: 0.0282 - val_classification_output_accuracy: 0.9606 - val_classification_output_auc: 0.9919 - val_regression_output_mse: 0.0177 - val_regression_output_mae: 0.0767 - val_regression_output_rmse: 0.1297 - val_regression_output_custom_mape: 20.6893 - val_final_output_mse: 0.0182 - val_final_output_mae: 0.0923 - val_final_output_rmse: 0.1317 - val_final_output_custom_mape: 66.9004 - lr: 3.7500e-05\n", "Epoch 45/100\n", "541/541 [==============================] - 60s 112ms/step - loss: 0.0414 - classification_output_loss: 0.0471 - regression_output_loss: 0.0197 - final_output_loss: 0.0242 - classification_output_accuracy: 0.9801 - classification_output_auc: 0.9986 - regression_output_mse: 0.0134 - regression_output_mae: 0.0649 - regression_output_rmse: 0.1128 - regression_output_custom_mape: 16.4420 - final_output_mse: 0.0137 - final_output_mae: 0.0800 - final_output_rmse: 0.1143 - final_output_custom_mape: 62.7677 - val_loss: 0.0571 - val_classification_output_loss: 0.1136 - val_regression_output_loss: 0.0230 - val_final_output_loss: 0.0277 - val_classification_output_accuracy: 0.9607 - val_classification_output_auc: 0.9924 - 
val_regression_output_mse: 0.0173 - val_regression_output_mae: 0.0746 - val_regression_output_rmse: 0.1266 - val_regression_output_custom_mape: 20.4492 - val_final_output_mse: 0.0178 - val_final_output_mae: 0.0902 - val_final_output_rmse: 0.1287 - val_final_output_custom_mape: 66.6717 - lr: 3.7500e-05\n", "Epoch 46/100\n", "541/541 [==============================] - 58s 108ms/step - loss: 0.0418 - classification_output_loss: 0.0482 - regression_output_loss: 0.0204 - final_output_loss: 0.0249 - classification_output_accuracy: 0.9796 - classification_output_auc: 0.9986 - regression_output_mse: 0.0144 - regression_output_mae: 0.0672 - regression_output_rmse: 0.1169 - regression_output_custom_mape: 16.6837 - final_output_mse: 0.0147 - final_output_mae: 0.0823 - final_output_rmse: 0.1184 - final_output_custom_mape: 63.0407 - val_loss: 0.0669 - val_classification_output_loss: 0.1158 - val_regression_output_loss: 0.0349 - val_final_output_loss: 0.0396 - val_classification_output_accuracy: 0.9604 - val_classification_output_auc: 0.9924 - val_regression_output_mse: 0.0365 - val_regression_output_mae: 0.1113 - val_regression_output_rmse: 0.1765 - val_regression_output_custom_mape: 23.5966 - val_final_output_mse: 0.0370 - val_final_output_mae: 0.1270 - val_final_output_rmse: 0.1782 - val_final_output_custom_mape: 69.7012 - lr: 3.7500e-05\n", "Epoch 47/100\n", "541/541 [==============================] - 57s 106ms/step - loss: 0.0416 - classification_output_loss: 0.0464 - regression_output_loss: 0.0209 - final_output_loss: 0.0254 - classification_output_accuracy: 0.9807 - classification_output_auc: 0.9987 - regression_output_mse: 0.0151 - regression_output_mae: 0.0688 - regression_output_rmse: 0.1189 - regression_output_custom_mape: 17.0184 - final_output_mse: 0.0154 - final_output_mae: 0.0839 - final_output_rmse: 0.1202 - final_output_custom_mape: 63.3004 - val_loss: 0.0576 - val_classification_output_loss: 0.1187 - val_regression_output_loss: 0.0230 - val_final_output_loss: 
0.0277 - val_classification_output_accuracy: 0.9583 - val_classification_output_auc: 0.9917 - val_regression_output_mse: 0.0169 - val_regression_output_mae: 0.0751 - val_regression_output_rmse: 0.1274 - val_regression_output_custom_mape: 20.1316 - val_final_output_mse: 0.0173 - val_final_output_mae: 0.0906 - val_final_output_rmse: 0.1293 - val_final_output_custom_mape: 66.4177 - lr: 3.7500e-05\n", "Epoch 48/100\n", "541/541 [==============================] - 60s 111ms/step - loss: 0.0424 - classification_output_loss: 0.0483 - regression_output_loss: 0.0216 - final_output_loss: 0.0262 - classification_output_accuracy: 0.9794 - classification_output_auc: 0.9986 - regression_output_mse: 0.0162 - regression_output_mae: 0.0711 - regression_output_rmse: 0.1232 - regression_output_custom_mape: 17.0768 - final_output_mse: 0.0166 - final_output_mae: 0.0862 - final_output_rmse: 0.1246 - final_output_custom_mape: 63.4081 - val_loss: 0.0640 - val_classification_output_loss: 0.1394 - val_regression_output_loss: 0.0260 - val_final_output_loss: 0.0307 - val_classification_output_accuracy: 0.9565 - val_classification_output_auc: 0.9892 - val_regression_output_mse: 0.0213 - val_regression_output_mae: 0.0842 - val_regression_output_rmse: 0.1389 - val_regression_output_custom_mape: 21.5426 - val_final_output_mse: 0.0218 - val_final_output_mae: 0.0998 - val_final_output_rmse: 0.1408 - val_final_output_custom_mape: 67.5867 - lr: 3.7500e-05\n", "Epoch 49/100\n", "541/541 [==============================] - 55s 101ms/step - loss: 0.0399 - classification_output_loss: 0.0466 - regression_output_loss: 0.0192 - final_output_loss: 0.0238 - classification_output_accuracy: 0.9805 - classification_output_auc: 0.9987 - regression_output_mse: 0.0128 - regression_output_mae: 0.0635 - regression_output_rmse: 0.1100 - regression_output_custom_mape: 16.2668 - final_output_mse: 0.0131 - final_output_mae: 0.0787 - final_output_rmse: 0.1117 - final_output_custom_mape: 62.6690 - val_loss: 0.0574 - 
val_classification_output_loss: 0.1066 - val_regression_output_loss: 0.0263 - val_final_output_loss: 0.0311 - val_classification_output_accuracy: 0.9619 - val_classification_output_auc: 0.9930 - val_regression_output_mse: 0.0218 - val_regression_output_mae: 0.0855 - val_regression_output_rmse: 0.1405 - val_regression_output_custom_mape: 21.2892 - val_final_output_mse: 0.0223 - val_final_output_mae: 0.1011 - val_final_output_rmse: 0.1425 - val_final_output_custom_mape: 67.5575 - lr: 3.7500e-05\n", "Epoch 50/100\n", "541/541 [==============================] - 55s 102ms/step - loss: 0.0393 - classification_output_loss: 0.0456 - regression_output_loss: 0.0192 - final_output_loss: 0.0237 - classification_output_accuracy: 0.9807 - classification_output_auc: 0.9987 - regression_output_mse: 0.0127 - regression_output_mae: 0.0632 - regression_output_rmse: 0.1094 - regression_output_custom_mape: 16.3543 - final_output_mse: 0.0130 - final_output_mae: 0.0783 - final_output_rmse: 0.1110 - final_output_custom_mape: 62.6908 - val_loss: 0.0548 - val_classification_output_loss: 0.1058 - val_regression_output_loss: 0.0236 - val_final_output_loss: 0.0284 - val_classification_output_accuracy: 0.9626 - val_classification_output_auc: 0.9932 - val_regression_output_mse: 0.0180 - val_regression_output_mae: 0.0768 - val_regression_output_rmse: 0.1285 - val_regression_output_custom_mape: 20.5455 - val_final_output_mse: 0.0185 - val_final_output_mae: 0.0925 - val_final_output_rmse: 0.1307 - val_final_output_custom_mape: 66.7687 - lr: 3.7500e-05\n", "Epoch 51/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0393 - classification_output_loss: 0.0463 - regression_output_loss: 0.0192 - final_output_loss: 0.0238 - classification_output_accuracy: 0.9808 - classification_output_auc: 0.9987 - regression_output_mse: 0.0127 - regression_output_mae: 0.0635 - regression_output_rmse: 0.1099 - regression_output_custom_mape: 16.4549 - final_output_mse: 0.0131 - final_output_mae: 0.0787 
- final_output_rmse: 0.1116 - final_output_custom_mape: 62.7996\n", "Epoch 51 Detailed Metrics:\n", "541/541 [==============================] - 51s 95ms/step - loss: 0.0393 - classification_output_loss: 0.0463 - regression_output_loss: 0.0192 - final_output_loss: 0.0238 - classification_output_accuracy: 0.9808 - classification_output_auc: 0.9987 - regression_output_mse: 0.0127 - regression_output_mae: 0.0635 - regression_output_rmse: 0.1099 - regression_output_custom_mape: 16.4549 - final_output_mse: 0.0131 - final_output_mae: 0.0787 - final_output_rmse: 0.1116 - final_output_custom_mape: 62.7996 - val_loss: 0.0545 - val_classification_output_loss: 0.1059 - val_regression_output_loss: 0.0235 - val_final_output_loss: 0.0283 - val_classification_output_accuracy: 0.9624 - val_classification_output_auc: 0.9934 - val_regression_output_mse: 0.0178 - val_regression_output_mae: 0.0766 - val_regression_output_rmse: 0.1285 - val_regression_output_custom_mape: 20.5595 - val_final_output_mse: 0.0183 - val_final_output_mae: 0.0924 - val_final_output_rmse: 0.1307 - val_final_output_custom_mape: 66.7998 - lr: 3.7500e-05\n", "Epoch 52/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0398 - classification_output_loss: 0.0467 - regression_output_loss: 0.0200 - final_output_loss: 0.0246 - classification_output_accuracy: 0.9806 - classification_output_auc: 0.9987 - regression_output_mse: 0.0138 - regression_output_mae: 0.0660 - regression_output_rmse: 0.1142 - regression_output_custom_mape: 16.6527 - final_output_mse: 0.0142 - final_output_mae: 0.0812 - final_output_rmse: 0.1158 - final_output_custom_mape: 63.0652\n", "Epoch 52: ReduceLROnPlateau reducing learning rate to 1.8750000890577212e-05.\n", "541/541 [==============================] - 52s 96ms/step - loss: 0.0398 - classification_output_loss: 0.0467 - regression_output_loss: 0.0200 - final_output_loss: 0.0246 - classification_output_accuracy: 0.9806 - classification_output_auc: 0.9987 - 
regression_output_mse: 0.0138 - regression_output_mae: 0.0660 - regression_output_rmse: 0.1142 - regression_output_custom_mape: 16.6527 - final_output_mse: 0.0142 - final_output_mae: 0.0812 - final_output_rmse: 0.1158 - final_output_custom_mape: 63.0652 - val_loss: 0.0612 - val_classification_output_loss: 0.1104 - val_regression_output_loss: 0.0310 - val_final_output_loss: 0.0357 - val_classification_output_accuracy: 0.9615 - val_classification_output_auc: 0.9931 - val_regression_output_mse: 0.0300 - val_regression_output_mae: 0.0999 - val_regression_output_rmse: 0.1614 - val_regression_output_custom_mape: 22.4325 - val_final_output_mse: 0.0305 - val_final_output_mae: 0.1157 - val_final_output_rmse: 0.1632 - val_final_output_custom_mape: 68.5919 - lr: 3.7500e-05\n", "Epoch 53/100\n", "541/541 [==============================] - 53s 98ms/step - loss: 0.0388 - classification_output_loss: 0.0466 - regression_output_loss: 0.0190 - final_output_loss: 0.0236 - classification_output_accuracy: 0.9803 - classification_output_auc: 0.9986 - regression_output_mse: 0.0126 - regression_output_mae: 0.0628 - regression_output_rmse: 0.1086 - regression_output_custom_mape: 16.2977 - final_output_mse: 0.0129 - final_output_mae: 0.0779 - final_output_rmse: 0.1103 - final_output_custom_mape: 62.6073 - val_loss: 0.0527 - val_classification_output_loss: 0.1067 - val_regression_output_loss: 0.0214 - val_final_output_loss: 0.0261 - val_classification_output_accuracy: 0.9619 - val_classification_output_auc: 0.9932 - val_regression_output_mse: 0.0150 - val_regression_output_mae: 0.0698 - val_regression_output_rmse: 0.1183 - val_regression_output_custom_mape: 19.7702 - val_final_output_mse: 0.0155 - val_final_output_mae: 0.0854 - val_final_output_rmse: 0.1206 - val_final_output_custom_mape: 66.0842 - lr: 1.8750e-05\n", "Epoch 54/100\n", "541/541 [==============================] - 52s 97ms/step - loss: 0.0379 - classification_output_loss: 0.0458 - regression_output_loss: 0.0182 - 
final_output_loss: 0.0228 - classification_output_accuracy: 0.9807 - classification_output_auc: 0.9987 - regression_output_mse: 0.0114 - regression_output_mae: 0.0602 - regression_output_rmse: 0.1042 - regression_output_custom_mape: 15.9407 - final_output_mse: 0.0118 - final_output_mae: 0.0754 - final_output_rmse: 0.1061 - final_output_custom_mape: 62.2892 - val_loss: 0.0531 - val_classification_output_loss: 0.1067 - val_regression_output_loss: 0.0221 - val_final_output_loss: 0.0268 - val_classification_output_accuracy: 0.9622 - val_classification_output_auc: 0.9932 - val_regression_output_mse: 0.0158 - val_regression_output_mae: 0.0721 - val_regression_output_rmse: 0.1214 - val_regression_output_custom_mape: 19.8702 - val_final_output_mse: 0.0163 - val_final_output_mae: 0.0878 - val_final_output_rmse: 0.1236 - val_final_output_custom_mape: 66.1805 - lr: 1.8750e-05\n", "Epoch 55/100\n", "541/541 [==============================] - 53s 98ms/step - loss: 0.0374 - classification_output_loss: 0.0457 - regression_output_loss: 0.0179 - final_output_loss: 0.0225 - classification_output_accuracy: 0.9805 - classification_output_auc: 0.9987 - regression_output_mse: 0.0110 - regression_output_mae: 0.0591 - regression_output_rmse: 0.1023 - regression_output_custom_mape: 15.8168 - final_output_mse: 0.0114 - final_output_mae: 0.0743 - final_output_rmse: 0.1042 - final_output_custom_mape: 62.1444 - val_loss: 0.0528 - val_classification_output_loss: 0.1091 - val_regression_output_loss: 0.0213 - val_final_output_loss: 0.0261 - val_classification_output_accuracy: 0.9621 - val_classification_output_auc: 0.9930 - val_regression_output_mse: 0.0150 - val_regression_output_mae: 0.0698 - val_regression_output_rmse: 0.1183 - val_regression_output_custom_mape: 19.7326 - val_final_output_mse: 0.0155 - val_final_output_mae: 0.0855 - val_final_output_rmse: 0.1205 - val_final_output_custom_mape: 66.0242 - lr: 1.8750e-05\n", "Epoch 56/100\n", "541/541 [==============================] - 53s 
98ms/step - loss: 0.0373 - classification_output_loss: 0.0459 - regression_output_loss: 0.0178 - final_output_loss: 0.0223 - classification_output_accuracy: 0.9804 - classification_output_auc: 0.9988 - regression_output_mse: 0.0109 - regression_output_mae: 0.0588 - regression_output_rmse: 0.1018 - regression_output_custom_mape: 15.8104 - final_output_mse: 0.0113 - final_output_mae: 0.0740 - final_output_rmse: 0.1037 - final_output_custom_mape: 62.1211 - val_loss: 0.0526 - val_classification_output_loss: 0.1054 - val_regression_output_loss: 0.0221 - val_final_output_loss: 0.0269 - val_classification_output_accuracy: 0.9621 - val_classification_output_auc: 0.9934 - val_regression_output_mse: 0.0157 - val_regression_output_mae: 0.0723 - val_regression_output_rmse: 0.1212 - val_regression_output_custom_mape: 19.7542 - val_final_output_mse: 0.0162 - val_final_output_mae: 0.0880 - val_final_output_rmse: 0.1234 - val_final_output_custom_mape: 66.0210 - lr: 1.8750e-05\n", "Epoch 57/100\n", "541/541 [==============================] - 51s 95ms/step - loss: 0.0371 - classification_output_loss: 0.0454 - regression_output_loss: 0.0179 - final_output_loss: 0.0225 - classification_output_accuracy: 0.9814 - classification_output_auc: 0.9987 - regression_output_mse: 0.0111 - regression_output_mae: 0.0592 - regression_output_rmse: 0.1025 - regression_output_custom_mape: 15.9130 - final_output_mse: 0.0115 - final_output_mae: 0.0743 - final_output_rmse: 0.1043 - final_output_custom_mape: 62.1983 - val_loss: 0.0515 - val_classification_output_loss: 0.1063 - val_regression_output_loss: 0.0207 - val_final_output_loss: 0.0255 - val_classification_output_accuracy: 0.9622 - val_classification_output_auc: 0.9934 - val_regression_output_mse: 0.0141 - val_regression_output_mae: 0.0677 - val_regression_output_rmse: 0.1149 - val_regression_output_custom_mape: 19.3432 - val_final_output_mse: 0.0146 - val_final_output_mae: 0.0834 - val_final_output_rmse: 0.1172 - val_final_output_custom_mape: 
65.6157 - lr: 1.8750e-05\n", "Epoch 58/100\n", "541/541 [==============================] - 49s 91ms/step - loss: 0.0369 - classification_output_loss: 0.0454 - regression_output_loss: 0.0178 - final_output_loss: 0.0223 - classification_output_accuracy: 0.9808 - classification_output_auc: 0.9988 - regression_output_mse: 0.0110 - regression_output_mae: 0.0588 - regression_output_rmse: 0.1019 - regression_output_custom_mape: 15.8985 - final_output_mse: 0.0113 - final_output_mae: 0.0739 - final_output_rmse: 0.1037 - final_output_custom_mape: 62.1227 - val_loss: 0.0522 - val_classification_output_loss: 0.1054 - val_regression_output_loss: 0.0219 - val_final_output_loss: 0.0266 - val_classification_output_accuracy: 0.9628 - val_classification_output_auc: 0.9934 - val_regression_output_mse: 0.0155 - val_regression_output_mae: 0.0716 - val_regression_output_rmse: 0.1202 - val_regression_output_custom_mape: 19.6480 - val_final_output_mse: 0.0160 - val_final_output_mae: 0.0873 - val_final_output_rmse: 0.1224 - val_final_output_custom_mape: 65.9271 - lr: 1.8750e-05\n", "Epoch 59/100\n", "541/541 [==============================] - 50s 92ms/step - loss: 0.0364 - classification_output_loss: 0.0447 - regression_output_loss: 0.0175 - final_output_loss: 0.0220 - classification_output_accuracy: 0.9814 - classification_output_auc: 0.9988 - regression_output_mse: 0.0106 - regression_output_mae: 0.0578 - regression_output_rmse: 0.1001 - regression_output_custom_mape: 15.7452 - final_output_mse: 0.0109 - final_output_mae: 0.0728 - final_output_rmse: 0.1019 - final_output_custom_mape: 61.9818 - val_loss: 0.0523 - val_classification_output_loss: 0.1092 - val_regression_output_loss: 0.0212 - val_final_output_loss: 0.0260 - val_classification_output_accuracy: 0.9616 - val_classification_output_auc: 0.9932 - val_regression_output_mse: 0.0148 - val_regression_output_mae: 0.0695 - val_regression_output_rmse: 0.1172 - val_regression_output_custom_mape: 19.5822 - val_final_output_mse: 0.0153 - 
val_final_output_mae: 0.0852 - val_final_output_rmse: 0.1195 - val_final_output_custom_mape: 65.8081 - lr: 1.8750e-05\n", "Epoch 60/100\n", "541/541 [==============================] - 53s 98ms/step - loss: 0.0366 - classification_output_loss: 0.0450 - regression_output_loss: 0.0177 - final_output_loss: 0.0222 - classification_output_accuracy: 0.9805 - classification_output_auc: 0.9988 - regression_output_mse: 0.0108 - regression_output_mae: 0.0586 - regression_output_rmse: 0.1015 - regression_output_custom_mape: 15.9146 - final_output_mse: 0.0112 - final_output_mae: 0.0736 - final_output_rmse: 0.1031 - final_output_custom_mape: 62.1485 - val_loss: 0.0508 - val_classification_output_loss: 0.0992 - val_regression_output_loss: 0.0220 - val_final_output_loss: 0.0268 - val_classification_output_accuracy: 0.9634 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0156 - val_regression_output_mae: 0.0720 - val_regression_output_rmse: 0.1205 - val_regression_output_custom_mape: 19.6451 - val_final_output_mse: 0.0161 - val_final_output_mae: 0.0877 - val_final_output_rmse: 0.1228 - val_final_output_custom_mape: 65.9773 - lr: 1.8750e-05\n", "Epoch 61/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0370 - classification_output_loss: 0.0454 - regression_output_loss: 0.0182 - final_output_loss: 0.0228 - classification_output_accuracy: 0.9808 - classification_output_auc: 0.9987 - regression_output_mse: 0.0115 - regression_output_mae: 0.0602 - regression_output_rmse: 0.1040 - regression_output_custom_mape: 16.0666 - final_output_mse: 0.0119 - final_output_mae: 0.0753 - final_output_rmse: 0.1058 - final_output_custom_mape: 62.3137\n", "Epoch 61 Detailed Metrics:\n", "541/541 [==============================] - 50s 92ms/step - loss: 0.0370 - classification_output_loss: 0.0454 - regression_output_loss: 0.0182 - final_output_loss: 0.0228 - classification_output_accuracy: 0.9808 - classification_output_auc: 0.9987 - regression_output_mse: 0.0115 
- regression_output_mae: 0.0602 - regression_output_rmse: 0.1040 - regression_output_custom_mape: 16.0666 - final_output_mse: 0.0119 - final_output_mae: 0.0753 - final_output_rmse: 0.1058 - final_output_custom_mape: 62.3137 - val_loss: 0.0514 - val_classification_output_loss: 0.0997 - val_regression_output_loss: 0.0227 - val_final_output_loss: 0.0275 - val_classification_output_accuracy: 0.9638 - val_classification_output_auc: 0.9939 - val_regression_output_mse: 0.0164 - val_regression_output_mae: 0.0742 - val_regression_output_rmse: 0.1240 - val_regression_output_custom_mape: 19.8853 - val_final_output_mse: 0.0169 - val_final_output_mae: 0.0900 - val_final_output_rmse: 0.1261 - val_final_output_custom_mape: 66.2096 - lr: 1.8750e-05\n", "Epoch 62/100\n", "541/541 [==============================] - 52s 96ms/step - loss: 0.0363 - classification_output_loss: 0.0450 - regression_output_loss: 0.0176 - final_output_loss: 0.0221 - classification_output_accuracy: 0.9809 - classification_output_auc: 0.9988 - regression_output_mse: 0.0107 - regression_output_mae: 0.0582 - regression_output_rmse: 0.1005 - regression_output_custom_mape: 15.7357 - final_output_mse: 0.0110 - final_output_mae: 0.0733 - final_output_rmse: 0.1024 - final_output_custom_mape: 62.0327 - val_loss: 0.0506 - val_classification_output_loss: 0.1033 - val_regression_output_loss: 0.0210 - val_final_output_loss: 0.0257 - val_classification_output_accuracy: 0.9631 - val_classification_output_auc: 0.9935 - val_regression_output_mse: 0.0144 - val_regression_output_mae: 0.0687 - val_regression_output_rmse: 0.1160 - val_regression_output_custom_mape: 19.3542 - val_final_output_mse: 0.0148 - val_final_output_mae: 0.0844 - val_final_output_rmse: 0.1183 - val_final_output_custom_mape: 65.6355 - lr: 1.8750e-05\n", "Epoch 63/100\n", "541/541 [==============================] - 49s 91ms/step - loss: 0.0362 - classification_output_loss: 0.0452 - regression_output_loss: 0.0175 - final_output_loss: 0.0221 - 
classification_output_accuracy: 0.9809 - classification_output_auc: 0.9988 - regression_output_mse: 0.0106 - regression_output_mae: 0.0579 - regression_output_rmse: 0.1000 - regression_output_custom_mape: 15.7881 - final_output_mse: 0.0109 - final_output_mae: 0.0730 - final_output_rmse: 0.1019 - final_output_custom_mape: 62.0206 - val_loss: 0.0501 - val_classification_output_loss: 0.1035 - val_regression_output_loss: 0.0204 - val_final_output_loss: 0.0252 - val_classification_output_accuracy: 0.9629 - val_classification_output_auc: 0.9936 - val_regression_output_mse: 0.0138 - val_regression_output_mae: 0.0670 - val_regression_output_rmse: 0.1134 - val_regression_output_custom_mape: 19.1399 - val_final_output_mse: 0.0143 - val_final_output_mae: 0.0827 - val_final_output_rmse: 0.1158 - val_final_output_custom_mape: 65.4372 - lr: 1.8750e-05\n", "Epoch 64/100\n", "541/541 [==============================] - 50s 93ms/step - loss: 0.0363 - classification_output_loss: 0.0456 - regression_output_loss: 0.0178 - final_output_loss: 0.0223 - classification_output_accuracy: 0.9805 - classification_output_auc: 0.9988 - regression_output_mse: 0.0108 - regression_output_mae: 0.0587 - regression_output_rmse: 0.1015 - regression_output_custom_mape: 15.8356 - final_output_mse: 0.0112 - final_output_mae: 0.0737 - final_output_rmse: 0.1032 - final_output_custom_mape: 62.0622 - val_loss: 0.0489 - val_classification_output_loss: 0.0984 - val_regression_output_loss: 0.0203 - val_final_output_loss: 0.0251 - val_classification_output_accuracy: 0.9637 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0136 - val_regression_output_mae: 0.0667 - val_regression_output_rmse: 0.1128 - val_regression_output_custom_mape: 18.9880 - val_final_output_mse: 0.0141 - val_final_output_mae: 0.0824 - val_final_output_rmse: 0.1152 - val_final_output_custom_mape: 65.3033 - lr: 1.8750e-05\n", "Epoch 65/100\n", "541/541 [==============================] - 48s 89ms/step - loss: 0.0361 - 
classification_output_loss: 0.0442 - regression_output_loss: 0.0179 - final_output_loss: 0.0225 - classification_output_accuracy: 0.9813 - classification_output_auc: 0.9988 - regression_output_mse: 0.0110 - regression_output_mae: 0.0592 - regression_output_rmse: 0.1024 - regression_output_custom_mape: 15.8189 - final_output_mse: 0.0114 - final_output_mae: 0.0744 - final_output_rmse: 0.1043 - final_output_custom_mape: 62.1326 - val_loss: 0.0519 - val_classification_output_loss: 0.1075 - val_regression_output_loss: 0.0218 - val_final_output_loss: 0.0266 - val_classification_output_accuracy: 0.9624 - val_classification_output_auc: 0.9931 - val_regression_output_mse: 0.0154 - val_regression_output_mae: 0.0715 - val_regression_output_rmse: 0.1198 - val_regression_output_custom_mape: 19.5822 - val_final_output_mse: 0.0159 - val_final_output_mae: 0.0872 - val_final_output_rmse: 0.1221 - val_final_output_custom_mape: 65.8933 - lr: 1.8750e-05\n", "Epoch 66/100\n", "541/541 [==============================] - 53s 98ms/step - loss: 0.0359 - classification_output_loss: 0.0444 - regression_output_loss: 0.0178 - final_output_loss: 0.0223 - classification_output_accuracy: 0.9816 - classification_output_auc: 0.9987 - regression_output_mse: 0.0109 - regression_output_mae: 0.0587 - regression_output_rmse: 0.1016 - regression_output_custom_mape: 15.7985 - final_output_mse: 0.0113 - final_output_mae: 0.0739 - final_output_rmse: 0.1034 - final_output_custom_mape: 62.1004 - val_loss: 0.0530 - val_classification_output_loss: 0.1123 - val_regression_output_loss: 0.0221 - val_final_output_loss: 0.0268 - val_classification_output_accuracy: 0.9612 - val_classification_output_auc: 0.9927 - val_regression_output_mse: 0.0159 - val_regression_output_mae: 0.0723 - val_regression_output_rmse: 0.1200 - val_regression_output_custom_mape: 19.3290 - val_final_output_mse: 0.0164 - val_final_output_mae: 0.0880 - val_final_output_rmse: 0.1223 - val_final_output_custom_mape: 65.5677 - lr: 1.8750e-05\n", 
"Epoch 67/100\n", "541/541 [==============================] - 50s 93ms/step - loss: 0.0363 - classification_output_loss: 0.0450 - regression_output_loss: 0.0182 - final_output_loss: 0.0227 - classification_output_accuracy: 0.9805 - classification_output_auc: 0.9988 - regression_output_mse: 0.0114 - regression_output_mae: 0.0602 - regression_output_rmse: 0.1039 - regression_output_custom_mape: 15.9441 - final_output_mse: 0.0117 - final_output_mae: 0.0753 - final_output_rmse: 0.1057 - final_output_custom_mape: 62.2372 - val_loss: 0.0498 - val_classification_output_loss: 0.0979 - val_regression_output_loss: 0.0219 - val_final_output_loss: 0.0267 - val_classification_output_accuracy: 0.9640 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0154 - val_regression_output_mae: 0.0716 - val_regression_output_rmse: 0.1197 - val_regression_output_custom_mape: 19.4101 - val_final_output_mse: 0.0159 - val_final_output_mae: 0.0874 - val_final_output_rmse: 0.1220 - val_final_output_custom_mape: 65.7150 - lr: 1.8750e-05\n", "Epoch 68/100\n", "541/541 [==============================] - 50s 93ms/step - loss: 0.0359 - classification_output_loss: 0.0443 - regression_output_loss: 0.0180 - final_output_loss: 0.0225 - classification_output_accuracy: 0.9813 - classification_output_auc: 0.9988 - regression_output_mse: 0.0112 - regression_output_mae: 0.0593 - regression_output_rmse: 0.1024 - regression_output_custom_mape: 15.8022 - final_output_mse: 0.0115 - final_output_mae: 0.0745 - final_output_rmse: 0.1042 - final_output_custom_mape: 62.1159 - val_loss: 0.0512 - val_classification_output_loss: 0.1075 - val_regression_output_loss: 0.0212 - val_final_output_loss: 0.0260 - val_classification_output_accuracy: 0.9619 - val_classification_output_auc: 0.9932 - val_regression_output_mse: 0.0146 - val_regression_output_mae: 0.0696 - val_regression_output_rmse: 0.1169 - val_regression_output_custom_mape: 19.3851 - val_final_output_mse: 0.0151 - val_final_output_mae: 0.0852 - 
val_final_output_rmse: 0.1192 - val_final_output_custom_mape: 65.6778 - lr: 1.8750e-05\n", "Epoch 69/100\n", "541/541 [==============================] - 52s 96ms/step - loss: 0.0352 - classification_output_loss: 0.0445 - regression_output_loss: 0.0171 - final_output_loss: 0.0216 - classification_output_accuracy: 0.9812 - classification_output_auc: 0.9988 - regression_output_mse: 0.0101 - regression_output_mae: 0.0566 - regression_output_rmse: 0.0980 - regression_output_custom_mape: 15.5609 - final_output_mse: 0.0105 - final_output_mae: 0.0717 - final_output_rmse: 0.0999 - final_output_custom_mape: 61.8379 - val_loss: 0.0503 - val_classification_output_loss: 0.1062 - val_regression_output_loss: 0.0206 - val_final_output_loss: 0.0254 - val_classification_output_accuracy: 0.9621 - val_classification_output_auc: 0.9934 - val_regression_output_mse: 0.0139 - val_regression_output_mae: 0.0676 - val_regression_output_rmse: 0.1138 - val_regression_output_custom_mape: 19.0459 - val_final_output_mse: 0.0144 - val_final_output_mae: 0.0833 - val_final_output_rmse: 0.1161 - val_final_output_custom_mape: 65.3141 - lr: 1.8750e-05\n", "Epoch 70/100\n", "541/541 [==============================] - 52s 96ms/step - loss: 0.0352 - classification_output_loss: 0.0445 - regression_output_loss: 0.0172 - final_output_loss: 0.0217 - classification_output_accuracy: 0.9812 - classification_output_auc: 0.9988 - regression_output_mse: 0.0102 - regression_output_mae: 0.0569 - regression_output_rmse: 0.0984 - regression_output_custom_mape: 15.6253 - final_output_mse: 0.0105 - final_output_mae: 0.0720 - final_output_rmse: 0.1003 - final_output_custom_mape: 61.8611 - val_loss: 0.0495 - val_classification_output_loss: 0.1032 - val_regression_output_loss: 0.0205 - val_final_output_loss: 0.0253 - val_classification_output_accuracy: 0.9628 - val_classification_output_auc: 0.9937 - val_regression_output_mse: 0.0137 - val_regression_output_mae: 0.0672 - val_regression_output_rmse: 0.1134 - 
val_regression_output_custom_mape: 19.0341 - val_final_output_mse: 0.0142 - val_final_output_mae: 0.0830 - val_final_output_rmse: 0.1158 - val_final_output_custom_mape: 65.3020 - lr: 1.8750e-05\n", "Epoch 71/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0351 - classification_output_loss: 0.0442 - regression_output_loss: 0.0173 - final_output_loss: 0.0218 - classification_output_accuracy: 0.9817 - classification_output_auc: 0.9988 - regression_output_mse: 0.0103 - regression_output_mae: 0.0571 - regression_output_rmse: 0.0989 - regression_output_custom_mape: 15.6037 - final_output_mse: 0.0107 - final_output_mae: 0.0722 - final_output_rmse: 0.1007 - final_output_custom_mape: 61.8687\n", "Epoch 71: ReduceLROnPlateau reducing learning rate to 9.375000445288606e-06.\n", "\n", "Epoch 71 Detailed Metrics:\n", "541/541 [==============================] - 57s 106ms/step - loss: 0.0351 - classification_output_loss: 0.0442 - regression_output_loss: 0.0173 - final_output_loss: 0.0218 - classification_output_accuracy: 0.9817 - classification_output_auc: 0.9988 - regression_output_mse: 0.0103 - regression_output_mae: 0.0571 - regression_output_rmse: 0.0989 - regression_output_custom_mape: 15.6037 - final_output_mse: 0.0107 - final_output_mae: 0.0722 - final_output_rmse: 0.1007 - final_output_custom_mape: 61.8687 - val_loss: 0.0484 - val_classification_output_loss: 0.0953 - val_regression_output_loss: 0.0211 - val_final_output_loss: 0.0259 - val_classification_output_accuracy: 0.9642 - val_classification_output_auc: 0.9944 - val_regression_output_mse: 0.0144 - val_regression_output_mae: 0.0692 - val_regression_output_rmse: 0.1160 - val_regression_output_custom_mape: 19.0884 - val_final_output_mse: 0.0149 - val_final_output_mae: 0.0850 - val_final_output_rmse: 0.1184 - val_final_output_custom_mape: 65.3852 - lr: 1.8750e-05\n", "Epoch 72/100\n", "541/541 [==============================] - 55s 103ms/step - loss: 0.0349 - classification_output_loss: 0.0451 - 
regression_output_loss: 0.0169 - final_output_loss: 0.0214 - classification_output_accuracy: 0.9807 - classification_output_auc: 0.9987 - regression_output_mse: 0.0098 - regression_output_mae: 0.0559 - regression_output_rmse: 0.0967 - regression_output_custom_mape: 15.4296 - final_output_mse: 0.0102 - final_output_mae: 0.0711 - final_output_rmse: 0.0987 - final_output_custom_mape: 61.7132 - val_loss: 0.0474 - val_classification_output_loss: 0.0992 - val_regression_output_loss: 0.0190 - val_final_output_loss: 0.0238 - val_classification_output_accuracy: 0.9634 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0120 - val_regression_output_mae: 0.0625 - val_regression_output_rmse: 0.1064 - val_regression_output_custom_mape: 18.3833 - val_final_output_mse: 0.0125 - val_final_output_mae: 0.0782 - val_final_output_rmse: 0.1088 - val_final_output_custom_mape: 64.7322 - lr: 9.3750e-06\n", "Epoch 73/100\n", "541/541 [==============================] - 59s 108ms/step - loss: 0.0346 - classification_output_loss: 0.0447 - regression_output_loss: 0.0166 - final_output_loss: 0.0212 - classification_output_accuracy: 0.9809 - classification_output_auc: 0.9988 - regression_output_mse: 0.0095 - regression_output_mae: 0.0551 - regression_output_rmse: 0.0952 - regression_output_custom_mape: 15.3131 - final_output_mse: 0.0099 - final_output_mae: 0.0702 - final_output_rmse: 0.0972 - final_output_custom_mape: 61.5896 - val_loss: 0.0482 - val_classification_output_loss: 0.1015 - val_regression_output_loss: 0.0195 - val_final_output_loss: 0.0242 - val_classification_output_accuracy: 0.9622 - val_classification_output_auc: 0.9938 - val_regression_output_mse: 0.0125 - val_regression_output_mae: 0.0640 - val_regression_output_rmse: 0.1086 - val_regression_output_custom_mape: 18.6132 - val_final_output_mse: 0.0130 - val_final_output_mae: 0.0797 - val_final_output_rmse: 0.1110 - val_final_output_custom_mape: 64.9506 - lr: 9.3750e-06\n", "Epoch 74/100\n", "541/541 
[==============================] - 58s 107ms/step - loss: 0.0346 - classification_output_loss: 0.0452 - regression_output_loss: 0.0166 - final_output_loss: 0.0211 - classification_output_accuracy: 0.9804 - classification_output_auc: 0.9988 - regression_output_mse: 0.0094 - regression_output_mae: 0.0549 - regression_output_rmse: 0.0948 - regression_output_custom_mape: 15.3850 - final_output_mse: 0.0098 - final_output_mae: 0.0701 - final_output_rmse: 0.0969 - final_output_custom_mape: 61.6494 - val_loss: 0.0474 - val_classification_output_loss: 0.0998 - val_regression_output_loss: 0.0189 - val_final_output_loss: 0.0237 - val_classification_output_accuracy: 0.9633 - val_classification_output_auc: 0.9939 - val_regression_output_mse: 0.0119 - val_regression_output_mae: 0.0621 - val_regression_output_rmse: 0.1062 - val_regression_output_custom_mape: 18.4668 - val_final_output_mse: 0.0124 - val_final_output_mae: 0.0779 - val_final_output_rmse: 0.1087 - val_final_output_custom_mape: 64.7637 - lr: 9.3750e-06\n", "Epoch 75/100\n", "541/541 [==============================] - 60s 110ms/step - loss: 0.0346 - classification_output_loss: 0.0451 - regression_output_loss: 0.0167 - final_output_loss: 0.0212 - classification_output_accuracy: 0.9805 - classification_output_auc: 0.9988 - regression_output_mse: 0.0095 - regression_output_mae: 0.0552 - regression_output_rmse: 0.0954 - regression_output_custom_mape: 15.4044 - final_output_mse: 0.0099 - final_output_mae: 0.0703 - final_output_rmse: 0.0974 - final_output_custom_mape: 61.6629 - val_loss: 0.0472 - val_classification_output_loss: 0.0992 - val_regression_output_loss: 0.0189 - val_final_output_loss: 0.0237 - val_classification_output_accuracy: 0.9635 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0119 - val_regression_output_mae: 0.0621 - val_regression_output_rmse: 0.1058 - val_regression_output_custom_mape: 18.3245 - val_final_output_mse: 0.0124 - val_final_output_mae: 0.0779 - val_final_output_rmse: 
0.1083 - val_final_output_custom_mape: 64.6469 - lr: 9.3750e-06\n", "Epoch 76/100\n", "541/541 [==============================] - 58s 107ms/step - loss: 0.0344 - classification_output_loss: 0.0447 - regression_output_loss: 0.0166 - final_output_loss: 0.0211 - classification_output_accuracy: 0.9812 - classification_output_auc: 0.9988 - regression_output_mse: 0.0095 - regression_output_mae: 0.0549 - regression_output_rmse: 0.0950 - regression_output_custom_mape: 15.3963 - final_output_mse: 0.0099 - final_output_mae: 0.0700 - final_output_rmse: 0.0970 - final_output_custom_mape: 61.6328 - val_loss: 0.0474 - val_classification_output_loss: 0.0992 - val_regression_output_loss: 0.0192 - val_final_output_loss: 0.0239 - val_classification_output_accuracy: 0.9632 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0122 - val_regression_output_mae: 0.0630 - val_regression_output_rmse: 0.1075 - val_regression_output_custom_mape: 18.5056 - val_final_output_mse: 0.0126 - val_final_output_mae: 0.0787 - val_final_output_rmse: 0.1099 - val_final_output_custom_mape: 64.8788 - lr: 9.3750e-06\n", "Epoch 77/100\n", "541/541 [==============================] - 59s 108ms/step - loss: 0.0344 - classification_output_loss: 0.0449 - regression_output_loss: 0.0166 - final_output_loss: 0.0212 - classification_output_accuracy: 0.9808 - classification_output_auc: 0.9988 - regression_output_mse: 0.0095 - regression_output_mae: 0.0550 - regression_output_rmse: 0.0953 - regression_output_custom_mape: 15.3243 - final_output_mse: 0.0099 - final_output_mae: 0.0701 - final_output_rmse: 0.0972 - final_output_custom_mape: 61.6194 - val_loss: 0.0467 - val_classification_output_loss: 0.0981 - val_regression_output_loss: 0.0186 - val_final_output_loss: 0.0234 - val_classification_output_accuracy: 0.9630 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0115 - val_regression_output_mae: 0.0613 - val_regression_output_rmse: 0.1045 - val_regression_output_custom_mape: 
18.1003 - val_final_output_mse: 0.0120 - val_final_output_mae: 0.0769 - val_final_output_rmse: 0.1070 - val_final_output_custom_mape: 64.4979 - lr: 9.3750e-06\n", "Epoch 78/100\n", "541/541 [==============================] - 56s 104ms/step - loss: 0.0343 - classification_output_loss: 0.0445 - regression_output_loss: 0.0166 - final_output_loss: 0.0212 - classification_output_accuracy: 0.9808 - classification_output_auc: 0.9988 - regression_output_mse: 0.0096 - regression_output_mae: 0.0550 - regression_output_rmse: 0.0954 - regression_output_custom_mape: 15.3804 - final_output_mse: 0.0100 - final_output_mae: 0.0701 - final_output_rmse: 0.0975 - final_output_custom_mape: 61.6332 - val_loss: 0.0475 - val_classification_output_loss: 0.0991 - val_regression_output_loss: 0.0195 - val_final_output_loss: 0.0243 - val_classification_output_accuracy: 0.9632 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0125 - val_regression_output_mae: 0.0641 - val_regression_output_rmse: 0.1089 - val_regression_output_custom_mape: 18.6073 - val_final_output_mse: 0.0130 - val_final_output_mae: 0.0798 - val_final_output_rmse: 0.1113 - val_final_output_custom_mape: 64.9554 - lr: 9.3750e-06\n", "Epoch 79/100\n", "541/541 [==============================] - 58s 107ms/step - loss: 0.0342 - classification_output_loss: 0.0444 - regression_output_loss: 0.0166 - final_output_loss: 0.0211 - classification_output_accuracy: 0.9810 - classification_output_auc: 0.9988 - regression_output_mse: 0.0094 - regression_output_mae: 0.0549 - regression_output_rmse: 0.0947 - regression_output_custom_mape: 15.4022 - final_output_mse: 0.0098 - final_output_mae: 0.0700 - final_output_rmse: 0.0967 - final_output_custom_mape: 61.6269 - val_loss: 0.0474 - val_classification_output_loss: 0.0996 - val_regression_output_loss: 0.0192 - val_final_output_loss: 0.0240 - val_classification_output_accuracy: 0.9635 - val_classification_output_auc: 0.9939 - val_regression_output_mse: 0.0122 - 
val_regression_output_mae: 0.0631 - val_regression_output_rmse: 0.1070 - val_regression_output_custom_mape: 18.3915 - val_final_output_mse: 0.0127 - val_final_output_mae: 0.0788 - val_final_output_rmse: 0.1094 - val_final_output_custom_mape: 64.7190 - lr: 9.3750e-06\n", "Epoch 80/100\n", "541/541 [==============================] - 57s 106ms/step - loss: 0.0342 - classification_output_loss: 0.0446 - regression_output_loss: 0.0166 - final_output_loss: 0.0212 - classification_output_accuracy: 0.9810 - classification_output_auc: 0.9988 - regression_output_mse: 0.0095 - regression_output_mae: 0.0549 - regression_output_rmse: 0.0951 - regression_output_custom_mape: 15.3161 - final_output_mse: 0.0099 - final_output_mae: 0.0701 - final_output_rmse: 0.0971 - final_output_custom_mape: 61.6155 - val_loss: 0.0465 - val_classification_output_loss: 0.0980 - val_regression_output_loss: 0.0186 - val_final_output_loss: 0.0234 - val_classification_output_accuracy: 0.9640 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0116 - val_regression_output_mae: 0.0613 - val_regression_output_rmse: 0.1044 - val_regression_output_custom_mape: 18.1482 - val_final_output_mse: 0.0121 - val_final_output_mae: 0.0771 - val_final_output_rmse: 0.1069 - val_final_output_custom_mape: 64.5007 - lr: 9.3750e-06\n", "Epoch 81/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0338 - classification_output_loss: 0.0438 - regression_output_loss: 0.0163 - final_output_loss: 0.0209 - classification_output_accuracy: 0.9813 - classification_output_auc: 0.9989 - regression_output_mse: 0.0091 - regression_output_mae: 0.0541 - regression_output_rmse: 0.0935 - regression_output_custom_mape: 15.2070 - final_output_mse: 0.0095 - final_output_mae: 0.0693 - final_output_rmse: 0.0956 - final_output_custom_mape: 61.4725\n", "Epoch 81 Detailed Metrics:\n", "541/541 [==============================] - 55s 101ms/step - loss: 0.0338 - classification_output_loss: 0.0438 - 
regression_output_loss: 0.0163 - final_output_loss: 0.0209 - classification_output_accuracy: 0.9813 - classification_output_auc: 0.9989 - regression_output_mse: 0.0091 - regression_output_mae: 0.0541 - regression_output_rmse: 0.0935 - regression_output_custom_mape: 15.2070 - final_output_mse: 0.0095 - final_output_mae: 0.0693 - final_output_rmse: 0.0956 - final_output_custom_mape: 61.4725 - val_loss: 0.0470 - val_classification_output_loss: 0.0990 - val_regression_output_loss: 0.0190 - val_final_output_loss: 0.0238 - val_classification_output_accuracy: 0.9636 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0120 - val_regression_output_mae: 0.0624 - val_regression_output_rmse: 0.1057 - val_regression_output_custom_mape: 18.1129 - val_final_output_mse: 0.0125 - val_final_output_mae: 0.0782 - val_final_output_rmse: 0.1083 - val_final_output_custom_mape: 64.4401 - lr: 9.3750e-06\n", "Epoch 82/100\n", "541/541 [==============================] - 56s 104ms/step - loss: 0.0343 - classification_output_loss: 0.0450 - regression_output_loss: 0.0167 - final_output_loss: 0.0213 - classification_output_accuracy: 0.9806 - classification_output_auc: 0.9988 - regression_output_mse: 0.0096 - regression_output_mae: 0.0552 - regression_output_rmse: 0.0954 - regression_output_custom_mape: 15.3545 - final_output_mse: 0.0100 - final_output_mae: 0.0704 - final_output_rmse: 0.0975 - final_output_custom_mape: 61.6195 - val_loss: 0.0463 - val_classification_output_loss: 0.0981 - val_regression_output_loss: 0.0184 - val_final_output_loss: 0.0232 - val_classification_output_accuracy: 0.9636 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0113 - val_regression_output_mae: 0.0606 - val_regression_output_rmse: 0.1037 - val_regression_output_custom_mape: 18.1365 - val_final_output_mse: 0.0118 - val_final_output_mae: 0.0763 - val_final_output_rmse: 0.1062 - val_final_output_custom_mape: 64.4947 - lr: 9.3750e-06\n", "Epoch 83/100\n", "541/541 
[==============================] - 55s 103ms/step - loss: 0.0340 - classification_output_loss: 0.0442 - regression_output_loss: 0.0165 - final_output_loss: 0.0211 - classification_output_accuracy: 0.9810 - classification_output_auc: 0.9988 - regression_output_mse: 0.0094 - regression_output_mae: 0.0547 - regression_output_rmse: 0.0945 - regression_output_custom_mape: 15.2821 - final_output_mse: 0.0098 - final_output_mae: 0.0699 - final_output_rmse: 0.0967 - final_output_custom_mape: 61.5901 - val_loss: 0.0463 - val_classification_output_loss: 0.0993 - val_regression_output_loss: 0.0182 - val_final_output_loss: 0.0230 - val_classification_output_accuracy: 0.9635 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0111 - val_regression_output_mae: 0.0599 - val_regression_output_rmse: 0.1022 - val_regression_output_custom_mape: 17.9433 - val_final_output_mse: 0.0116 - val_final_output_mae: 0.0756 - val_final_output_rmse: 0.1048 - val_final_output_custom_mape: 64.2812 - lr: 9.3750e-06\n", "Epoch 84/100\n", "541/541 [==============================] - 57s 105ms/step - loss: 0.0337 - classification_output_loss: 0.0438 - regression_output_loss: 0.0163 - final_output_loss: 0.0209 - classification_output_accuracy: 0.9814 - classification_output_auc: 0.9988 - regression_output_mse: 0.0092 - regression_output_mae: 0.0541 - regression_output_rmse: 0.0936 - regression_output_custom_mape: 15.1612 - final_output_mse: 0.0096 - final_output_mae: 0.0693 - final_output_rmse: 0.0957 - final_output_custom_mape: 61.4662 - val_loss: 0.0460 - val_classification_output_loss: 0.0974 - val_regression_output_loss: 0.0183 - val_final_output_loss: 0.0231 - val_classification_output_accuracy: 0.9636 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0113 - val_regression_output_mae: 0.0604 - val_regression_output_rmse: 0.1033 - val_regression_output_custom_mape: 18.0828 - val_final_output_mse: 0.0118 - val_final_output_mae: 0.0761 - val_final_output_rmse: 
0.1059 - val_final_output_custom_mape: 64.4077 - lr: 9.3750e-06\n", "Epoch 85/100\n", "541/541 [==============================] - 58s 108ms/step - loss: 0.0340 - classification_output_loss: 0.0444 - regression_output_loss: 0.0166 - final_output_loss: 0.0211 - classification_output_accuracy: 0.9811 - classification_output_auc: 0.9988 - regression_output_mse: 0.0095 - regression_output_mae: 0.0550 - regression_output_rmse: 0.0951 - regression_output_custom_mape: 15.3615 - final_output_mse: 0.0098 - final_output_mae: 0.0701 - final_output_rmse: 0.0970 - final_output_custom_mape: 61.6132 - val_loss: 0.0458 - val_classification_output_loss: 0.0976 - val_regression_output_loss: 0.0180 - val_final_output_loss: 0.0228 - val_classification_output_accuracy: 0.9638 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0109 - val_regression_output_mae: 0.0593 - val_regression_output_rmse: 0.1015 - val_regression_output_custom_mape: 17.8655 - val_final_output_mse: 0.0114 - val_final_output_mae: 0.0750 - val_final_output_rmse: 0.1041 - val_final_output_custom_mape: 64.2029 - lr: 9.3750e-06\n", "Epoch 86/100\n", "541/541 [==============================] - 57s 105ms/step - loss: 0.0337 - classification_output_loss: 0.0445 - regression_output_loss: 0.0163 - final_output_loss: 0.0209 - classification_output_accuracy: 0.9808 - classification_output_auc: 0.9988 - regression_output_mse: 0.0091 - regression_output_mae: 0.0541 - regression_output_rmse: 0.0935 - regression_output_custom_mape: 15.1750 - final_output_mse: 0.0095 - final_output_mae: 0.0693 - final_output_rmse: 0.0956 - final_output_custom_mape: 61.4570 - val_loss: 0.0466 - val_classification_output_loss: 0.0989 - val_regression_output_loss: 0.0188 - val_final_output_loss: 0.0235 - val_classification_output_accuracy: 0.9634 - val_classification_output_auc: 0.9939 - val_regression_output_mse: 0.0117 - val_regression_output_mae: 0.0617 - val_regression_output_rmse: 0.1049 - val_regression_output_custom_mape: 
18.1680 - val_final_output_mse: 0.0122 - val_final_output_mae: 0.0774 - val_final_output_rmse: 0.1074 - val_final_output_custom_mape: 64.5077 - lr: 9.3750e-06\n", "Epoch 87/100\n", "541/541 [==============================] - 57s 105ms/step - loss: 0.0336 - classification_output_loss: 0.0441 - regression_output_loss: 0.0163 - final_output_loss: 0.0208 - classification_output_accuracy: 0.9812 - classification_output_auc: 0.9988 - regression_output_mse: 0.0091 - regression_output_mae: 0.0540 - regression_output_rmse: 0.0932 - regression_output_custom_mape: 15.2261 - final_output_mse: 0.0095 - final_output_mae: 0.0692 - final_output_rmse: 0.0953 - final_output_custom_mape: 61.4855 - val_loss: 0.0456 - val_classification_output_loss: 0.0966 - val_regression_output_loss: 0.0182 - val_final_output_loss: 0.0230 - val_classification_output_accuracy: 0.9638 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0111 - val_regression_output_mae: 0.0599 - val_regression_output_rmse: 0.1021 - val_regression_output_custom_mape: 17.7784 - val_final_output_mse: 0.0116 - val_final_output_mae: 0.0757 - val_final_output_rmse: 0.1047 - val_final_output_custom_mape: 64.1520 - lr: 9.3750e-06\n", "Epoch 88/100\n", "541/541 [==============================] - 58s 106ms/step - loss: 0.0333 - classification_output_loss: 0.0437 - regression_output_loss: 0.0160 - final_output_loss: 0.0206 - classification_output_accuracy: 0.9814 - classification_output_auc: 0.9989 - regression_output_mse: 0.0088 - regression_output_mae: 0.0532 - regression_output_rmse: 0.0920 - regression_output_custom_mape: 15.0525 - final_output_mse: 0.0092 - final_output_mae: 0.0684 - final_output_rmse: 0.0941 - final_output_custom_mape: 61.3526 - val_loss: 0.0466 - val_classification_output_loss: 0.1009 - val_regression_output_loss: 0.0184 - val_final_output_loss: 0.0232 - val_classification_output_accuracy: 0.9622 - val_classification_output_auc: 0.9939 - val_regression_output_mse: 0.0113 - 
val_regression_output_mae: 0.0606 - val_regression_output_rmse: 0.1034 - val_regression_output_custom_mape: 17.9954 - val_final_output_mse: 0.0118 - val_final_output_mae: 0.0763 - val_final_output_rmse: 0.1059 - val_final_output_custom_mape: 64.3141 - lr: 9.3750e-06\n", "Epoch 89/100\n", "541/541 [==============================] - 56s 103ms/step - loss: 0.0335 - classification_output_loss: 0.0435 - regression_output_loss: 0.0164 - final_output_loss: 0.0210 - classification_output_accuracy: 0.9815 - classification_output_auc: 0.9988 - regression_output_mse: 0.0092 - regression_output_mae: 0.0543 - regression_output_rmse: 0.0938 - regression_output_custom_mape: 15.2099 - final_output_mse: 0.0096 - final_output_mae: 0.0695 - final_output_rmse: 0.0960 - final_output_custom_mape: 61.5154 - val_loss: 0.0461 - val_classification_output_loss: 0.0985 - val_regression_output_loss: 0.0184 - val_final_output_loss: 0.0232 - val_classification_output_accuracy: 0.9639 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0113 - val_regression_output_mae: 0.0606 - val_regression_output_rmse: 0.1032 - val_regression_output_custom_mape: 17.9942 - val_final_output_mse: 0.0118 - val_final_output_mae: 0.0764 - val_final_output_rmse: 0.1057 - val_final_output_custom_mape: 64.3071 - lr: 9.3750e-06\n", "Epoch 90/100\n", "541/541 [==============================] - 61s 112ms/step - loss: 0.0334 - classification_output_loss: 0.0432 - regression_output_loss: 0.0163 - final_output_loss: 0.0209 - classification_output_accuracy: 0.9818 - classification_output_auc: 0.9989 - regression_output_mse: 0.0093 - regression_output_mae: 0.0541 - regression_output_rmse: 0.0936 - regression_output_custom_mape: 15.1568 - final_output_mse: 0.0096 - final_output_mae: 0.0693 - final_output_rmse: 0.0957 - final_output_custom_mape: 61.5074 - val_loss: 0.0458 - val_classification_output_loss: 0.0999 - val_regression_output_loss: 0.0177 - val_final_output_loss: 0.0225 - 
val_classification_output_accuracy: 0.9637 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0106 - val_regression_output_mae: 0.0584 - val_regression_output_rmse: 0.1002 - val_regression_output_custom_mape: 17.7691 - val_final_output_mse: 0.0111 - val_final_output_mae: 0.0742 - val_final_output_rmse: 0.1028 - val_final_output_custom_mape: 64.1077 - lr: 9.3750e-06\n", "Epoch 91/100\n", "541/541 [==============================] - ETA: 0s - loss: 0.0335 - classification_output_loss: 0.0439 - regression_output_loss: 0.0164 - final_output_loss: 0.0210 - classification_output_accuracy: 0.9812 - classification_output_auc: 0.9988 - regression_output_mse: 0.0093 - regression_output_mae: 0.0543 - regression_output_rmse: 0.0940 - regression_output_custom_mape: 15.1890 - final_output_mse: 0.0097 - final_output_mae: 0.0695 - final_output_rmse: 0.0962 - final_output_custom_mape: 61.5230\n", "Epoch 91 Detailed Metrics:\n", "541/541 [==============================] - 54s 100ms/step - loss: 0.0335 - classification_output_loss: 0.0439 - regression_output_loss: 0.0164 - final_output_loss: 0.0210 - classification_output_accuracy: 0.9812 - classification_output_auc: 0.9988 - regression_output_mse: 0.0093 - regression_output_mae: 0.0543 - regression_output_rmse: 0.0940 - regression_output_custom_mape: 15.1890 - final_output_mse: 0.0097 - final_output_mae: 0.0695 - final_output_rmse: 0.0962 - final_output_custom_mape: 61.5230 - val_loss: 0.0452 - val_classification_output_loss: 0.0961 - val_regression_output_loss: 0.0179 - val_final_output_loss: 0.0227 - val_classification_output_accuracy: 0.9641 - val_classification_output_auc: 0.9943 - val_regression_output_mse: 0.0108 - val_regression_output_mae: 0.0591 - val_regression_output_rmse: 0.1010 - val_regression_output_custom_mape: 17.6374 - val_final_output_mse: 0.0113 - val_final_output_mae: 0.0749 - val_final_output_rmse: 0.1036 - val_final_output_custom_mape: 63.9890 - lr: 9.3750e-06\n", "Epoch 92/100\n", "541/541 
[==============================] - 58s 108ms/step - loss: 0.0333 - classification_output_loss: 0.0437 - regression_output_loss: 0.0163 - final_output_loss: 0.0208 - classification_output_accuracy: 0.9816 - classification_output_auc: 0.9988 - regression_output_mse: 0.0091 - regression_output_mae: 0.0540 - regression_output_rmse: 0.0932 - regression_output_custom_mape: 15.1416 - final_output_mse: 0.0095 - final_output_mae: 0.0692 - final_output_rmse: 0.0953 - final_output_custom_mape: 61.4852 - val_loss: 0.0458 - val_classification_output_loss: 0.1000 - val_regression_output_loss: 0.0178 - val_final_output_loss: 0.0226 - val_classification_output_accuracy: 0.9633 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0107 - val_regression_output_mae: 0.0587 - val_regression_output_rmse: 0.1005 - val_regression_output_custom_mape: 17.7404 - val_final_output_mse: 0.0112 - val_final_output_mae: 0.0744 - val_final_output_rmse: 0.1031 - val_final_output_custom_mape: 64.0757 - lr: 9.3750e-06\n", "Epoch 93/100\n", "541/541 [==============================] - 55s 101ms/step - loss: 0.0333 - classification_output_loss: 0.0433 - regression_output_loss: 0.0164 - final_output_loss: 0.0210 - classification_output_accuracy: 0.9813 - classification_output_auc: 0.9989 - regression_output_mse: 0.0093 - regression_output_mae: 0.0543 - regression_output_rmse: 0.0938 - regression_output_custom_mape: 15.2251 - final_output_mse: 0.0097 - final_output_mae: 0.0695 - final_output_rmse: 0.0959 - final_output_custom_mape: 61.5190 - val_loss: 0.0460 - val_classification_output_loss: 0.0983 - val_regression_output_loss: 0.0185 - val_final_output_loss: 0.0233 - val_classification_output_accuracy: 0.9640 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0114 - val_regression_output_mae: 0.0609 - val_regression_output_rmse: 0.1038 - val_regression_output_custom_mape: 18.0193 - val_final_output_mse: 0.0119 - val_final_output_mae: 0.0767 - val_final_output_rmse: 
0.1063 - val_final_output_custom_mape: 64.4023 - lr: 9.3750e-06\n", "Epoch 94/100\n", "541/541 [==============================] - 58s 107ms/step - loss: 0.0332 - classification_output_loss: 0.0435 - regression_output_loss: 0.0163 - final_output_loss: 0.0209 - classification_output_accuracy: 0.9815 - classification_output_auc: 0.9989 - regression_output_mse: 0.0092 - regression_output_mae: 0.0540 - regression_output_rmse: 0.0936 - regression_output_custom_mape: 15.1258 - final_output_mse: 0.0096 - final_output_mae: 0.0692 - final_output_rmse: 0.0956 - final_output_custom_mape: 61.4526 - val_loss: 0.0454 - val_classification_output_loss: 0.0978 - val_regression_output_loss: 0.0179 - val_final_output_loss: 0.0227 - val_classification_output_accuracy: 0.9638 - val_classification_output_auc: 0.9942 - val_regression_output_mse: 0.0108 - val_regression_output_mae: 0.0589 - val_regression_output_rmse: 0.1007 - val_regression_output_custom_mape: 17.6076 - val_final_output_mse: 0.0113 - val_final_output_mae: 0.0747 - val_final_output_rmse: 0.1033 - val_final_output_custom_mape: 63.9772 - lr: 9.3750e-06\n", "Epoch 95/100\n", "541/541 [==============================] - 54s 100ms/step - loss: 0.0332 - classification_output_loss: 0.0434 - regression_output_loss: 0.0163 - final_output_loss: 0.0209 - classification_output_accuracy: 0.9814 - classification_output_auc: 0.9988 - regression_output_mse: 0.0091 - regression_output_mae: 0.0540 - regression_output_rmse: 0.0936 - regression_output_custom_mape: 15.1536 - final_output_mse: 0.0095 - final_output_mae: 0.0692 - final_output_rmse: 0.0956 - final_output_custom_mape: 61.5154 - val_loss: 0.0458 - val_classification_output_loss: 0.1002 - val_regression_output_loss: 0.0178 - val_final_output_loss: 0.0226 - val_classification_output_accuracy: 0.9632 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0107 - val_regression_output_mae: 0.0588 - val_regression_output_rmse: 0.1010 - val_regression_output_custom_mape: 
17.8107 - val_final_output_mse: 0.0112 - val_final_output_mae: 0.0745 - val_final_output_rmse: 0.1036 - val_final_output_custom_mape: 64.1810 - lr: 9.3750e-06\n", "Epoch 96/100\n", "541/541 [==============================] - 56s 104ms/step - loss: 0.0334 - classification_output_loss: 0.0444 - regression_output_loss: 0.0164 - final_output_loss: 0.0209 - classification_output_accuracy: 0.9812 - classification_output_auc: 0.9988 - regression_output_mse: 0.0092 - regression_output_mae: 0.0542 - regression_output_rmse: 0.0935 - regression_output_custom_mape: 15.1391 - final_output_mse: 0.0096 - final_output_mae: 0.0694 - final_output_rmse: 0.0956 - final_output_custom_mape: 61.4492 - val_loss: 0.0452 - val_classification_output_loss: 0.0985 - val_regression_output_loss: 0.0176 - val_final_output_loss: 0.0224 - val_classification_output_accuracy: 0.9632 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0105 - val_regression_output_mae: 0.0581 - val_regression_output_rmse: 0.0993 - val_regression_output_custom_mape: 17.4962 - val_final_output_mse: 0.0110 - val_final_output_mae: 0.0738 - val_final_output_rmse: 0.1020 - val_final_output_custom_mape: 63.8275 - lr: 9.3750e-06\n", "Epoch 97/100\n", "541/541 [==============================] - 61s 113ms/step - loss: 0.0329 - classification_output_loss: 0.0432 - regression_output_loss: 0.0161 - final_output_loss: 0.0207 - classification_output_accuracy: 0.9810 - classification_output_auc: 0.9989 - regression_output_mse: 0.0090 - regression_output_mae: 0.0535 - regression_output_rmse: 0.0927 - regression_output_custom_mape: 15.0539 - final_output_mse: 0.0094 - final_output_mae: 0.0688 - final_output_rmse: 0.0950 - final_output_custom_mape: 61.4182 - val_loss: 0.0454 - val_classification_output_loss: 0.0997 - val_regression_output_loss: 0.0176 - val_final_output_loss: 0.0224 - val_classification_output_accuracy: 0.9630 - val_classification_output_auc: 0.9941 - val_regression_output_mse: 0.0105 - 
val_regression_output_mae: 0.0580 - val_regression_output_rmse: 0.0993 - val_regression_output_custom_mape: 17.6127 - val_final_output_mse: 0.0110 - val_final_output_mae: 0.0738 - val_final_output_rmse: 0.1019 - val_final_output_custom_mape: 63.9294 - lr: 9.3750e-06\n", "Epoch 98/100\n", "541/541 [==============================] - 59s 109ms/step - loss: 0.0331 - classification_output_loss: 0.0434 - regression_output_loss: 0.0163 - final_output_loss: 0.0209 - classification_output_accuracy: 0.9812 - classification_output_auc: 0.9989 - regression_output_mse: 0.0092 - regression_output_mae: 0.0540 - regression_output_rmse: 0.0934 - regression_output_custom_mape: 15.1271 - final_output_mse: 0.0096 - final_output_mae: 0.0692 - final_output_rmse: 0.0955 - final_output_custom_mape: 61.4752 - val_loss: 0.0455 - val_classification_output_loss: 0.0989 - val_regression_output_loss: 0.0179 - val_final_output_loss: 0.0227 - val_classification_output_accuracy: 0.9631 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0108 - val_regression_output_mae: 0.0590 - val_regression_output_rmse: 0.1004 - val_regression_output_custom_mape: 17.5953 - val_final_output_mse: 0.0113 - val_final_output_mae: 0.0747 - val_final_output_rmse: 0.1031 - val_final_output_custom_mape: 63.9553 - lr: 9.3750e-06\n", "Epoch 99/100\n", "541/541 [==============================] - 58s 107ms/step - loss: 0.0330 - classification_output_loss: 0.0439 - regression_output_loss: 0.0161 - final_output_loss: 0.0207 - classification_output_accuracy: 0.9811 - classification_output_auc: 0.9988 - regression_output_mse: 0.0090 - regression_output_mae: 0.0535 - regression_output_rmse: 0.0926 - regression_output_custom_mape: 15.0318 - final_output_mse: 0.0094 - final_output_mae: 0.0688 - final_output_rmse: 0.0948 - final_output_custom_mape: 61.3647 - val_loss: 0.0449 - val_classification_output_loss: 0.0970 - val_regression_output_loss: 0.0177 - val_final_output_loss: 0.0224 - 
val_classification_output_accuracy: 0.9637 - val_classification_output_auc: 0.9943 - val_regression_output_mse: 0.0105 - val_regression_output_mae: 0.0582 - val_regression_output_rmse: 0.0997 - val_regression_output_custom_mape: 17.5580 - val_final_output_mse: 0.0110 - val_final_output_mae: 0.0740 - val_final_output_rmse: 0.1023 - val_final_output_custom_mape: 63.9403 - lr: 9.3750e-06\n", "Epoch 100/100\n", "541/541 [==============================] - 60s 111ms/step - loss: 0.0331 - classification_output_loss: 0.0433 - regression_output_loss: 0.0164 - final_output_loss: 0.0210 - classification_output_accuracy: 0.9817 - classification_output_auc: 0.9989 - regression_output_mse: 0.0094 - regression_output_mae: 0.0543 - regression_output_rmse: 0.0941 - regression_output_custom_mape: 15.1662 - final_output_mse: 0.0098 - final_output_mae: 0.0695 - final_output_rmse: 0.0962 - final_output_custom_mape: 61.5014 - val_loss: 0.0450 - val_classification_output_loss: 0.0997 - val_regression_output_loss: 0.0172 - val_final_output_loss: 0.0219 - val_classification_output_accuracy: 0.9630 - val_classification_output_auc: 0.9940 - val_regression_output_mse: 0.0100 - val_regression_output_mae: 0.0566 - val_regression_output_rmse: 0.0969 - val_regression_output_custom_mape: 17.3156 - val_final_output_mse: 0.0105 - val_final_output_mae: 0.0723 - val_final_output_rmse: 0.0996 - val_final_output_custom_mape: 63.6544 - lr: 9.3750e-06\n", "\n", "Training completed successfully!\n", "\n", "Classification Metrics:\n", "Accuracy: 96.30%\n", "AUC-ROC: 0.9953\n", "\n", "Confusion Matrix:\n", "[[12454 553]\n", " [ 406 12520]]\n", "\n", "Classification Report:\n", " precision recall f1-score support\n", "\n", " Zero 0.9684 0.9575 0.9629 13007\n", " Non-Zero 0.9577 0.9686 0.9631 12926\n", "\n", " accuracy 0.9630 25933\n", " macro avg 0.9631 0.9630 0.9630 25933\n", "weighted avg 0.9631 0.9630 0.9630 25933\n", "\n", "\n", "Regression Metrics (non-zero values):\n", "Out of range: 0 predictions\n", 
"MAPE: 28.99%\n", "Within ±10%: 48.43%\n", "MAE: 0.11\n", "RMSE: 0.14\n", "\n", "Final Combined Output Metrics:\n", "Out of range: 0 predictions\n", "MAPE: 63.75%\n", "Within ±10%: 24.89%\n", "MAE: 0.07\n", "RMSE: 0.10\n" ] } ], "source": [ "# Model creation\n", "print(\"\\n2. Creating model...\")\n", "input_shape = (X_train_seq.shape[1], X_train_seq.shape[2])\n", "\n", "max_val = df['solarradiation'].max()\n", "min_val_scaled = scaler_y.transform([[0]])[0][0]\n", "max_val_scaled = scaler_y.transform([[max_val]])[0][0]\n", "\n", "print(f\"\\nMax dataset solar radiation : {max_val} - Scaled Version : {max_val_scaled}\")\n", "\n", "increase_percentage = 15\n", "\n", "max_val = max_val * (1 + increase_percentage / 100)\n", "max_val_scaled = max_val_scaled * (1 + increase_percentage / 100)\n", "\n", "print(f\"Max dataset solar radiation increased by {increase_percentage}% : {max_val} - Scaled Version : {max_val_scaled}\")\n", "\n", "# Create the hybrid model\n", "model = create_solarradiation_model(\n", " input_shape=input_shape, \n", " folder_name=folder_name, \n", " min_output=min_val_scaled, \n", " max_output=max_val_scaled\n", ")\n", "\n", "# Prepare binary targets for classification\n", "y_train_binary = (y_train > 0).astype(float)\n", "y_test_binary = (y_test > 0).astype(float)\n", "\n", "print(\"\\nClass distribution in training set:\")\n", "print(f\"Zeros: {np.sum(y_train_binary == 0)} ({np.mean(y_train_binary == 0)*100:.2f}%)\")\n", "print(f\"Non-zeros: {np.sum(y_train_binary == 1)} ({np.mean(y_train_binary == 1)*100:.2f}%)\")\n", "\n", "print(\"\\nClass distribution in test set:\")\n", "print(f\"Zeros: {np.sum(y_test_binary == 0)} ({np.mean(y_test_binary == 0)*100:.2f}%)\")\n", "print(f\"Non-zeros: {np.sum(y_test_binary == 1)} ({np.mean(y_test_binary == 1)*100:.2f}%)\")\n", "\n", "# Get the exact output names from the model\n", "output_names = [output.name.split('/')[0] for output in model.outputs]\n", "print(\"\\nModel output names:\", output_names)\n", 
"\n", "print(\"\\n4. Starting training...\")\n", "history = train_hybrid_model(\n", " model=model,\n", " X_train=X_train_seq,\n", " y_train=y_train,\n", " X_test=X_test_seq,\n", " y_test=y_test,\n", " epochs=100,\n", " batch_size=192,\n", " folder_name=folder_name,\n", " min_output=min_val_scaled,\n", " max_output=max_val_scaled\n", ")" ] }, { "cell_type": "code", "execution_count": 8, "id": "958d78b99e8898d6", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "\n", "5. Generating predictions...\n", "811/811 [==============================] - 12s 14ms/step\n", "\n", "6. Evaluating model...\n", "\n", "Solar Radiation Prediction Metrics:\n", "\n", "Absolute Metrics:\n", "MAE: 24.11 W/m²\n", "RMSE: 34.38 W/m²\n", "R² Score: 0.983\n", "MAPE: 26.17%\n", "\n", "Accuracy Metrics:\n", "Within ±5 W/m²: 6.7%\n", "Within ±10 W/m²: 12.1%\n", "Within ±20 W/m²: 67.5%\n", "\n", "Level Accuracy:\n", "Level Accuracy: 91.3%\n", "\n", "Confusion Matrix for Radiation Levels:\n", " Very Low Low Moderate High Very High Extreme\n", "Very Low 0 1 0 0 9 0\n", "Low 0 1554 0 251 16 0\n", "Moderate 0 0 2054 533 0 274\n", "High 0 201 89 2006 0 0\n", "Very High 0 437 0 0 700 0\n", "Extreme 0 0 457 0 0 17351\n", "\n", "Plot saved as: 2024-11-25_21-39_radiation_analysis.png\n", "\n", "Error Statistics:\n", "Mean error: 5.849\n", "Error standard deviation: 33.875\n", "Median error: 12.000\n", "95th percentile absolute error: 74.602\n" ] } ], "source": [ "print(\"\\n5. 
print("\n5. Generating predictions...")
predictions = model.predict(X_test_seq)
classification_pred, regression_pred, final_pred = predictions

# Bug fix: count out-of-range predictions BEFORE clipping. The previous version
# clipped first, so 'out_of_range_predictions' was always 0 by construction.
regression_out_of_range = int(np.sum((regression_pred < 0) | (regression_pred > 11)))
final_out_of_range = int(np.sum((final_pred < 0) | (final_pred > 11)))

# Clip only the regression and final predictions to the valid scaled range [0, 11]
regression_pred = np.clip(regression_pred, 0, 11)
final_pred = np.clip(final_pred, 0, 11)

# Inverse transform back to the original units (W/m²)
regression_pred_original = scaler_y.inverse_transform(regression_pred)
final_pred_original = scaler_y.inverse_transform(final_pred)
y_test_original = scaler_y.inverse_transform(y_test)

print("\n6. Evaluating model...")
# Evaluate the final (combined) predictions against the original-scale targets
metrics = evaluate_solarradiation_predictions(y_test_original, final_pred_original, folder_name=folder_name)

# Results dictionary with additional metrics for the hybrid model
training_results = {
    'model_params': {
        'input_shape': input_shape,
        'n_features': len(features),
        'sequence_length': X_train_seq.shape[1]
    },
    'training_params': {
        'batch_size': 192,
        'total_epochs': len(history.history['loss']),
        'best_epoch': np.argmin(history.history['val_final_output_loss']) + 1
    },
    'performance_metrics': {
        'classification': {
            'final_loss': float(history.history['val_classification_output_loss'][-1]),
            'final_accuracy': float(history.history['val_classification_output_accuracy'][-1]),
            'final_auc': float(history.history['val_classification_output_auc'][-1])
        },
        'regression': {
            'final_loss': float(history.history['val_regression_output_loss'][-1]),
            'final_mae': float(history.history['val_regression_output_mae'][-1]),
            # counted on the raw (pre-clip) model output
            'out_of_range_predictions': regression_out_of_range
        },
        'final_output': {
            'final_loss': float(history.history['val_final_output_loss'][-1]),
            'final_mae': float(history.history['val_final_output_mae'][-1]),
            'best_val_loss': float(min(history.history['val_final_output_loss'])),
            # counted on the raw (pre-clip) model output
            'out_of_range_predictions': final_out_of_range
        }
    }
}
print("\n7. Predicting missing data...")
predictions = model.predict(X_to_predict_seq)
classification_pred, regression_pred, final_pred = predictions

# Only the final predictions are clipped, since they are what gets integrated.
final_pred = np.clip(final_pred, 0, 11)
final_pred_original = scaler_y.inverse_transform(final_pred)

print("\n8. Integrating predictions into original dataset...")
df_updated = integrate_predictions(df.copy(), predictions=(classification_pred, regression_pred, final_pred_original))

df_updated.to_parquet('../../sources/weather_data_solarradiation.parquet')

# Record prediction statistics alongside the training results.
n_zero = int(np.sum(classification_pred < 0.5))
n_nonzero = int(np.sum(classification_pred >= 0.5))
pred_stats = {
    'n_predictions_added': len(final_pred_original),
    'classification_stats': {
        'predicted_zeros': n_zero,
        'predicted_non_zeros': n_nonzero,
        'mean_confidence': float(classification_pred.mean()),
    },
    'regression_stats': {
        'mean_predicted_value': float(regression_pred.mean()),
        'min_predicted_value': float(regression_pred.min()),
        'max_predicted_value': float(regression_pred.max()),
    },
    'final_predictions': {
        'mean_predicted_solarradiation': float(final_pred_original.mean()),
        'min_predicted_solarradiation': float(final_pred_original.min()),
        'max_predicted_solarradiation': float(final_pred_original.max()),
        'zero_predictions': int(np.sum(final_pred_original == 0)),
        'non_zero_predictions': int(np.sum(final_pred_original > 0)),
    }
}
training_results['prediction_stats'] = pred_stats

n_total = len(final_pred_original)
final_stats = pred_stats['final_predictions']

print("\nPrediction Statistics:")
print(f"Total predictions added: {pred_stats['n_predictions_added']}")
print("\nClassification Statistics:")
print(f"Predicted zeros: {n_zero} ({n_zero / n_total * 100:.2f}%)")
print(f"Predicted non-zeros: {n_nonzero} ({n_nonzero / n_total * 100:.2f}%)")
print(f"Mean classification confidence: {pred_stats['classification_stats']['mean_confidence']:.4f}")

print("\nFinal Predictions Statistics:")
print(f"Mean solar radiation: {final_stats['mean_predicted_solarradiation']:.2f}")
print(f"Min solar radiation: {final_stats['min_predicted_solarradiation']:.2f}")
print(f"Max solar radiation: {final_stats['max_predicted_solarradiation']:.2f}")
print(f"Zero predictions: {final_stats['zero_predictions']} ({final_stats['zero_predictions'] / n_total * 100:.2f}%)")

print("\nTraining completed successfully!")

tf.keras.backend.clear_session()
# Fix for the recorded ValueError: analyze_distribution expects
# (data, predictions, sequence_length, name), so the previous call
# analyze_distribution(df_updated, 'solarradiation', 'Solar Radiation')
# bound the string 'solarradiation' to `predictions`, which then failed at
# "classification_pred, regression_pred, final_pred = predictions"
# with "too many values to unpack (expected 3)".
# NOTE(review): sequence_length is assumed to be the input window length used
# to build the sequences — confirm against the function definition.
analyze_distribution(
    df_updated,
    (classification_pred, regression_pred, final_pred),
    X_to_predict_seq.shape[1],
    'Solar Radiation'
)
def plot_error_analysis(y_true, predictions, folder_name=None):
    """
    Function to visualize prediction error analysis for the hybrid model

    Parameters:
    -----------
    y_true : array-like
        Actual values
    predictions : tuple
        Tuple containing (classification_pred, regression_pred, final_pred)
    folder_name : str, optional
        Directory to save plots. If None, plots are only displayed

    Generates:
    ----------
    - Classification analysis plots
    - Regression error analysis plots
    - Final prediction error analysis plots
    """
    # Unpack predictions
    classification_pred, regression_pred, final_pred = predictions

    # Convert to 1D numpy arrays if needed
    y_true = np.ravel(y_true)
    classification_pred = np.ravel(classification_pred)
    regression_pred = np.ravel(regression_pred)
    final_pred = np.ravel(final_pred)

    # Create binary ground truth
    y_true_binary = (y_true > 0).astype(float)

    # Mask selecting the non-zero targets (shared by regression plots and stats;
    # previously recomputed twice)
    mask_nonzero = y_true > 0

    # Calculate errors for regression and final predictions
    regression_errors = regression_pred - y_true
    final_errors = final_pred - y_true

    # Create main figure
    plt.figure(figsize=(20, 15))

    # Classification Analysis (Top Row)
    # Plot 1: Classification Distribution
    plt.subplot(3, 3, 1)
    plt.hist(classification_pred, bins=50, alpha=0.7)
    plt.axvline(x=0.5, color='r', linestyle='--')
    plt.title('Classification Probability Distribution')
    plt.xlabel('Classification Probability')
    plt.ylabel('Frequency')

    # Plot 2: ROC Curve
    plt.subplot(3, 3, 2)
    fpr, tpr, _ = roc_curve(y_true_binary, classification_pred)
    plt.plot(fpr, tpr)
    plt.plot([0, 1], [0, 1], 'r--')
    plt.title(f'ROC Curve (AUC = {roc_auc_score(y_true_binary, classification_pred):.4f})')
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')

    # Plot 3: Classification Confusion Matrix
    plt.subplot(3, 3, 3)
    cm = confusion_matrix(y_true_binary, classification_pred > 0.5)
    sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
    plt.title('Classification Confusion Matrix')
    plt.xlabel('Predicted')
    plt.ylabel('Actual')

    # Regression Analysis (Middle Row)
    # Plot 4: Regression Error Distribution
    plt.subplot(3, 3, 4)
    plt.hist(regression_errors[mask_nonzero], bins=50, alpha=0.7)
    plt.title('Regression Error Distribution (Non-zero Values)')
    plt.xlabel('Error')
    plt.ylabel('Frequency')

    # Plot 5: Actual vs Predicted (Regression)
    plt.subplot(3, 3, 5)
    plt.scatter(y_true[mask_nonzero], regression_pred[mask_nonzero], alpha=0.5)
    plt.plot([y_true[mask_nonzero].min(), y_true[mask_nonzero].max()],
             [y_true[mask_nonzero].min(), y_true[mask_nonzero].max()], 'r--', lw=2)
    plt.title('Actual vs Predicted (Regression, Non-zero Values)')
    plt.xlabel('Actual Values')
    plt.ylabel('Predicted Values')

    # Plot 6: Regression Errors vs Actual Values
    plt.subplot(3, 3, 6)
    plt.scatter(y_true[mask_nonzero], regression_errors[mask_nonzero], alpha=0.5)
    plt.axhline(y=0, color='r', linestyle='--')
    plt.title('Regression Errors vs Actual Values (Non-zero Values)')
    plt.xlabel('Actual Values')
    plt.ylabel('Error')

    # Final Predictions Analysis (Bottom Row)
    # Plot 7: Final Error Distribution
    plt.subplot(3, 3, 7)
    plt.hist(final_errors, bins=50, alpha=0.7)
    plt.title('Final Prediction Error Distribution')
    plt.xlabel('Error')
    plt.ylabel('Frequency')

    # Plot 8: Actual vs Predicted (Final)
    plt.subplot(3, 3, 8)
    plt.scatter(y_true, final_pred, alpha=0.5)
    plt.plot([y_true.min(), y_true.max()], [y_true.min(), y_true.max()], 'r--', lw=2)
    plt.title('Actual vs Predicted (Final)')
    plt.xlabel('Actual Values')
    plt.ylabel('Predicted Values')

    # Plot 9: Final Errors vs Actual Values
    plt.subplot(3, 3, 9)
    plt.scatter(y_true, final_errors, alpha=0.5)
    plt.axhline(y=0, color='r', linestyle='--')
    plt.title('Final Errors vs Actual Values')
    plt.xlabel('Actual Values')
    plt.ylabel('Error')

    plt.tight_layout()

    # Save plot if directory is specified
    if folder_name is not None:
        try:
            filename = f'{folder_name}/error_analysis.png'
            plt.savefig(filename, dpi=300, bbox_inches='tight')
            # Bug fix: the message previously printed a literal placeholder
            # instead of the saved path.
            print(f"\nPlot saved as: {filename}")
        except Exception as e:
            print(f"\nError saving plot: {str(e)}")

    plt.show()

    # Print comprehensive statistics
    print("\nClassification Statistics:")
    print(classification_report(y_true_binary, classification_pred > 0.5))
    print(f"AUC-ROC: {roc_auc_score(y_true_binary, classification_pred):.4f}")

    print("\nRegression Statistics (Non-zero values):")
    if np.any(mask_nonzero):
        print(f"MAE: {np.mean(np.abs(regression_errors[mask_nonzero])):.4f}")
        print(f"RMSE: {np.sqrt(np.mean(regression_errors[mask_nonzero] ** 2)):.4f}")
        print(f"Mean error: {np.mean(regression_errors[mask_nonzero]):.4f}")
        print(f"Error std: {np.std(regression_errors[mask_nonzero]):.4f}")

    print("\nFinal Prediction Statistics:")
    print(f"MAE: {np.mean(np.abs(final_errors)):.4f}")
    print(f"RMSE: {np.sqrt(np.mean(final_errors ** 2)):.4f}")
    print(f"Mean error: {np.mean(final_errors):.4f}")
    print(f"Error std: {np.std(final_errors):.4f}")

    # Calculate percentage of errors within thresholds
    thresholds = [0.5, 1.0, 1.5, 2.0]
    print("\nError Thresholds (Final Predictions):")
    for threshold in thresholds:
        within_threshold = np.mean(np.abs(final_errors) <= threshold) * 100
        print(f"Predictions within ±{threshold}: {within_threshold:.1f}%")

# Example usage
# NOTE(review): classification_pred / regression_pred / final_pred were overwritten
# by the missing-data cell (predictions on X_to_predict_seq), so they no longer line
# up with y_test. Recompute the test-set predictions before analysing the errors.
test_classification_pred, test_regression_pred, test_final_pred = model.predict(X_test_seq)
plot_error_analysis(y_test, (test_classification_pred, test_regression_pred, test_final_pred), folder_name=folder_name)
"f982c92c-ba99-4df6-b3c8-df92426679db", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3 (ipykernel)", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.11.0rc1" } }, "nbformat": 4, "nbformat_minor": 5 }