{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Laden der Rohdaten"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pickle\n",
"\n",
"# Laden der 'kirp' Liste aus der Pickle-Datei\n",
"with open('rick.pickle', 'rb') as f:\n",
" data_frame = pickle.load(f)"
]
},
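{
"cell_type": "markdown",
"metadata": {},
"source": [
"The following cell is an optional sanity check, not part of the original pipeline. It is a minimal sketch that assumes `data_frame` is a pandas DataFrame with the columns `genome_frequencies` and `cancer_type`, which the `GenomeDataset` class below expects."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# Optional sanity check (assumption: data_frame is a pandas DataFrame with the\n",
"# columns 'genome_frequencies' and 'cancer_type' used by GenomeDataset below).\n",
"print(type(data_frame))\n",
"if isinstance(data_frame, pd.DataFrame):\n",
"    print(data_frame.shape)\n",
"    print(data_frame.columns.tolist())\n",
"    print(data_frame['cancer_type'].value_counts())"
]
},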
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Aktiviere Cuda Support"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"device = \"cpu\"\n",
"if torch.cuda.is_available():\n",
" print(\"CUDA is available on your system.\")\n",
" device = \"cuda\"\n",
"else:\n",
" print(\"CUDA is not available on your system.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# PCA Klasse zu Reduktion der Dimensionen"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from torch.utils.data import Dataset\n",
"import torch\n",
"import pandas as pd\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from sklearn.decomposition import PCA\n",
"from sklearn.preprocessing import StandardScaler\n",
"from sklearn.model_selection import train_test_split\n",
"from typing import List, Tuple, Dict\n",
"\n",
"\n",
"class GenomeDataset(Dataset):\n",
" \"\"\"\n",
" Eine benutzerdefinierte Dataset-Klasse, die für die Handhabung von Genomdaten konzipiert ist.\n",
" Diese Klasse wendet eine Principal Component Analysis (PCA) auf die Frequenzen der Genome an\n",
" und teilt den Datensatz in Trainings- und Validierungsteile auf.\n",
"\n",
" Attributes:\n",
" dataframe (pd.DataFrame): Ein Pandas DataFrame, der die initialen Daten enthält.\n",
" train_df (pd.DataFrame): Ein DataFrame, der den Trainingsdatensatz nach der Anwendung von PCA und der Aufteilung enthält.\n",
" val_df (pd.DataFrame): Ein DataFrame, der den Validierungsdatensatz nach der Anwendung von PCA und der Aufteilung enthält.\n",
"\n",
" Methods:\n",
" __init__(self, dataframe, n_pca_components=1034, train_size=0.8, split_random_state=42):\n",
" Konstruktor für die GenomeDataset Klasse.\n",
" _do_PCA(self, frequencies, n_components=1034):\n",
" Wendet PCA auf die gegebenen Frequenzen an.\n",
" _split_dataset(self, train_size=0.8, random_state=42):\n",
" Teilt den DataFrame in Trainings- und Validierungsdatensätze auf.\n",
" __getitem__(self, index):\n",
" Gibt ein Tupel aus transformierten Frequenzen und dem zugehörigen Krebstyp für einen gegebenen Index zurück.\n",
" __len__(self):\n",
" Gibt die Gesamtlänge der kombinierten Trainings- und Validierungsdatensätze zurück.\n",
" \"\"\"\n",
"\n",
" def __init__(self, dataframe: pd.DataFrame, n_pca_components: int = 1034, train_size: float = 0.8, split_random_state: int = 42):\n",
" \"\"\"\n",
" Konstruktor für die GenomeDataset Klasse.\n",
"\n",
" Parameters:\n",
" dataframe (pd.DataFrame): Der DataFrame, der die Genome Frequenzen und Krebsarten enthält.\n",
" n_pca_components (int): Die Anzahl der PCA-Komponenten, auf die reduziert werden soll. Standardwert ist 1034.\n",
" train_size (float): Der Anteil der Daten, der als Trainingsdaten verwendet werden soll. Standardwert ist 0.8.\n",
" split_random_state (int): Der Zufalls-Saatwert, der für die Aufteilung des Datensatzes verwendet wird. Standardwert ist 42.\n",
" \"\"\"\n",
" self.dataframe = dataframe\n",
"\n",
" # Umwandlung der Krebsarten in numerische Werte\n",
" self.label_encoder = LabelEncoder()\n",
" self.dataframe['encoded_cancer_type'] = self.label_encoder.fit_transform(dataframe['cancer_type'])\n",
"\n",
" # Anwenden der PCA auf die Frequenzen\n",
" self.dataframe['pca_frequencies'] = self._do_PCA(self.dataframe['genome_frequencies'].tolist(), n_pca_components)\n",
"\n",
" # Teilen des DataFrame in Trainings- und Validierungsdatensatz\n",
" self._split_dataset(train_size=train_size, random_state=split_random_state)\n",
"\n",
" def transform_datapoint(self, datapoint: List[float]) -> List[float]:\n",
" \"\"\"\n",
" Transformiert einen einzelnen Datenpunkt durch Standardisierung und Anwendung der PCA.\n",
"\n",
" Diese Methode nimmt einen rohen Datenpunkt (eine Liste von Frequenzen), standardisiert ihn mit dem \n",
" zuvor angepassten Scaler und wendet dann die PCA-Transformation an, um ihn in den reduzierten \n",
" Feature-Raum zu überführen, der für das Training des Modells verwendet wurde.\n",
"\n",
" Parameters:\n",
" datapoint (List[float]): Ein roher Datenpunkt, bestehend aus einer Liste von Frequenzen.\n",
"\n",
" Returns:\n",
" List[float]: Der transformierte Datenpunkt, nach Anwendung der Standardisierung und der PCA.\n",
" \"\"\"\n",
" # Standardisierung des Datenpunkts\n",
" scaled_data_point = self.scaler.transform([datapoint])\n",
"\n",
" # PCA-Transformation des standardisierten Datenpunkts\n",
" pca_transformed_point = self.pca.transform(scaled_data_point)\n",
"\n",
" return pca_transformed_point.tolist()\n",
"\n",
" def _do_PCA(self, frequencies: List[List[float]], n_components: int = 1034) -> List[List[float]]:\n",
" \"\"\"\n",
" Wendet PCA auf die gegebenen Frequenzen an.\n",
"\n",
" Parameters:\n",
" frequencies (List[List[float]]): Die Liste der Frequenzen, auf die die PCA angewendet werden soll.\n",
" n_components (int): Die Anzahl der Komponenten für die PCA. Standardwert ist 1034.\n",
"\n",
" Returns:\n",
" List[List[float]]: Eine Liste von Listen, die die transformierten Frequenzen nach der PCA darstellt.\n",
" \"\"\"\n",
"\n",
" # Standardisieren der Frequenzen\n",
" self.scaler = StandardScaler()\n",
" scaled_frequencies = self.scaler.fit_transform(frequencies)\n",
"\n",
" # PCA-Instanz erstellen und auf die gewünschte Anzahl von Komponenten reduzieren\n",
" self.pca = PCA(n_components=n_components)\n",
"\n",
" # PCA auf die Frequenzen anwenden\n",
" pca_result = self.pca.fit_transform(scaled_frequencies)\n",
"\n",
" return pca_result.tolist()\n",
"\n",
" def _split_dataset(self, train_size: float = 0.8, random_state: int = 42):\n",
" \"\"\"\n",
" Teilt den DataFrame in Trainings- und Validierungsdatensätze auf.\n",
"\n",
" Parameters:\n",
" train_size (float): Der Anteil der Daten, der als Trainingsdaten verwendet werden soll.\n",
" random_state (int): Der Zufalls-Saatwert, der für die Aufteilung des Datensatzes verwendet wird.\n",
" \"\"\"\n",
"\n",
" class SplittedDataset(Dataset):\n",
" def __init__(self, dataframe):\n",
" self.dataframe = dataframe\n",
"\n",
" # Umwandlung der Genome Frequenzen in Tensoren\n",
" self.genome_frequencies = torch.tensor(dataframe['pca_frequencies'].tolist(), dtype=torch.float32)\n",
"\n",
" # Umwandlung der Krebsarten in numerische Werte\n",
" self.label_encoder = LabelEncoder()\n",
" self.cancer_types = torch.tensor(dataframe['encoded_cancer_type'].tolist(), dtype=torch.long)\n",
"\n",
" def __getitem__(self, index):\n",
" # Rückgabe eines Tupels aus Genome Frequenzen und dem entsprechenden Krebstyp\n",
" return self.genome_frequencies[index], self.cancer_types[index]\n",
"\n",
" def __len__(self):\n",
" return len(self.dataframe)\n",
"\n",
" # Teilen des DataFrame in Trainings- und Validierungsdatensatz\n",
" train_df, val_df = train_test_split(self.dataframe, train_size=train_size) #, random_state=random_state)\n",
" self.train_df = SplittedDataset(train_df)\n",
" self.val_df = SplittedDataset(val_df)\n",
"\n",
"\n",
" def __getitem__(self, index: int) -> Tuple[torch.Tensor, int]:\n",
" \"\"\"\n",
" Gibt ein Tupel aus transformierten Frequenzen und dem entsprechenden Krebstyp für einen gegebenen Index zurück.\n",
"\n",
" Parameters:\n",
" index (int): Der Index des zu abrufenden Datenelements.\n",
"\n",
" Returns:\n",
" Tuple[torch.Tensor, int]: Ein Tupel, bestehend aus einem Tensor der transformierten Frequenzen und dem zugehörigen Krebstyp.\n",
" \"\"\"\n",
"\n",
" print(self.train_df.shape)\n",
" print(self.val_df.shape)\n",
" \n",
" if index < len(self.train_df):\n",
" row = self.train_df.iloc[index]\n",
" else:\n",
" row = self.val_df.iloc[len(self.train_df) - index]\n",
"\n",
" pca_frequencies_tensor = torch.tensor(row['pca_frequencies'], dtype=torch.float32)\n",
" cancer_type = row['encoded_cancer_type']\n",
"\n",
" return pca_frequencies_tensor, cancer_type\n",
"\n",
" def __len__(self) -> int:\n",
" \"\"\"\n",
" Gibt die Gesamtlänge der kombinierten Trainings- und Validierungsdatensätze zurück.\n",
"\n",
" Returns:\n",
" int: Die Länge der kombinierten Datensätze.\n",
" \"\"\"\n",
" \n",
" return len(self.train_df) + len(self.val_df)\n"
]
},
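{
"cell_type": "markdown",
"metadata": {},
"source": [
"The next cell is a minimal usage sketch for `GenomeDataset`, mirroring what `ExperimentationalExperiments.run()` does further below. It assumes the `data_frame` loaded above has the columns `genome_frequencies` and `cancer_type`; the component count of 64 and the batch size are only illustrative values."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from torch.utils.data import DataLoader\n",
"\n",
"# Illustrative sketch: build the dataset with a small number of PCA components\n",
"# (64 is an arbitrary example value) and wrap the two splits in DataLoaders.\n",
"genome_dataset = GenomeDataset(data_frame, n_pca_components=64)\n",
"train_loader = DataLoader(genome_dataset.train_df, batch_size=64, shuffle=True)\n",
"valid_loader = DataLoader(genome_dataset.val_df, batch_size=64, shuffle=False)\n",
"\n",
"# A single item from the training split: a feature tensor and its encoded label.\n",
"features, label = genome_dataset.train_df[0]\n",
"print(features.shape, label)"
]
},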
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Definition des neuronalen Netzes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"import torch.nn.functional as F\n",
"\n",
"class CancerClassifierNN(nn.Module):\n",
" \"\"\"\n",
" Eine benutzerdefinierte neuronale Netzwerkklassifikator-Klasse für die Krebsklassifikation.\n",
"\n",
" Diese Klasse definiert ein mehrschichtiges Perzeptron (MLP), das für die Klassifizierung von Krebsarten\n",
" anhand genetischer Frequenzdaten verwendet wird.\n",
"\n",
" Attributes:\n",
" fc1 (nn.Linear): Die erste lineare Schicht des Netzwerks.\n",
" fc2 (nn.Linear): Die zweite lineare Schicht des Netzwerks.\n",
" fc3 (nn.Linear): Die dritte lineare Schicht des Netzwerks.\n",
" fc4 (nn.Linear): Die Ausgabeschicht des Netzwerks.\n",
" dropout (nn.Dropout): Ein Dropout-Layer zur Vermeidung von Overfitting.\n",
"\n",
" Methods:\n",
" __init__(self, input_size: int, num_classes: int):\n",
" Konstruktor für die CancerClassifierNN Klasse.\n",
" forward(self, x: torch.Tensor) -> torch.Tensor:\n",
" Definiert den Vorwärtsdurchlauf des Netzwerks.\n",
" \"\"\"\n",
"\n",
" def __init__(self, input_size: int, num_classes: int):\n",
" \"\"\"\n",
" Konstruktor für die CancerClassifierNN Klasse.\n",
"\n",
" Parameters:\n",
" input_size (int): Die Größe des Input-Features.\n",
" num_classes (int): Die Anzahl der Zielklassen.\n",
" \"\"\"\n",
" super(CancerClassifierNN, self).__init__()\n",
" # Definieren der Schichten\n",
" self.fc1 = nn.Linear(input_size, input_size) # Eingabeschicht\n",
" self.fc2 = nn.Linear(input_size, input_size//2) # Versteckte Schicht\n",
" self.fc3 = nn.Linear(input_size//2, input_size//4) # Weitere versteckte Schicht\n",
" self.fc4 = nn.Linear(input_size//4, num_classes) # Ausgabeschicht\n",
" self.dropout = nn.Dropout(p=0.5) # Dropout\n",
"\n",
" def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
" \"\"\"\n",
" Definiert den Vorwärtsdurchlauf des Netzwerks.\n",
"\n",
" Parameters:\n",
" x (torch.Tensor): Der Input-Tensor für das Netzwerk.\n",
"\n",
" Returns:\n",
" torch.Tensor: Der Output-Tensor nach dem Durchlauf durch das Netzwerk.\n",
" \"\"\"\n",
" x = F.relu(self.fc1(x))\n",
" x = self.dropout(x)\n",
" x = F.relu(self.fc2(x))\n",
" x = self.dropout(x)\n",
" x = F.relu(self.fc3(x))\n",
" x = self.dropout(x)\n",
" x = torch.softmax(self.fc4(x), dim=1) # Oder F.log_softmax(x, dim=1) für Mehrklassenklassifikation\n",
" return x"
]
},
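{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before training, the cell below runs a quick shape check on the classifier with random data. It is only an illustration: the input size of 64 and the batch size of 8 are arbitrary example values, and the output should contain one value per class."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative shape check with random inputs (input_size=64 and batch size 8\n",
"# are arbitrary example values).\n",
"model = CancerClassifierNN(input_size=64, num_classes=3)\n",
"dummy_batch = torch.randn(8, 64)\n",
"print(model(dummy_batch).shape)  # expected: torch.Size([8, 3])"
]
},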
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from torch.utils.data import DataLoader\n",
"import torch.optim as optim\n",
"from IPython.display import clear_output\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import os\n",
"import pickle\n",
"\n",
"class ExperimentationalExperiments():\n",
" \"\"\"\n",
" Diese Klasse dient zur Durchführung und Verwaltung von Experimenten im Rahmen\n",
" des maschinellen Lernens, insbesondere für die Krebsklassifikation.\n",
"\n",
" Attribute:\n",
" results : Dict\n",
" Speichert die Ergebnisse der durchgeführten Experimente.\n",
" \"\"\"\n",
"\n",
" def __init__(self) -> None:\n",
" \"\"\" Konstruktor der Klasse. Initialisiert 'results' als None. \"\"\"\n",
" self.results = None\n",
"\n",
" def run_single_experiment(self, train_loader: DataLoader, valid_loader: DataLoader, n_pca_components: int, n_epochs: int = 200, learning_rate: int = 0.0005, verbose: bool = True, experiment_num: int = None) -> Tuple:\n",
" \"\"\"\n",
" Führt ein einzelnes Experiment mit dem spezifizierten DataLoader, PCA-Komponenten und weiteren Parametern durch.\n",
"\n",
" Parameter:\n",
" train_loader : DataLoader\n",
" Der DataLoader für den Trainingsdatensatz.\n",
" valid_loader : DataLoader\n",
" Der DataLoader für den Validierungsdatensatz.\n",
" n_pca_components : int\n",
" Anzahl der PCA-Komponenten, die im Modell verwendet werden.\n",
" n_epochs : int, optional\n",
" Anzahl der Epochen für das Training (Standardwert ist 200).\n",
" learning_rate : float, optional\n",
" Lernrate für den Optimierer (Standardwert ist 0.0005).\n",
" verbose : bool, optional\n",
" Gibt an, ob der Trainingsfortschritt angezeigt werden soll (Standardwert ist True).\n",
" experiment_num : int, optional\n",
" Nummer des Experiments.\n",
"\n",
" Rückgabewerte:\n",
" Tuple\n",
" Ein Tupel bestehend aus Listen der Trainings- und Validierungsverluste sowie der Genauigkeiten.\n",
" \"\"\"\n",
" if not isinstance(n_pca_components, int):\n",
" raise TypeError(\"n_pca_components must be an integers!\")\n",
"\n",
" model = CancerClassifierNN(input_size=n_pca_components, num_classes=3)\n",
" model.to(device=device)\n",
"\n",
" # Verlustfunktion\n",
" criterion = nn.CrossEntropyLoss()\n",
" # Optimierer\n",
" optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n",
"\n",
" # Listen, um Verluste zu speichern\n",
" train_losses = []\n",
" valid_losses = []\n",
" train_accuracies = []\n",
" valid_accuracies = []\n",
"\n",
" for epoch in range(n_epochs):\n",
" model.train()\n",
" train_loss = 0.0\n",
" correct_predictions = 0\n",
" total_predictions = 0\n",
"\n",
" for i, (inputs, labels) in enumerate(train_loader):\n",
" inputs, labels = inputs.to(device), labels.to(device)\n",
" optimizer.zero_grad()\n",
" outputs = model(inputs)\n",
" loss = criterion(outputs, labels)\n",
" loss.backward()\n",
" optimizer.step()\n",
" train_loss += loss.item()\n",
"\n",
" # Berechnen der Genauigkeit\n",
" _, predicted = torch.max(outputs, 1)\n",
" correct_predictions += (predicted == labels).sum().item()\n",
" total_predictions += labels.size(0)\n",
"\n",
" # Durchschnittlicher Trainingsverlust und Genauigkeit\n",
" train_loss /= len(train_loader)\n",
" train_accuracy = correct_predictions / total_predictions\n",
" train_losses.append(train_loss)\n",
" train_accuracies.append(train_accuracy)\n",
"\n",
" # Validierungsverlust und Genauigkeit\n",
" model.eval()\n",
" valid_loss = 0.0\n",
" correct_predictions = 0\n",
" total_predictions = 0\n",
"\n",
" with torch.no_grad():\n",
" for inputs, labels in valid_loader:\n",
" inputs, labels = inputs.to(device), labels.to(device)\n",
" outputs = model(inputs)\n",
" loss = criterion(outputs, labels)\n",
" valid_loss += loss.item()\n",
"\n",
" # Berechnen der Genauigkeit\n",
" _, predicted = torch.max(outputs, 1)\n",
" correct_predictions += (predicted == labels).sum().item()\n",
" total_predictions += labels.size(0)\n",
"\n",
" # Durchschnittlicher Validierungsverlust und Genauigkeit\n",
" valid_loss /= len(valid_loader)\n",
" valid_accuracy = correct_predictions / total_predictions\n",
" valid_losses.append(valid_loss)\n",
" valid_accuracies.append(valid_accuracy)\n",
"\n",
" # Aktualisieren des Graphen\n",
" clear_output(wait=True)\n",
" fig, ax1 = plt.subplots()\n",
"\n",
" # Zeichnen der Verlustkurven\n",
" ax1.plot(train_losses, label='Trainingsverlust', color='r')\n",
" ax1.plot(valid_losses, label='Validierungsverlust', color='b')\n",
" ax1.set_xlabel('Epochen')\n",
" ax1.set_ylabel('Verlust', color='g')\n",
" ax1.tick_params(axis='y', labelcolor='g')\n",
"\n",
" # Zweite y-Achse für die Genauigkeit\n",
" ax2 = ax1.twinx()\n",
" ax2.plot(train_accuracies, label='Trainingsgenauigkeit', color='r', linestyle='dashed')\n",
" ax2.plot(valid_accuracies, label='Validierungsgenauigkeit', color='b', linestyle='dashed')\n",
" ax2.set_ylabel('Genauigkeit', color='g')\n",
" ax2.tick_params(axis='y', labelcolor='g')\n",
"\n",
" # Titel und Legende\n",
" plt.title(f'Experiment #{experiment_num}: Trainings- und Validierungsverlust und -genauigkeit über die Zeit mit \\n{n_pca_components}-Hauptkomponenten, Lernrate: {learning_rate}')\n",
" fig.tight_layout()\n",
"\n",
" # Legende außerhalb des Graphen\n",
" ax1.legend(loc='upper left', bbox_to_anchor=(1.15, 1))\n",
" ax2.legend(loc='upper left', bbox_to_anchor=(1.15, 0.85))\n",
"\n",
" # Fortschritt anzeigen, falls angegeben\n",
" if verbose:\n",
" print(f'Experiment #{experiment_num} mit {n_pca_components} PCA components: Epoch [{epoch+1}/{n_epochs}], Trainingsverlust: {train_loss:.4f}, Trainingsgenauigkeit: {train_accuracies[-1]:.4f}, Validierungsverlust: {valid_loss:.4f}, Validierungsgenauigkeit: {valid_accuracies[-1]:.4f}')\n",
"\n",
" # Plot speichern\n",
" name = str(experiment_num) + \".png\" if experiment_num is not None else \"single_experiment.png\"\n",
" if not os.path.exists(\"Experiments\"):\n",
" os.makedirs(\"Experiments\")\n",
" if not os.path.exists(f\"Experiments/{str(n_pca_components)}\"):\n",
" os.makedirs(f\"Experiments/{str(n_pca_components)}\")\n",
" plt.savefig(f\"Experiments/{str(n_pca_components)}/{name}\", bbox_inches='tight')\n",
"\n",
" return train_losses, valid_losses, train_accuracies, valid_accuracies\n",
"\n",
" def run_single_pca_experiment(self, train_loader: DataLoader, valid_loader: DataLoader, n_pca_components: int, n_experiments: int, n_epochs: int = 200, learning_rate: int = 0.0005, verbose: bool = True) -> List:\n",
" \"\"\"\n",
" Führt eine Serie von Experimenten mit verschiedenen Konfigurationen für die PCA-Komponenten durch.\n",
"\n",
" Parameter:\n",
" train_loader : DataLoader\n",
" Der DataLoader für den Trainingsdatensatz.\n",
" valid_loader : DataLoader\n",
" Der DataLoader für den Validierungsdatensatz.\n",
" n_pca_components : int\n",
" Anzahl der PCA-Komponenten, die im Modell verwendet werden.\n",
" n_experiments : int\n",
" Anzahl der durchzuführenden Experimente.\n",
" n_epochs : int, optional\n",
" Anzahl der Epochen für das Training (Standardwert ist 200).\n",
" learning_rate : float, optional\n",
" Lernrate für den Optimierer (Standardwert ist 0.0005).\n",
" verbose : bool, optional\n",
" Gibt an, ob der Trainingsfortschritt angezeigt werden soll (Standardwert ist True).\n",
"\n",
" Rückgabewerte:\n",
" List\n",
" Eine Liste von Ergebnissen der einzelnen Experimente.\n",
" \"\"\"\n",
" if not isinstance(n_pca_components, int):\n",
" raise TypeError(\"n_pca_components must be an integers!\")\n",
"\n",
" results = []\n",
"\n",
" for n in range(n_experiments):\n",
" res = self.run_single_experiment(train_loader, valid_loader, n_pca_components, n_epochs=n_epochs, learning_rate=learning_rate, verbose=verbose, experiment_num=n+1)\n",
" results.append(res)\n",
"\n",
" return results\n",
" \n",
"\n",
" def run(self, n_pca_components: List[int], n_experiments: int, n_epochs: int = 200, learning_rate: int = 0.0005, batch_size: int = 64, verbose: bool = True) -> Dict:\n",
" \"\"\"\n",
" Hauptmethode zum Ausführen von Experimenten mit verschiedenen Anzahlen von PCA-Komponenten.\n",
"\n",
" Parameter:\n",
" n_pca_components : List[int]\n",
" Eine Liste von Anzahlen der PCA-Komponenten, die in den Experimenten verwendet werden sollen.\n",
" n_experiments : int\n",
" Anzahl der durchzuführenden Experimente pro PCA-Komponentenanzahl.\n",
" n_epochs : int, optional\n",
" Anzahl der Epochen für das Training (Standardwert ist 200).\n",
" learning_rate : float, optional\n",
" Lernrate für den Optimierer (Standardwert ist 0.0005).\n",
" batch_size : int, optional\n",
" Batch-Größe für das Laden der Daten (Standardwert ist 64).\n",
" verbose : bool, optional\n",
" Gibt an, ob der Trainingsfortschritt angezeigt werden soll (Standardwert ist True).\n",
"\n",
" Rückgabewerte:\n",
" Dict\n",
" Ein Wörterbuch, das die Ergebnisse der Experimente für jede Anzahl von PCA-Komponenten enthält.\n",
" \"\"\"\n",
" if not isinstance(n_pca_components, list):\n",
" raise TypeError(\"n_pca_components must be a list of integers!\")\n",
"\n",
" plt.ioff()\n",
" self.n_pca_components = n_pca_components\n",
"\n",
" results = {}\n",
"\n",
" for n_pca_comps in n_pca_components:\n",
" genome_dataset = GenomeDataset(data_frame, n_pca_components=n_pca_comps)\n",
" train_dataset = genome_dataset.train_df\n",
" valid_dataset = genome_dataset.val_df\n",
"\n",
" train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n",
" valid_loader = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=False)\n",
"\n",
" res = self.run_single_pca_experiment(train_loader, valid_loader, n_pca_comps, n_experiments, n_epochs=n_epochs, learning_rate=learning_rate, verbose=verbose)\n",
" results[str(n_pca_comps)] = res\n",
"\n",
" self.plot_and_save_results(res, n_pca_comps)\n",
"\n",
" self.results = results\n",
"\n",
" # Speichern der Daten in einer lokalen Datei\n",
" if len(n_pca_components) > 1:\n",
" with open('Experiments/results.pickle', 'wb') as f:\n",
" pickle.dump(self.results, f)\n",
" else:\n",
" with open(f'Experiments/{str(n_pca_components[0])}/results_{str(n_pca_components[0])}.pickle', 'wb') as f:\n",
" pickle.dump(self.results, f)\n",
"\n",
" plt.ion()\n",
"\n",
" return results\n",
"\n",
" def plot_and_save_results(self, results: List[Tuple], n_pca_components: int) -> None:\n",
" \"\"\"\n",
" Erstellt und speichert Plots für die Ergebnisse der Experimente.\n",
"\n",
" Parameter:\n",
" results : List[Tuple]\n",
" Eine Liste von Tupeln mit den Ergebnissen der Experimente.\n",
" n_pca_components : int\n",
" Anzahl der PCA-Komponenten, für die die Ergebnisse geplottet werden sollen.\n",
"\n",
" Keine Rückgabewerte, da die Methode Plots speichert.\n",
" \"\"\"\n",
" \n",
" # Mittelwerte und Standardabweichungen berechnen\n",
" train_losses, valid_losses, train_accuracies, valid_accuracies = zip(*results)\n",
"\n",
" train_losses = np.array(train_losses)\n",
" valid_losses = np.array(valid_losses)\n",
" train_accuracies = np.array(train_accuracies)\n",
" valid_accuracies = np.array(valid_accuracies)\n",
"\n",
" avg_train_losses = np.mean(train_losses, axis=0)\n",
" avg_valid_losses = np.mean(valid_losses, axis=0)\n",
" avg_train_acc = np.mean(train_accuracies, axis=0)\n",
" avg_valid_acc = np.mean(valid_accuracies, axis=0)\n",
"\n",
" std_train_losses = np.std(train_losses, axis=0)\n",
" std_valid_losses = np.std(valid_losses, axis=0)\n",
" std_train_acc = np.std(train_accuracies, axis=0)\n",
" std_valid_acc = np.std(valid_accuracies, axis=0)\n",
"\n",
" # Erstellen von Plots\n",
" epochs = range(1, len(avg_train_losses) + 1)\n",
"\n",
" # Plot für Verluste\n",
" plt.clf()\n",
" plt.plot(epochs, avg_train_losses, label='Mittlerer Trainingsverlust', color='r')\n",
" plt.fill_between(epochs, np.subtract(avg_train_losses, std_train_losses), np.add(avg_train_losses, std_train_losses), color='r', alpha=0.2)\n",
" plt.plot(epochs, avg_valid_losses, label='Mittlerer Validierungsverlust', color='b')\n",
" plt.fill_between(epochs, np.subtract(avg_valid_losses, std_valid_losses), np.add(avg_valid_losses, std_valid_losses), color='b', alpha=0.2)\n",
" plt.title(f'Mittelwert und Standardabweichung der Verluste für {n_pca_components} PCA-Komponenten')\n",
" plt.xlabel('Experiment Nummer')\n",
" plt.ylabel('Verlust')\n",
" plt.legend()\n",
" plt.savefig(f\"Experiments/{n_pca_components}/average_losses.png\", bbox_inches='tight')\n",
" plt.clf()\n",
"\n",
" # Plot für Genauigkeiten\n",
" plt.plot(epochs, avg_train_acc, label='Mittlere Trainingsgenauigkeit', color='r')\n",
" plt.fill_between(epochs, np.subtract(avg_train_acc, std_train_acc), np.add(avg_train_acc, std_train_acc), color='r', alpha=0.2)\n",
" plt.plot(epochs, avg_valid_acc, label='Mittlere Validierungsgenauigkeit', color='b')\n",
" plt.fill_between(epochs, np.subtract(avg_valid_acc, std_valid_acc), np.add(avg_valid_acc, std_valid_acc), color='b', alpha=0.2)\n",
" plt.title(f'Mittelwert und Standardabweichung der Genauigkeiten für {n_pca_components} PCA-Komponenten')\n",
" plt.xlabel('Experiment Nummer')\n",
" plt.ylabel('Genauigkeit')\n",
" plt.legend()\n",
" plt.savefig(f\"Experiments/{n_pca_components}/average_accuracies.png\", bbox_inches='tight')\n",
" plt.clf()\n",
"\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Starten der einzelnen Experimente, da in einer Schleife RAM Probleme auftreten"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e1 = ExperimentationalExperiments()\n",
"results = e1.run([1024], 10, n_epochs=500)\n",
"del e1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e2 = ExperimentationalExperiments()\n",
"results = e2.run([512], 10, n_epochs=500)\n",
"del e2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e3 = ExperimentationalExperiments()\n",
"results = e3.run([256], 10, n_epochs=500)\n",
"del e3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e4 = ExperimentationalExperiments()\n",
"results = e4.run([128], 10, n_epochs=500)\n",
"del e4"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e5 = ExperimentationalExperiments()\n",
"results = e5.run([64], 10, n_epochs=500)\n",
"del e5"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e6 = ExperimentationalExperiments()\n",
"results = e6.run([32], 10, n_epochs=500)\n",
"del e6"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e7 = ExperimentationalExperiments()\n",
"results = e7.run([16], 2, n_epochs=10)\n",
"del e7"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Lesen der Daten und Erstellen der Mittelwerte und anschließender Auswertung"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pickle\n",
"import os\n",
"import matplotlib.pyplot as plt\n",
"from typing import List, Dict, Tuple\n",
"\n",
"def load_results(path: str) -> Dict:\n",
" \"\"\"\n",
" Lädt und konvertiert die Ergebnisse aus Pickle-Dateien in den spezifizierten Verzeichnissen.\n",
"\n",
" Argumente:\n",
" path (str): Der Pfad zum Basisverzeichnis, das die Ergebnis-Unterverzeichnisse enthält.\n",
"\n",
" Rückgabe:\n",
" Dict: Ein Dictionary, in dem jeder Schlüssel einer PCA-Komponentenanzahl entspricht und\n",
" dessen Werte Achtertupel aus Durchschnitt und Standardabweichung von Trainingsverlust,\n",
" Validierungsverlust, Trainingsgenauigkeit und Validierungsgenauigkeit sind.\n",
" \"\"\"\n",
"\n",
" results = {}\n",
"\n",
" # Über alle Ordner im Basispfad iterieren\n",
" for directory in os.listdir(path):\n",
" full_path = os.path.join(path, directory)\n",
"\n",
" # Überprüfen, ob es sich um einen Ordner handelt\n",
" if os.path.isdir(full_path):\n",
" pickle_file = f'results_{directory}.pickle'\n",
" pickle_path = os.path.join(full_path, pickle_file)\n",
"\n",
" # Überprüfen, ob die Pickle-Datei existiert\n",
" if os.path.isfile(pickle_path):\n",
" # Pickle-Datei laden\n",
" with open(pickle_path, 'rb') as file:\n",
" results[directory] = pickle.load(file)\n",
"\n",
" converted_results = {}\n",
" for values in list(results.values()):\n",
" key = list(values.keys())[0]\n",
" value = list(values.values())[0]\n",
" converted_results[key] = value\n",
"\n",
" return converted_results\n",
"\n",
"\n",
"def calculate_means_for_n_last(results: Dict, n_last: int) -> Dict:\n",
" \"\"\"\n",
" Berechnet Durchschnittswerte und Standardabweichungen für die letzten `n_last` Ergebnisse.\n",
"\n",
" Argumente:\n",
" results (Dict): Ein Dictionary von Ergebnissen, wie von `load_results` zurückgegeben.\n",
" n_last (int): Anzahl der letzten Ergebnisse, die zur Berechnung verwendet werden sollen.\n",
"\n",
" Rückgabe:\n",
" Dict: Ein Dictionary mit Schlüsseln als PCA-Komponentenanzahlen und Werten als Achtertupel,\n",
" bestehend aus Durchschnitt und Standardabweichung von Trainingsverlust, \n",
" Validierungsverlust, Trainingsgenauigkeit und Validierungsgenauigkeit.\n",
" \"\"\"\n",
"\n",
" assert results is not None\n",
" assert n_last <= len(list(results.values())[0][0][0])\n",
"\n",
" means_and_stds = {}\n",
"\n",
" for key, value in results.items():\n",
" train_losses, valid_losses, train_accuracies, valid_accuracies = zip(*value)\n",
" \n",
" train_losses = train_losses[:n_last] \n",
" valid_losses = valid_losses[:n_last]\n",
" train_accuracies = train_accuracies[:n_last]\n",
" valid_accuracies = valid_accuracies[:n_last]\n",
"\n",
" avg_train_loss = np.mean(train_losses)#, axis=0)\n",
" avg_valid_loss = np.mean(valid_losses)#, axis=0)\n",
" avg_train_acc = np.mean(train_accuracies)#, axis=0)\n",
" avg_valid_acc = np.mean(valid_accuracies)#, axis=0)\n",
"\n",
" std_train_loss = np.std(train_losses)#, axis=0)\n",
" std_valid_loss = np.std(valid_losses)#, axis=0)\n",
" std_train_acc = np.std(train_accuracies)#, axis=0)\n",
" std_valid_acc = np.std(valid_accuracies)#, axis=0)\n",
"\n",
" means_and_stds[key] = (avg_train_loss, std_train_loss, avg_valid_loss, std_valid_loss, avg_train_acc, std_train_acc, avg_valid_acc, std_valid_acc)\n",
"\n",
" return means_and_stds\n",
"\n",
"\n",
"def plot_results(results: Dict, show_lines: bool = True) -> None:\n",
" \"\"\"\n",
" Stellt die Ergebnisse als Fehlerbalkendiagramme dar. Jedes Diagramm zeigt Mittelwert und\n",
" Standardabweichung von Trainings- und Validierungsverlust sowie -genauigkeit. \n",
" Fügt zusätzlich eine rote Linie für den höchsten Genauigkeitswert und den geringsten Verlustwert hinzu,\n",
" mit einer Beschriftung, die den Schlüssel des entsprechenden höchsten bzw. niedrigsten Werts anzeigt.\n",
"\n",
" Argumente:\n",
" results (Dict): Ein Dictionary von berechneten Mittelwerten und Standardabweichungen,\n",
" wie von `calculate_means_for_n_last` zurückgegeben.\n",
" show_lines (bool): Ein flag, das angibt, ob die Maximal- / Minimallinie gezeichnet werden soll.\n",
" \"\"\"\n",
" # Schlüssel sortieren\n",
" sorted_keys = sorted(results.keys(), key=lambda x: int(x))\n",
"\n",
" # Listen für das Plotten vorbereiten\n",
" mean_train_loss = [results[k][0] for k in sorted_keys]\n",
" std_train_loss = [results[k][1] for k in sorted_keys]\n",
" mean_validation_loss = [results[k][2] for k in sorted_keys]\n",
" std_validation_loss = [results[k][3] for k in sorted_keys]\n",
" mean_train_accuracy = [results[k][4] for k in sorted_keys]\n",
" std_train_accuracy = [results[k][5] for k in sorted_keys]\n",
" mean_validation_accuracy = [results[k][6] for k in sorted_keys]\n",
" std_validation_accuracy = [results[k][7] for k in sorted_keys]\n",
"\n",
" # Plotten\n",
" plt.figure(figsize=(12, 8))\n",
"\n",
" # Verluste\n",
" plt.errorbar(sorted_keys, mean_train_loss, yerr=std_train_loss, label='Trainingverlust', fmt='o', linestyle='--', alpha=0.8)\n",
" plt.errorbar(sorted_keys, mean_validation_loss, yerr=std_validation_loss, label='Validierungsverlust', fmt='o', linestyle='--', alpha=0.8)\n",
"\n",
" # Genauigkeiten\n",
" plt.errorbar(sorted_keys, mean_train_accuracy, yerr=std_train_accuracy, label='Trainingsgenauigkeit', fmt='x', linestyle='--', alpha=0.8)\n",
" plt.errorbar(sorted_keys, mean_validation_accuracy, yerr=std_validation_accuracy, label='Validierungsgenauigkeit', fmt='x', linestyle='--', alpha=0.8)\n",
"\n",
" # Gestaltung\n",
" plt.xlabel('Anzahl der PCA Komponenten')\n",
" plt.ylabel('Werte')\n",
" plt.title('Trainings- und Validierungsverlust und -genauigkeit')\n",
" plt.grid(True)\n",
"\n",
" # Höchste Genauigkeit und geringster Verlust\n",
" highest_accuracy = max(max(mean_train_accuracy), max(mean_validation_accuracy))\n",
" lowest_loss = min(min(mean_train_loss), min(mean_validation_loss))\n",
"\n",
" # Schlüssel für höchste Genauigkeit und geringsten Verlust finden\n",
" highest_acc_key = sorted_keys[mean_train_accuracy.index(max(mean_train_accuracy))] if max(mean_train_accuracy) > max(mean_validation_accuracy) else sorted_keys[mean_validation_accuracy.index(max(mean_validation_accuracy))]\n",
" lowest_loss_key = sorted_keys[mean_train_loss.index(min(mean_train_loss))] if min(mean_train_loss) < min(mean_validation_loss) else sorted_keys[mean_validation_loss.index(min(mean_validation_loss))]\n",
"\n",
" plt.legend()\n",
"\n",
" # Linien und Text für höchste Genauigkeit und geringsten Verlust\n",
" if show_lines:\n",
" plt.axhline(y=highest_accuracy, color='r', linestyle='-', alpha=0.5)\n",
" plt.text(0.95, highest_accuracy, f'Höchste Genauigkeit (PCA: {highest_acc_key})', verticalalignment='bottom', horizontalalignment='right', color='red', fontsize=8, transform=plt.gca().get_yaxis_transform())\n",
"\n",
" plt.axhline(y=lowest_loss, color='r', linestyle='-', alpha=0.5)\n",
" plt.text(0.95, lowest_loss, f'Geringster Verlust (PCA: {lowest_loss_key})', verticalalignment='top', horizontalalignment='right', color='red', fontsize=8, transform=plt.gca().get_yaxis_transform())\n",
" \n",
" plt.savefig('Endergebnisse_mit_Linien.png')\n",
" else:\n",
" plt.savefig('Endergebnisse_ohne_Linien.png')\n",
"\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA+kAAAK9CAYAAABYVS0qAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8WgzjOAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3wUdf7H8ddsT28kQOi9d0QFCwKKNKUooKiA/ax36lnuPBX7z+6hHnY9FQWk2AALytkrRWx0kB5I79kyvz+WTLIkgYS2SXg/75EjO/Od2c/OTmLeO9/5fg3TNE1EREREREREJOxs4S5ARERERERERIIU0kVERERERERqCYV0ERERERERkVpCIV1ERERERESkllBIFxEREREREaklFNJFREREREREagmFdBEREREREZFaQiFdREREREREpJZQSBcRERERERGpJRTSRUQOgylTptCyZcuD2vauu+7CMIzDW1A9smnTJgzD4JVXXjnqz20YBnfddZf1+JVXXsEwDDZt2nTAbVu2bMmUKVOsx0uXLsUwDJYuXXrY65SgY/kYDxw4kIEDB4a7jFrhUH5nlG77yCOPHJZaDuW/DSJy7FJIF5F6zTCMan0di3/U1yfXXXcdhmGwbt26Ktv885//xDAMfv7556NYmdQnX3/9NXfddRdZWVnhLkXqqIKCAu666y79N0dE9ssR7gJERI6k1157LeTxf//7Xz7++OMKyzt16nRIz/P8888TCAQOatvbb7+dW2+99ZCe/1g3adIkpk+fzsyZM7njjjsqbfPmm2/SrVs3unfvftDPc+GFFzJx4kTcbneNtz3llFMoLCzE5XId9PNLeH399ddMmzaNKVOmEB8fH+5ypAotWrSgsLAQp9MZ7lIq/LehoKCAadOmAajng4hUSSFdROq1Cy64IOTxt99+y8cff1xh+b4KCgqIjIys9vMcyh+DDocDh0O/jg/F8ccfT9u2bXnzzTcrDenffPMNGzdu5MEHHzyk57Hb7djt9oPa1maz4fF4Dun5y8vPzycqKuqw7a8uKyoq0ocfYjEM47D+rB2K2vBBgYjUPeruLiLHvIEDB9K1a1d++uknTjnlFCIjI/nHP/4BwDvvvMOIESNITU3F7XbTpk0b7rnnHvx+f8g+9r3vsPx9jc899xxt2rTB7XZz3HHH8cMPP4RsW9k96YZhcM0117BgwQK6du2K2+2mS5cuLF68uEL9S5cupW/fvng8Htq0acOzzz5b6T4//vhjTjrpJOLj44mOjqZDhw7W66ypqu79rexe0ClTphAdHc22bdsYPXo00dHRJCcnc9NNN1U4jllZWUyZMoW4uDji4+OZPHlytbsWT5o0iT/++INly5ZVWDdz5kwMw+C8886jpKSEO+64gz59+hAXF0dUVBQnn3wyn3322QGfo7J70k3T5N5776Vp06ZERkZy2mmn8euvv1bYtqpj9t1333HmmWcSFxdHZGQkp556Kl999VVIm9L387fffuP8888nISGBk046Caj6XuRDOScB5syZQ+fOnfF4PHTt2pX58+dXen/tW2+9RZ8+fYiJiSE2NpZu3brx5JNPAvDjjz9iGAavvvpqhf1/+OGHGIbB+++/by3btm0bF198MQ0bNrTO+ZdeeqnS4/jWW29x++2306RJEyIjI8nJyanwHFBxbIBSlR236dOn06VLFyIjI0lISKBv377MnDkTCL4Hf//73wFo1aqVdavM/sYnqO5zl76m2bNnc99999G0aVM8Hg+DBw+u9BaO0vcvIiKCfv368cUXX1RZQ3Wlp6dz4YUXEhsba/3srVy5stJ7u//44w/OOeccEhMT8Xg89O3bl3fffTekTenPyldffcUNN9xAcnIyUVFRjBkzht27d4e0re7v2eoez6ruSa/uOb0v0zS5/PLLcblczJs3z1r++uuv06dPHyIiIkhMTGTixIls2bIlZNvy+9+0aRPJyckATJs2zTqHyo97ISICupIuIgIE/0AdNmwYEydO5IILLqBhw4ZA8A/N6OhobrjhBqKjo/n000+54447yMnJ4eGHHz7gfmfOnElubi5XXHEFhmHw0EMPMXbsWDZs2HDAKyxffvkl8+bN46qrriImJoZ///vfjBs3jj///JOkpCQAli9fzplnnknjxo2ZNm0afr+fu+++2/pDsNSvv/7KyJEj6d69O3fffTdut5t169ZVCINHit/vZ+jQoRx//PE88sgjfPLJJzz66KO0adOGv/zlL0DwD+Gzzz6bL7/8kiuvvJJOnToxf/58Jk+eXK3nmDRpEtOmTWPmzJn07t075Llnz57NySefTPPmzdmzZw8vvPAC5513Hpdddhm5ubm8+OKLDB06lO+//56ePXvW6LXdcccd3HvvvQwfPpzhw4ezbNkyzjjjDEpKSg647aeffsqwYcPo06cPd955JzabjZdffplBgwbxxRdf0K9fv5D25557Lu3ateP+++/HNM0a1VmqOufkBx98wIQJE+jWrRsPPPAAmZmZXHLJJTRp0iRkXx9//DHnnXcegwcP5v/+7/8A+P333/nqq6+4/vrr6du3L61bt2b27NkV3sdZs2aRkJDA0KFDAdi1axcnnHCC9QFVcnIyixYt4pJLLiEnJ4e//vWvIdvfc889uFwubrrpJoqLiw/5Svrzzz/PddddxznnnMP1119PUVERP//8M9999x3nn38+Y8eOZc2aNbz55ps8/vjjNGjQAKDCz9qhePDBB7HZbNx0001kZ2fz0EMPMWnSJL777jurzYsvvsgVV1xB//79+etf/8qGDRs466yzSExMpFmzZgf1vIFAgFGjRvH999/zl7/8hY4dO/LOO+9U+rP366+/MmDAAJo0acKtt95KVFQUs2fPZvTo0cydO5cxY8aEtL/22mtJSEjgzjvvZNOmTTzxxBNcc801zJo1y2pzqL9nq6O65/S+/H4/F198MbNmzWL+/PmMGDECgPvuu49//etfjB8/nksvvZTdu3czffp0TjnlFJYvX17p7RDJycn85z//4S9/+Qtjxoxh7NixAId0C46I1FOmiMgx5Oqrrzb3/dV36qmnmoA5Y8aMCu0LCgoqLLviiivMyMhIs6ioyFo2efJks0WLFtbjjRs3moCZlJRkZmRkWMvfeecdEzDfe+89a9mdd95ZoSbAdLlc5rp166xlK1euNAFz+vTp1rJRo0aZkZGR5rZt26xla9euNR0OR8g+H3/8cRMwd+/eXelxqanPPvvMBMzPPvssZHnp63755ZetZZMnTzYB8+677w5p26tXL7NPnz7W4wULFpiA+dBDD1nLfD6fefLJJ1fYZ1WOO+44s2nTpqbf77eWLV682ATMZ5991tpncXFxyHaZmZlmw4YNzYsvvjhkOWDeeeed1uOXX37ZBMyNGzeapmmaaWlppsvlMkeMGGEGAgGr3T/+8Q8TMCdPnmwt2/eYBQIBs127dubQoUNDti0oKDBbtWplnn766day0nPkvPPOq/CaTz31VPPUU0+tsPxQzslu3bqZTZs2NXNzc61lS5cuN
YGQfV5//fVmbGys6fP5Kjx/qdtuu810Op0hz1lcXGzGx8eHHO9LLrnEbNy4sblnz56Q7SdOnGjGxcVZP4ulx7F169YVfj4rOy9btGgR8j6U2ve4nX322WaXLl2qfB2maZoPP/xwyPt/INV97tK6O3XqFHJuPvnkkyZgrlq1yjRN0ywpKTFTUlLMnj17hrR77rnnTKDS86A65s6dawLmE088YS3z+/3moEGDKvzsDR482OzWrVvI779AIGD279/fbNeunbWs9GdlyJAhIef33/72N9Nut5tZWVnWsur+nq3u8azs91B1z+nSbR9++GHT6/WaEyZMMCMiIswPP/zQarNp0ybTbreb9913X0gdq1atMh0OR8jyfX8Od+/eXeH3iojIvtTdXUQEcLvdTJ06tcLyiIgI6/vc3Fz27NnDySefTEFBAX/88ccB9zthwgQSEhKsxyeffDIAGzZsOOC2Q4YMoU2bNtbj7t27Exsba23r9/v55JNPGD16NKmpqVa7tm3bMmzYsJB9lV7Veeeddw5
"text/plain": [
"<Figure size 1200x800 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA+kAAAK9CAYAAABYVS0qAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8WgzjOAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3wU1drA8d9sz6YH0gkthN5BFFBAigEhSlGKSFfsXl7lilhQVLAgihfloqJgoVcVpAtXxYJKVekEpAQCpJNsts37x5IhSxJIaBvg+frJh52ZMzNnZs+u+8xpiqqqKkIIIYQQQgghhPA5na8zIIQQQgghhBBCCA8J0oUQQgghhBBCiHJCgnQhhBBCCCGEEKKckCBdCCGEEEIIIYQoJyRIF0IIIYQQQgghygkJ0oUQQgghhBBCiHJCgnQhhBBCCCGEEKKckCBdCCGEEEIIIYQoJyRIF0IIIYQQQgghygkJ0oUQ4jIYPHgwVatWvah9X375ZRRFubwZuo4cOHAARVGYMWPGVT+3oii8/PLL2vKMGTNQFIUDBw5ccN+qVasyePBgbXn9+vUoisL69esvez6Fx418j9u1a0e7du18nY1y4VK+Mwr2ffvtty9LXi7l/w1CiBuXBOlCiOuaoiil+rsRf9RfT5588kkURWHv3r0lpnn++edRFIVt27ZdxZyJ68lPP/3Eyy+/TEZGhq+zIq5Rubm5vPzyy/L/HCHEeRl8nQEhhLiSvvjiC6/lzz//nNWrVxdZX6dOnUs6z8cff4zb7b6ofV944QWeffbZSzr/ja5///5MnjyZWbNmMWbMmGLTzJ49mwYNGtCwYcOLPs+AAQPo27cvZrO5zPu2adOGvLw8TCbTRZ9f+NZPP/3E2LFjGTx4MCEhIb7OjihBlSpVyMvLw2g0+jorRf7fkJuby9ixYwGk5YMQokQSpAshrmv333+/1/Ivv/zC6tWri6w/V25uLlartdTnuZQfgwaDAYNBvo4vxc0330yNGjWYPXt2sUH6zz//THJyMm+88cYlnUev16PX6y9qX51Oh8ViuaTzF3b69Gn8/f0v2/GuZTabTR5+CI2iKJf1s3YpysODAiHEtUeauwshbnjt2rWjfv36/PHHH7Rp0war1cpzzz0HwFdffUXXrl2JiYnBbDYTHx/Pq6++isvl8jrGuf0OC/dr/Oijj4iPj8dsNnPTTTfx22+/ee1bXJ90RVF4/PHHWbJkCfXr18dsNlOvXj1WrFhRJP/r16+nefPmWCwW4uPj+fDDD4s95urVq7n11lsJCQkhICCAWrVqaddZViX1/S2uL+jgwYMJCAjgyJEjdO/enYCAAMLDwxk5cmSR+5iRkcHgwYMJDg4mJCSEQYMGlbppcf/+/dm5cyebNm0qsm3WrFkoikK/fv2w2+2MGTOGZs2aERwcjL+/P7fddhvr1q274DmK65OuqiqvvfYalSpVwmq1cvvtt/PXX38V2beke/brr7/SuXNngoODsVqttG3blg0bNnilKXg///77b+677z5CQ0O59dZbgZL7Il9KmQSYP38+devWxWKxUL9+fRYvXlxs/9o5c+bQrFkzAgMDCQoKokGDBrz33nsA/P777yiKwmeffVbk+CtXrkRRFJYuXaqtO3LkCEOHDiUyMlIr859++mmx93HOnDm88MILxMbGYrVaycrKKnIOKDo2QIHi7tvkyZOpV68eVquV0NBQmjdvzqxZswDPe/Dvf/8bgGrVqmldZc43PkFpz11wTfPmzWPcuHFUqlQJi8VChw4diu3CUfD++fn50aJFC3744YcS81Bap06dYsCAAQQFBWmfva1btxbbt3vnzp3cc889hIWFYbFYaN68OV9//bVXmoLPyoYNG3jqqacIDw/H39+fHj16cOLECa+0pf2eLe39LKlPemnL9LlUVWX48OGYTCYWLVqkrf/yyy9p1qwZfn5+hIWF0bdvXw4dOuS1b+HjHzhwgPDwcADGjh2rlaHC414IIQRITboQQgCeH6hdunShb9++3H///URGRgKeH5oBAQE89dRTBAQE8N133zFmzBiysrKYMGHCBY87a9YssrOzeeihh1AUhbfeeouePXuyf//+C9aw/PjjjyxatIhHH32UwMBA/vOf/9CrVy/++ecfKlSoAMDmzZvp3Lkz0dHRjB07FpfLxSuvvKL9ECzw119/0a1bNxo2bMgrr7yC2Wxm7969RYLBK8XlcpGYmMjNN9/M22+/zZo1a5g4cSLx8fE88sgjgOeH8N13382PP/7Iww8/TJ06dVi8eDGDBg0q1Tn69+/P2LFjmTVrFk2bNvU697x587jtttuoXLkyJ0+eZNq0afTr148HH3yQ7OxsPvnkExITE9m4cSONGzcu07WNGTOG1157jTvvvJM777yTTZs2cccdd2C32y+473fffUeXLl1o1qwZL730EjqdjunTp9O+fXt++OEHWrRo4ZX+3nvvJSEhgfHjx6OqapnyWaA0ZXLZsmX06dOHBg0a8Prrr5Oens6wYcOIjY31Otbq1avp168fHTp04M033wRgx44dbNiwgX/96180b96c6tWrM2/evCLv49y5cwkNDSUxMRGA48ePc8stt2gPqMLDw1m+fDnDhg0jKyuLESNGeO3/6quvYjKZGDlyJPn5+Zdck/7xxx/z5JNPcs899/Cvf/0Lm83Gtm3b+PXXX7nvvvvo2bMnu3fvZvbs2bz77rtUrFgRoMhn7VK88cYb6HQ6Ro4cSWZmJm+99Rb9+/fn119/1dJ88sknPPTQQ7Rq1YoRI0awf/9+7rrrLsLCwoiLi7uo87rdbpKSkti4cSOPPPIItWvX5quvvir2s/fXX3/RunVrYmNjefbZZ/H392fevHl0796dhQsX0qNHD6/0TzzxBKGhobz00kscOHCASZMm8fjjjzN37lwtzaV+z5ZGacv0uVwuF0OHDmXu3LksXryYrl27AjBu3DhefPFFevfuzQMPPMCJEyeYPHkybdq0YfPmzcV2hwgPD+e///0vjzzyCD169KBnz54Al9QFRwhxnVKFEOIG8thjj6nnfvW1bdtWBdSpU6cWSZ+bm1tk3UMPPaRarVbVZrNp6wYNGqRWqVJFW05OTlYBtUKFCmpaWpq2/quvvlIB9ZtvvtHWvfTSS0XyBKgmk0ndu3evtm7r1q0qoE6ePFlbl5SUpFqtVvXIkSPauj179qgGg8HrmO+++64KqCdOnCj2vpTVunXrVEBdt26d1/qC654+fbq2btCgQSqgvvLKK15pmzRpojZr1kxbXrJkiQqob731lrbO6XSqt912W5FjluSmm25SK1WqpLpcLm3dihUrVED98MMPtWPm5+d77Zeenq5GRkaqQ4cO9VoPqC+99JK2PH36dBVQk5OTVVVV1dTUVNVkMqldu3ZV3W63lu65555TAXXQoEHaunPvmdvtVhMSEtTExESvfXNzc9Vq1aqpnTp10tYVlJF+/foVuea2bduqbdu2LbL+UspkgwYN1EqVKqnZ2dnauvXr16uA1zH/9a9/qUFBQarT6Sxy/
gKjR49WjUaj1znz8/PVkJAQr/s9bNgwNTo6Wj158qTX/n379lWDg4O1z2LBfaxevXqRz2dx5bJKlSpe70OBc+/b3XffrdarV6/E61BVVZ0wYYLX+38hpT13Qb7r1KnjVTbfe+89FVC3b9+uqqqq2u12NSIiQm3cuLFXuo8++kgFii0HpbFw4UIVUCdNmqStc7lcavv27Yt89jp06KA2aNDA6/vP7XarrVq1UhMSErR1BZ+Vjh07epXv//u//1P1er2akZGhrSvt92xp72dx30OlLdMF+06YMEF1OBxqnz59VD8/P3XlypVamgMHDqh6vV4dN26cVz62b9+uGgwGr/Xnfg5PnDhR5HtFCCHOJc3dhRACMJvNDBkypMh6Pz8/7XV2djYnT57ktttuIzc3l507d17wuH369CE0NFRbvu222wDYv3//Bfft2LEj8fHx2nLDhg0JCgrS9nW5XKxZs4bu3bsTExOjpatRowZdunTxOlZBrc5XX3110QPcXaqHH37Ya/m2227zug/ffvstBoN
"text/plain": [
"<Figure size 1200x800 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"\n",
"# Ergebnisse laden\n",
"base_path = 'Experiments'\n",
"loaded_results = load_results(base_path)\n",
"\n",
"# Ergebnisse verarbeiten und plotten\n",
"m_a_s = calculate_means_for_n_last(loaded_results, 200)\n",
"plot_results(m_a_s)\n",
"plot_results(m_a_s, show_lines=False)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# TODO MNIST datenstaz mit wget ohne tensorflow oder pytorch einlesen"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "rl",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.18"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}