{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Laden der Rohdaten"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import pickle\n",
"\n",
"# Laden der 'kirp' Liste aus der Pickle-Datei\n",
"with open('rick.pickle', 'rb') as f:\n",
" data_frame = pickle.load(f)"
]
},
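{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick, hedged sanity check of the loaded object: the `GenomeDataset` class defined further down assumes a pandas DataFrame with the columns `cancer_type` and `genome_frequencies`. The cell below only verifies that assumption and is illustrative, not part of the original analysis."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative check: the pickle is assumed to contain a pandas DataFrame\n",
"# with 'cancer_type' and 'genome_frequencies' columns (as used by GenomeDataset below).\n",
"print(type(data_frame))\n",
"print(data_frame.columns.tolist())\n",
"print(data_frame['cancer_type'].value_counts())\n",
"print(len(data_frame['genome_frequencies'].iloc[0]), 'frequency values in the first sample')"
]
},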
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Aktiviere Cuda Support"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"device = \"cpu\"\n",
"if torch.cuda.is_available():\n",
" print(\"CUDA is available on your system.\")\n",
" device = \"cuda\"\n",
"else:\n",
" print(\"CUDA is not available on your system.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# PCA Klasse zu Reduktion der Dimensionen"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from torch.utils.data import Dataset\n",
"import torch\n",
"import pandas as pd\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from sklearn.decomposition import PCA\n",
"from sklearn.preprocessing import StandardScaler\n",
"from sklearn.model_selection import train_test_split\n",
"from typing import List, Tuple, Dict\n",
"\n",
"\n",
"class GenomeDataset(Dataset):\n",
" \"\"\"\n",
" Eine benutzerdefinierte Dataset-Klasse, die für die Handhabung von Genomdaten konzipiert ist.\n",
" Diese Klasse wendet eine Principal Component Analysis (PCA) auf die Frequenzen der Genome an\n",
" und teilt den Datensatz in Trainings- und Validierungsteile auf.\n",
"\n",
" Attributes:\n",
" dataframe (pd.DataFrame): Ein Pandas DataFrame, der die initialen Daten enthält.\n",
" train_df (pd.DataFrame): Ein DataFrame, der den Trainingsdatensatz nach der Anwendung von PCA und der Aufteilung enthält.\n",
" val_df (pd.DataFrame): Ein DataFrame, der den Validierungsdatensatz nach der Anwendung von PCA und der Aufteilung enthält.\n",
"\n",
" Methods:\n",
" __init__(self, dataframe, n_pca_components=1034, train_size=0.8, split_random_state=42):\n",
" Konstruktor für die GenomeDataset Klasse.\n",
" _do_PCA(self, frequencies, n_components=1034):\n",
" Wendet PCA auf die gegebenen Frequenzen an.\n",
" _split_dataset(self, train_size=0.8, random_state=42):\n",
" Teilt den DataFrame in Trainings- und Validierungsdatensätze auf.\n",
" __getitem__(self, index):\n",
" Gibt ein Tupel aus transformierten Frequenzen und dem zugehörigen Krebstyp für einen gegebenen Index zurück.\n",
" __len__(self):\n",
" Gibt die Gesamtlänge der kombinierten Trainings- und Validierungsdatensätze zurück.\n",
" \"\"\"\n",
"\n",
" def __init__(self, dataframe: pd.DataFrame, n_pca_components: int = 1034, train_size: float = 0.8, split_random_state: int = 42):\n",
" \"\"\"\n",
" Konstruktor für die GenomeDataset Klasse.\n",
"\n",
" Parameters:\n",
" dataframe (pd.DataFrame): Der DataFrame, der die Genome Frequenzen und Krebsarten enthält.\n",
" n_pca_components (int): Die Anzahl der PCA-Komponenten, auf die reduziert werden soll. Standardwert ist 1034.\n",
" train_size (float): Der Anteil der Daten, der als Trainingsdaten verwendet werden soll. Standardwert ist 0.8.\n",
" split_random_state (int): Der Zufalls-Saatwert, der für die Aufteilung des Datensatzes verwendet wird. Standardwert ist 42.\n",
" \"\"\"\n",
" self.dataframe = dataframe\n",
"\n",
" # Umwandlung der Krebsarten in numerische Werte\n",
" self.label_encoder = LabelEncoder()\n",
" self.dataframe['encoded_cancer_type'] = self.label_encoder.fit_transform(dataframe['cancer_type'])\n",
"\n",
" # Anwenden der PCA auf die Frequenzen\n",
" self.dataframe['pca_frequencies'] = self._do_PCA(self.dataframe['genome_frequencies'].tolist(), n_pca_components)\n",
"\n",
" # Teilen des DataFrame in Trainings- und Validierungsdatensatz\n",
" self._split_dataset(train_size=train_size, random_state=split_random_state)\n",
"\n",
" def transform_datapoint(self, datapoint: List[float]) -> List[float]:\n",
" \"\"\"\n",
" Transformiert einen einzelnen Datenpunkt durch Standardisierung und Anwendung der PCA.\n",
"\n",
" Diese Methode nimmt einen rohen Datenpunkt (eine Liste von Frequenzen), standardisiert ihn mit dem \n",
" zuvor angepassten Scaler und wendet dann die PCA-Transformation an, um ihn in den reduzierten \n",
" Feature-Raum zu überführen, der für das Training des Modells verwendet wurde.\n",
"\n",
" Parameters:\n",
" datapoint (List[float]): Ein roher Datenpunkt, bestehend aus einer Liste von Frequenzen.\n",
"\n",
" Returns:\n",
" List[float]: Der transformierte Datenpunkt, nach Anwendung der Standardisierung und der PCA.\n",
" \"\"\"\n",
" # Standardisierung des Datenpunkts\n",
" scaled_data_point = self.scaler.transform([datapoint])\n",
"\n",
" # PCA-Transformation des standardisierten Datenpunkts\n",
" pca_transformed_point = self.pca.transform(scaled_data_point)\n",
"\n",
" return pca_transformed_point.tolist()\n",
"\n",
" def _do_PCA(self, frequencies: List[List[float]], n_components: int = 1034) -> List[List[float]]:\n",
" \"\"\"\n",
" Wendet PCA auf die gegebenen Frequenzen an.\n",
"\n",
" Parameters:\n",
" frequencies (List[List[float]]): Die Liste der Frequenzen, auf die die PCA angewendet werden soll.\n",
" n_components (int): Die Anzahl der Komponenten für die PCA. Standardwert ist 1034.\n",
"\n",
" Returns:\n",
" List[List[float]]: Eine Liste von Listen, die die transformierten Frequenzen nach der PCA darstellt.\n",
" \"\"\"\n",
"\n",
" # Standardisieren der Frequenzen\n",
" self.scaler = StandardScaler()\n",
" scaled_frequencies = self.scaler.fit_transform(frequencies)\n",
"\n",
" # PCA-Instanz erstellen und auf die gewünschte Anzahl von Komponenten reduzieren\n",
" self.pca = PCA(n_components=n_components)\n",
"\n",
" # PCA auf die Frequenzen anwenden\n",
" pca_result = self.pca.fit_transform(scaled_frequencies)\n",
"\n",
" return pca_result.tolist()\n",
"\n",
" def _split_dataset(self, train_size: float = 0.8, random_state: int = 42):\n",
" \"\"\"\n",
" Teilt den DataFrame in Trainings- und Validierungsdatensätze auf.\n",
"\n",
" Parameters:\n",
" train_size (float): Der Anteil der Daten, der als Trainingsdaten verwendet werden soll.\n",
" random_state (int): Der Zufalls-Saatwert, der für die Aufteilung des Datensatzes verwendet wird.\n",
" \"\"\"\n",
"\n",
" class SplittedDataset(Dataset):\n",
" def __init__(self, dataframe):\n",
" self.dataframe = dataframe\n",
"\n",
" # Umwandlung der Genome Frequenzen in Tensoren\n",
" self.genome_frequencies = torch.tensor(dataframe['pca_frequencies'].tolist(), dtype=torch.float32)\n",
"\n",
" # Umwandlung der Krebsarten in numerische Werte\n",
" self.label_encoder = LabelEncoder()\n",
" self.cancer_types = torch.tensor(dataframe['encoded_cancer_type'].tolist(), dtype=torch.long)\n",
"\n",
" def __getitem__(self, index):\n",
" # Rückgabe eines Tupels aus Genome Frequenzen und dem entsprechenden Krebstyp\n",
" return self.genome_frequencies[index], self.cancer_types[index]\n",
"\n",
" def __len__(self):\n",
" return len(self.dataframe)\n",
"\n",
" # Teilen des DataFrame in Trainings- und Validierungsdatensatz\n",
" train_df, val_df = train_test_split(self.dataframe, train_size=train_size) #, random_state=random_state)\n",
" self.train_df = SplittedDataset(train_df)\n",
" self.val_df = SplittedDataset(val_df)\n",
"\n",
"\n",
" def __getitem__(self, index: int) -> Tuple[torch.Tensor, int]:\n",
" \"\"\"\n",
" Gibt ein Tupel aus transformierten Frequenzen und dem entsprechenden Krebstyp für einen gegebenen Index zurück.\n",
"\n",
" Parameters:\n",
" index (int): Der Index des zu abrufenden Datenelements.\n",
"\n",
" Returns:\n",
" Tuple[torch.Tensor, int]: Ein Tupel, bestehend aus einem Tensor der transformierten Frequenzen und dem zugehörigen Krebstyp.\n",
" \"\"\"\n",
"\n",
" print(self.train_df.shape)\n",
" print(self.val_df.shape)\n",
" \n",
" if index < len(self.train_df):\n",
" row = self.train_df.iloc[index]\n",
" else:\n",
" row = self.val_df.iloc[len(self.train_df) - index]\n",
"\n",
" pca_frequencies_tensor = torch.tensor(row['pca_frequencies'], dtype=torch.float32)\n",
" cancer_type = row['encoded_cancer_type']\n",
"\n",
" return pca_frequencies_tensor, cancer_type\n",
"\n",
" def __len__(self) -> int:\n",
" \"\"\"\n",
" Gibt die Gesamtlänge der kombinierten Trainings- und Validierungsdatensätze zurück.\n",
"\n",
" Returns:\n",
" int: Die Länge der kombinierten Datensätze.\n",
" \"\"\"\n",
" \n",
" return len(self.train_df) + len(self.val_df)\n"
]
},
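{
"cell_type": "markdown",
"metadata": {},
"source": [
"Hedged usage sketch for `GenomeDataset.transform_datapoint`: the method is defined above but never called elsewhere in this notebook. The cell below only illustrates the intended call pattern (fit the dataset first, then project a single raw frequency vector into the fitted PCA space). The names `demo_dataset` and `raw_point` and the choice of 128 components are assumptions made for the example."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Illustrative only: fit scaler + PCA via the dataset, then transform one raw sample\n",
"demo_dataset = GenomeDataset(data_frame, n_pca_components=128)\n",
"raw_point = data_frame['genome_frequencies'].iloc[0]\n",
"transformed = demo_dataset.transform_datapoint(raw_point)\n",
"print(len(transformed[0]), 'PCA components in the transformed sample')\n",
"del demo_dataset"
]
},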
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Definition des neuronalen Netzes"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"import torch.nn.functional as F\n",
"from sklearn.metrics import confusion_matrix\n",
"\n",
"class CancerClassifierNN(nn.Module):\n",
" \"\"\"\n",
" Eine benutzerdefinierte neuronale Netzwerkklassifikator-Klasse für die Krebsklassifikation.\n",
"\n",
" Diese Klasse definiert ein mehrschichtiges Perzeptron (MLP), das für die Klassifizierung von Krebsarten\n",
" anhand genetischer Frequenzdaten verwendet wird.\n",
"\n",
" Attributes:\n",
" fc1 (nn.Linear): Die erste lineare Schicht des Netzwerks.\n",
" fc2 (nn.Linear): Die zweite lineare Schicht des Netzwerks.\n",
" fc3 (nn.Linear): Die dritte lineare Schicht des Netzwerks.\n",
" fc4 (nn.Linear): Die Ausgabeschicht des Netzwerks.\n",
" dropout (nn.Dropout): Ein Dropout-Layer zur Vermeidung von Overfitting.\n",
"\n",
" Methods:\n",
" __init__(self, input_size: int, num_classes: int):\n",
" Konstruktor für die CancerClassifierNN Klasse.\n",
" forward(self, x: torch.Tensor) -> torch.Tensor:\n",
" Definiert den Vorwärtsdurchlauf des Netzwerks.\n",
" \"\"\"\n",
"\n",
" def __init__(self, input_size: int, num_classes: int):\n",
" \"\"\"\n",
" Konstruktor für die CancerClassifierNN Klasse.\n",
"\n",
" Parameters:\n",
" input_size (int): Die Größe des Input-Features.\n",
" num_classes (int): Die Anzahl der Zielklassen.\n",
" \"\"\"\n",
" super(CancerClassifierNN, self).__init__()\n",
" # Definieren der Schichten\n",
" self.fc1 = nn.Linear(input_size, input_size) # Eingabeschicht\n",
" self.fc2 = nn.Linear(input_size, input_size//2) # Versteckte Schicht\n",
" self.fc3 = nn.Linear(input_size//2, input_size//4) # Weitere versteckte Schicht\n",
" self.fc4 = nn.Linear(input_size//4, num_classes) # Ausgabeschicht\n",
" self.dropout = nn.Dropout(p=0.5) # Dropout\n",
"\n",
" def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
" \"\"\"\n",
" Definiert den Vorwärtsdurchlauf des Netzwerks.\n",
"\n",
" Parameters:\n",
" x (torch.Tensor): Der Input-Tensor für das Netzwerk.\n",
"\n",
" Returns:\n",
" torch.Tensor: Der Output-Tensor nach dem Durchlauf durch das Netzwerk.\n",
" \"\"\"\n",
" x = F.relu(self.fc1(x))\n",
" x = self.dropout(x)\n",
" x = F.relu(self.fc2(x))\n",
" x = self.dropout(x)\n",
" x = F.relu(self.fc3(x))\n",
" x = self.dropout(x)\n",
" x = self.fc4(x) # Return raw logits; nn.CrossEntropyLoss applies log-softmax internally\n",
" return x\n",
" \n",
" def calculate_confusion_matrix(self, model, dataset_loader):\n",
" \"\"\"\n",
" Berechnet die Konfusionsmatrix für das gegebene Modell und den Datensatz.\n",
"\n",
" Parameters:\n",
" model (torch.nn.Module): Das PyTorch-Modell.\n",
" dataset_loader (torch.utils.data.DataLoader): Der DataLoader für den Datensatz.\n",
"\n",
" Returns:\n",
" np.array: Die Konfusionsmatrix.\n",
" \"\"\"\n",
" model.eval()\n",
" all_preds = []\n",
" all_targets = []\n",
"\n",
" with torch.no_grad():\n",
" for data, target in dataset_loader:\n",
" outputs = model(data)\n",
" _, preds = torch.max(outputs, 1)\n",
" all_preds.extend(preds.cpu().numpy())\n",
" all_targets.extend(target.cpu().numpy())\n",
"\n",
" return confusion_matrix(all_targets, all_preds)"
]
},
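{
"cell_type": "markdown",
"metadata": {},
"source": [
"A small, hedged shape check for `CancerClassifierNN`: one forward pass on a random batch to confirm that the network produces one score per class. The batch size of 4, the feature size of 128 and the variable names are illustrative assumptions, not part of the experiments below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sanity check: a random batch of 4 samples with 128 features should yield a (4, 3) output\n",
"check_model = CancerClassifierNN(input_size=128, num_classes=3).to(device)\n",
"check_model.eval()\n",
"dummy_batch = torch.randn(4, 128, device=device)\n",
"with torch.no_grad():\n",
"    print(check_model(dummy_batch).shape)\n",
"del check_model, dummy_batch"
]
},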
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from torch.utils.data import DataLoader\n",
"import torch.optim as optim\n",
"from IPython.display import clear_output\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import os\n",
"import pickle\n",
"\n",
"class ExperimentationalExperiments():\n",
" \"\"\"\n",
" Diese Klasse dient zur Durchführung und Verwaltung von Experimenten im Rahmen\n",
" des maschinellen Lernens, insbesondere für die Krebsklassifikation.\n",
"\n",
" Attribute:\n",
" results : Dict\n",
" Speichert die Ergebnisse der durchgeführten Experimente.\n",
" \"\"\"\n",
"\n",
" def __init__(self) -> None:\n",
" \"\"\" Konstruktor der Klasse. Initialisiert 'results' als None. \"\"\"\n",
" self.results = None\n",
"\n",
" def run_single_experiment(self, train_loader: DataLoader, valid_loader: DataLoader, n_pca_components: int, n_epochs: int = 200, learning_rate: int = 0.0005, verbose: bool = True, experiment_num: int = None) -> Tuple:\n",
2024-01-05 15:56:33 +01:00
" \"\"\"\n",
" Führt ein einzelnes Experiment mit dem spezifizierten DataLoader, PCA-Komponenten und weiteren Parametern durch.\n",
"\n",
" Parameter:\n",
" train_loader : DataLoader\n",
" Der DataLoader für den Trainingsdatensatz.\n",
" valid_loader : DataLoader\n",
" Der DataLoader für den Validierungsdatensatz.\n",
" n_pca_components : int\n",
" Anzahl der PCA-Komponenten, die im Modell verwendet werden.\n",
" n_epochs : int, optional\n",
" Anzahl der Epochen für das Training (Standardwert ist 200).\n",
" learning_rate : float, optional\n",
" Lernrate für den Optimierer (Standardwert ist 0.0005).\n",
" verbose : bool, optional\n",
" Gibt an, ob der Trainingsfortschritt angezeigt werden soll (Standardwert ist True).\n",
" experiment_num : int, optional\n",
" Nummer des Experiments.\n",
"\n",
" Rückgabewerte:\n",
" Tuple\n",
" Ein Tupel bestehend aus Listen der Trainings- und Validierungsverluste sowie der Genauigkeiten.\n",
" \"\"\"\n",
" if not isinstance(n_pca_components, int):\n",
" raise TypeError(\"n_pca_components must be an integers!\")\n",
"\n",
" model = CancerClassifierNN(input_size=n_pca_components, num_classes=3)\n",
" model.to(device=device)\n",
"\n",
" # Verlustfunktion\n",
" criterion = nn.CrossEntropyLoss()\n",
" # Optimierer\n",
" optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n",
"\n",
" # Listen, um Verluste zu speichern\n",
" train_losses = []\n",
" valid_losses = []\n",
" train_accuracies = []\n",
" valid_accuracies = []\n",
"\n",
" for epoch in range(n_epochs):\n",
" model.train()\n",
" train_loss = 0.0\n",
" correct_predictions = 0\n",
" total_predictions = 0\n",
"\n",
" for i, (inputs, labels) in enumerate(train_loader):\n",
" inputs, labels = inputs.to(device), labels.to(device)\n",
" optimizer.zero_grad()\n",
" outputs = model(inputs)\n",
" loss = criterion(outputs, labels)\n",
" loss.backward()\n",
" optimizer.step()\n",
" train_loss += loss.item()\n",
"\n",
" # Berechnen der Genauigkeit\n",
" _, predicted = torch.max(outputs, 1)\n",
" correct_predictions += (predicted == labels).sum().item()\n",
" total_predictions += labels.size(0)\n",
"\n",
" # Durchschnittlicher Trainingsverlust und Genauigkeit\n",
" train_loss /= len(train_loader)\n",
" train_accuracy = correct_predictions / total_predictions\n",
" train_losses.append(train_loss)\n",
" train_accuracies.append(train_accuracy)\n",
"\n",
" # Validierungsverlust und Genauigkeit\n",
" model.eval()\n",
" valid_loss = 0.0\n",
" correct_predictions = 0\n",
" total_predictions = 0\n",
"\n",
" with torch.no_grad():\n",
" for inputs, labels in valid_loader:\n",
" inputs, labels = inputs.to(device), labels.to(device)\n",
" outputs = model(inputs)\n",
" loss = criterion(outputs, labels)\n",
" valid_loss += loss.item()\n",
"\n",
" # Berechnen der Genauigkeit\n",
" _, predicted = torch.max(outputs, 1)\n",
" correct_predictions += (predicted == labels).sum().item()\n",
" total_predictions += labels.size(0)\n",
"\n",
" # Durchschnittlicher Validierungsverlust und Genauigkeit\n",
" valid_loss /= len(valid_loader)\n",
" valid_accuracy = correct_predictions / total_predictions\n",
" valid_losses.append(valid_loss)\n",
" valid_accuracies.append(valid_accuracy)\n",
"\n",
" # Aktualisieren des Graphen\n",
" clear_output(wait=True)\n",
" fig, ax1 = plt.subplots()\n",
"\n",
" # Zeichnen der Verlustkurven\n",
" ax1.plot(train_losses, label='Trainingsverlust', color='r')\n",
" ax1.plot(valid_losses, label='Validierungsverlust', color='b')\n",
" ax1.set_xlabel('Epochen')\n",
" ax1.set_ylabel('Verlust', color='g')\n",
" ax1.tick_params(axis='y', labelcolor='g')\n",
"\n",
" # Zweite y-Achse für die Genauigkeit\n",
" ax2 = ax1.twinx()\n",
" ax2.plot(train_accuracies, label='Trainingsgenauigkeit', color='r', linestyle='dashed')\n",
" ax2.plot(valid_accuracies, label='Validierungsgenauigkeit', color='b', linestyle='dashed')\n",
" ax2.set_ylabel('Genauigkeit', color='g')\n",
" ax2.tick_params(axis='y', labelcolor='g')\n",
"\n",
" # Titel und Legende\n",
" plt.title(f'Experiment #{experiment_num}: Trainings- und Validierungsverlust und -genauigkeit über die Zeit mit \\n{n_pca_components}-Hauptkomponenten, Lernrate: {learning_rate}')\n",
" fig.tight_layout()\n",
"\n",
" # Legende außerhalb des Graphen\n",
" ax1.legend(loc='upper left', bbox_to_anchor=(1.15, 1))\n",
" ax2.legend(loc='upper left', bbox_to_anchor=(1.15, 0.85))\n",
"\n",
" # Fortschritt anzeigen, falls angegeben\n",
" if verbose:\n",
" print(f'Experiment #{experiment_num} mit {n_pca_components} PCA components: Epoch [{epoch+1}/{n_epochs}], Trainingsverlust: {train_loss:.4f}, Trainingsgenauigkeit: {train_accuracies[-1]:.4f}, Validierungsverlust: {valid_loss:.4f}, Validierungsgenauigkeit: {valid_accuracies[-1]:.4f}')\n",
"\n",
" # Plot speichern\n",
" name = str(experiment_num) + \".png\" if experiment_num is not None else \"single_experiment.png\"\n",
" if not os.path.exists(\"Experiments\"):\n",
" os.makedirs(\"Experiments\")\n",
" if not os.path.exists(f\"Experiments/{str(n_pca_components)}\"):\n",
" os.makedirs(f\"Experiments/{str(n_pca_components)}\")\n",
" plt.savefig(f\"Experiments/{str(n_pca_components)}/{name}\", bbox_inches='tight')\n",
2024-01-05 15:16:59 +01:00
"\n",
" return train_losses, valid_losses, train_accuracies, valid_accuracies\n",
"\n",
" def run_single_pca_experiment(self, train_loader: DataLoader, valid_loader: DataLoader, n_pca_components: int, n_experiments: int, n_epochs: int = 200, learning_rate: int = 0.0005, verbose: bool = True) -> List:\n",
" \"\"\"\n",
" Führt eine Serie von Experimenten mit verschiedenen Konfigurationen für die PCA-Komponenten durch.\n",
"\n",
" Parameter:\n",
" train_loader : DataLoader\n",
" Der DataLoader für den Trainingsdatensatz.\n",
" valid_loader : DataLoader\n",
" Der DataLoader für den Validierungsdatensatz.\n",
" n_pca_components : int\n",
" Anzahl der PCA-Komponenten, die im Modell verwendet werden.\n",
" n_experiments : int\n",
" Anzahl der durchzuführenden Experimente.\n",
" n_epochs : int, optional\n",
" Anzahl der Epochen für das Training (Standardwert ist 200).\n",
" learning_rate : float, optional\n",
" Lernrate für den Optimierer (Standardwert ist 0.0005).\n",
" verbose : bool, optional\n",
" Gibt an, ob der Trainingsfortschritt angezeigt werden soll (Standardwert ist True).\n",
"\n",
" Rückgabewerte:\n",
" List\n",
" Eine Liste von Ergebnissen der einzelnen Experimente.\n",
" \"\"\"\n",
" if not isinstance(n_pca_components, int):\n",
" raise TypeError(\"n_pca_components must be an integers!\")\n",
"\n",
" results = []\n",
"\n",
" for n in range(n_experiments):\n",
" res = self.run_single_experiment(train_loader, valid_loader, n_pca_components, n_epochs=n_epochs, learning_rate=learning_rate, verbose=verbose, experiment_num=n+1)\n",
" results.append(res)\n",
"\n",
" return results\n",
" \n",
"\n",
" def run(self, n_pca_components: List[int], n_experiments: int, n_epochs: int = 200, learning_rate: int = 0.0005, batch_size: int = 64, verbose: bool = True) -> Dict:\n",
2024-01-05 15:56:33 +01:00
" \"\"\"\n",
" Hauptmethode zum Ausführen von Experimenten mit verschiedenen Anzahlen von PCA-Komponenten.\n",
"\n",
" Parameter:\n",
" n_pca_components : List[int]\n",
" Eine Liste von Anzahlen der PCA-Komponenten, die in den Experimenten verwendet werden sollen.\n",
" n_experiments : int\n",
" Anzahl der durchzuführenden Experimente pro PCA-Komponentenanzahl.\n",
" n_epochs : int, optional\n",
" Anzahl der Epochen für das Training (Standardwert ist 200).\n",
" learning_rate : float, optional\n",
" Lernrate für den Optimierer (Standardwert ist 0.0005).\n",
" batch_size : int, optional\n",
" Batch-Größe für das Laden der Daten (Standardwert ist 64).\n",
" verbose : bool, optional\n",
" Gibt an, ob der Trainingsfortschritt angezeigt werden soll (Standardwert ist True).\n",
"\n",
" Rückgabewerte:\n",
" Dict\n",
" Ein Wörterbuch, das die Ergebnisse der Experimente für jede Anzahl von PCA-Komponenten enthält.\n",
" \"\"\"\n",
" if not isinstance(n_pca_components, list):\n",
" raise TypeError(\"n_pca_components must be a list of integers!\")\n",
"\n",
" plt.ioff()\n",
" self.n_pca_components = n_pca_components\n",
"\n",
" results = {}\n",
"\n",
" for n_pca_comps in n_pca_components:\n",
" genome_dataset = GenomeDataset(data_frame, n_pca_components=n_pca_comps)\n",
" train_dataset = genome_dataset.train_df\n",
" valid_dataset = genome_dataset.val_df\n",
"\n",
" train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n",
" valid_loader = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=False)\n",
"\n",
" res = self.run_single_pca_experiment(train_loader, valid_loader, n_pca_comps, n_experiments, n_epochs=n_epochs, learning_rate=learning_rate, verbose=verbose)\n",
" results[str(n_pca_comps)] = res\n",
"\n",
" self.plot_and_save_results(res, n_pca_comps)\n",
"\n",
" self.results = results\n",
"\n",
" # Speichern der Daten in einer lokalen Datei\n",
" if len(n_pca_components) > 1:\n",
" with open('Experiments/results.pickle', 'wb') as f:\n",
" pickle.dump(self.results, f)\n",
" else:\n",
" with open(f'Experiments/{str(n_pca_components[0])}/results_{str(n_pca_components[0])}.pickle', 'wb') as f:\n",
" pickle.dump(self.results, f)\n",
"\n",
" plt.ion()\n",
"\n",
" return results\n",
"\n",
" def plot_and_save_results(self, results: List[Tuple], n_pca_components: int) -> None:\n",
" \"\"\"\n",
" Erstellt und speichert Plots für die Ergebnisse der Experimente.\n",
"\n",
" Parameter:\n",
" results : List[Tuple]\n",
" Eine Liste von Tupeln mit den Ergebnissen der Experimente.\n",
" n_pca_components : int\n",
" Anzahl der PCA-Komponenten, für die die Ergebnisse geplottet werden sollen.\n",
"\n",
" Keine Rückgabewerte, da die Methode Plots speichert.\n",
" \"\"\"\n",
" \n",
" # Mittelwerte und Standardabweichungen berechnen\n",
" train_losses, valid_losses, train_accuracies, valid_accuracies = zip(*results)\n",
"\n",
" train_losses = np.array(train_losses)\n",
" valid_losses = np.array(valid_losses)\n",
" train_accuracies = np.array(train_accuracies)\n",
" valid_accuracies = np.array(valid_accuracies)\n",
"\n",
" avg_train_losses = np.mean(train_losses, axis=0)\n",
" avg_valid_losses = np.mean(valid_losses, axis=0)\n",
" avg_train_acc = np.mean(train_accuracies, axis=0)\n",
" avg_valid_acc = np.mean(valid_accuracies, axis=0)\n",
"\n",
" std_train_losses = np.std(train_losses, axis=0)\n",
" std_valid_losses = np.std(valid_losses, axis=0)\n",
" std_train_acc = np.std(train_accuracies, axis=0)\n",
" std_valid_acc = np.std(valid_accuracies, axis=0)\n",
"\n",
" # Erstellen von Plots\n",
" epochs = range(1, len(avg_train_losses) + 1)\n",
"\n",
" # Plot für Verluste\n",
" plt.clf()\n",
" plt.plot(epochs, avg_train_losses, label='Mittlerer Trainingsverlust', color='r')\n",
" plt.fill_between(epochs, np.subtract(avg_train_losses, std_train_losses), np.add(avg_train_losses, std_train_losses), color='r', alpha=0.2)\n",
" plt.plot(epochs, avg_valid_losses, label='Mittlerer Validierungsverlust', color='b')\n",
" plt.fill_between(epochs, np.subtract(avg_valid_losses, std_valid_losses), np.add(avg_valid_losses, std_valid_losses), color='b', alpha=0.2)\n",
" plt.title(f'Mittelwert und Standardabweichung der Verluste für {n_pca_components} PCA-Komponenten')\n",
" plt.xlabel('Experiment Nummer')\n",
" plt.ylabel('Verlust')\n",
" plt.legend()\n",
" plt.savefig(f\"Experiments/{n_pca_components}/average_losses.png\", bbox_inches='tight')\n",
" plt.clf()\n",
"\n",
" # Plot für Genauigkeiten\n",
" plt.plot(epochs, avg_train_acc, label='Mittlere Trainingsgenauigkeit', color='r')\n",
" plt.fill_between(epochs, np.subtract(avg_train_acc, std_train_acc), np.add(avg_train_acc, std_train_acc), color='r', alpha=0.2)\n",
" plt.plot(epochs, avg_valid_acc, label='Mittlere Validierungsgenauigkeit', color='b')\n",
" plt.fill_between(epochs, np.subtract(avg_valid_acc, std_valid_acc), np.add(avg_valid_acc, std_valid_acc), color='b', alpha=0.2)\n",
" plt.title(f'Mittelwert und Standardabweichung der Genauigkeiten für {n_pca_components} PCA-Komponenten')\n",
" plt.xlabel('Experiment Nummer')\n",
" plt.ylabel('Genauigkeit')\n",
" plt.legend()\n",
" plt.savefig(f\"Experiments/{n_pca_components}/average_accuracies.png\", bbox_inches='tight')\n",
" plt.clf()\n",
"\n",
" "
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Starten der einzelnen Experimente, da in einer Schleife RAM Probleme auftreten"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e1 = ExperimentationalExperiments()\n",
"results = e1.run([1024], 10, n_epochs=500)\n",
"del e1"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e2 = ExperimentationalExperiments()\n",
"results = e2.run([512], 10, n_epochs=500)\n",
"del e2"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e3 = ExperimentationalExperiments()\n",
"results = e3.run([256], 10, n_epochs=500)\n",
"del e3"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e4 = ExperimentationalExperiments()\n",
"results = e4.run([128], 10, n_epochs=500)\n",
"del e4"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e5 = ExperimentationalExperiments()\n",
"results = e5.run([64], 10, n_epochs=500)\n",
"del e5"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e6 = ExperimentationalExperiments()\n",
"results = e6.run([32], 10, n_epochs=500)\n",
"del e6"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e7 = ExperimentationalExperiments()\n",
"results = e7.run([16], 10, n_epochs=500)\n",
"del e7"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"e8 = ExperimentationalExperiments()\n",
"results = e7.run([1034], 10, n_epochs=500)\n",
"del e8"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Lesen der Daten und Erstellen der Mittelwerte und anschließender Auswertung"
]
},
{
"cell_type": "code",
"execution_count": 5,
"metadata": {},
"outputs": [],
"source": [
"import numpy as np\n",
"import pickle\n",
"import os\n",
"import matplotlib.pyplot as plt\n",
"from typing import List, Dict, Tuple\n",
"\n",
"def load_results(path: str) -> Dict:\n",
" \"\"\"\n",
" Lädt und konvertiert die Ergebnisse aus Pickle-Dateien in den spezifizierten Verzeichnissen.\n",
"\n",
" Argumente:\n",
" path (str): Der Pfad zum Basisverzeichnis, das die Ergebnis-Unterverzeichnisse enthält.\n",
"\n",
" Rückgabe:\n",
" Dict: Ein Dictionary, in dem jeder Schlüssel einer PCA-Komponentenanzahl entspricht und\n",
" dessen Werte Achtertupel aus Durchschnitt und Standardabweichung von Trainingsverlust,\n",
" Validierungsverlust, Trainingsgenauigkeit und Validierungsgenauigkeit sind.\n",
" \"\"\"\n",
"\n",
" results = {}\n",
"\n",
" # Über alle Ordner im Basispfad iterieren\n",
" for directory in os.listdir(path):\n",
" full_path = os.path.join(path, directory)\n",
"\n",
" # Überprüfen, ob es sich um einen Ordner handelt\n",
" if os.path.isdir(full_path):\n",
" pickle_file = f'results_{directory}.pickle'\n",
" pickle_path = os.path.join(full_path, pickle_file)\n",
"\n",
" # Überprüfen, ob die Pickle-Datei existiert\n",
" if os.path.isfile(pickle_path):\n",
" # Pickle-Datei laden\n",
" with open(pickle_path, 'rb') as file:\n",
" results[directory] = pickle.load(file)\n",
"\n",
" converted_results = {}\n",
" for values in list(results.values()):\n",
" key = list(values.keys())[0]\n",
" value = list(values.values())[0]\n",
" converted_results[key] = value\n",
"\n",
" return converted_results\n",
"\n",
"\n",
"def calculate_means_for_n_last(results: Dict, n_last: int) -> Dict:\n",
" \"\"\"\n",
" Berechnet Durchschnittswerte und Standardabweichungen für die letzten `n_last` Ergebnisse.\n",
"\n",
" Argumente:\n",
" results (Dict): Ein Dictionary von Ergebnissen, wie von `load_results` zurückgegeben.\n",
" n_last (int): Anzahl der letzten Ergebnisse, die zur Berechnung verwendet werden sollen.\n",
"\n",
" Rückgabe:\n",
" Dict: Ein Dictionary mit Schlüsseln als PCA-Komponentenanzahlen und Werten als Achtertupel,\n",
" bestehend aus Durchschnitt und Standardabweichung von Trainingsverlust, \n",
" Validierungsverlust, Trainingsgenauigkeit und Validierungsgenauigkeit.\n",
" \"\"\"\n",
"\n",
" assert results is not None\n",
" assert n_last <= len(list(results.values())[0][0][0])\n",
"\n",
" means_and_stds = {}\n",
"\n",
" for key, value in results.items():\n",
" train_losses, valid_losses, train_accuracies, valid_accuracies = zip(*value)\n",
" \n",
" train_losses = train_losses[:n_last] \n",
" valid_losses = valid_losses[:n_last]\n",
" train_accuracies = train_accuracies[:n_last]\n",
" valid_accuracies = valid_accuracies[:n_last]\n",
"\n",
" avg_train_loss = np.mean(train_losses)#, axis=0)\n",
" avg_valid_loss = np.mean(valid_losses)#, axis=0)\n",
" avg_train_acc = np.mean(train_accuracies)#, axis=0)\n",
" avg_valid_acc = np.mean(valid_accuracies)#, axis=0)\n",
"\n",
" std_train_loss = np.std(train_losses)#, axis=0)\n",
" std_valid_loss = np.std(valid_losses)#, axis=0)\n",
" std_train_acc = np.std(train_accuracies)#, axis=0)\n",
" std_valid_acc = np.std(valid_accuracies)#, axis=0)\n",
"\n",
" print(f\"### {key} PCA Komponenten ###\")\n",
" print(f\"Trainingsverlust: {avg_train_loss:.3f} \\u00B1 {std_train_loss:.3f}\")\n",
" print(f\"Validierungsverlust: {avg_valid_loss:.3f} \\u00B1 {std_valid_loss:.3f}\")\n",
" print(f\"Trainingsgenauigkeit: {avg_train_acc:.3f} \\u00B1 {std_train_acc:.3f}\")\n",
" print(f\"Validierungsgenauigkeit: {avg_valid_acc:.3f} \\u00B1 {std_valid_acc:.3f}\\n\")\n",
"\n",
" means_and_stds[key] = (avg_train_loss, std_train_loss, avg_valid_loss, std_valid_loss, avg_train_acc, std_train_acc, avg_valid_acc, std_valid_acc)\n",
"\n",
" # Initialisierung der Variablen für die Minima und Maxima\n",
" min_train_loss = float('inf')\n",
" min_valid_loss = float('inf')\n",
" max_train_acc = 0\n",
" max_valid_acc = 0\n",
"\n",
" # Durchlaufen aller berechneten Mittelwerte und Standardabweichungen\n",
" for key, (avg_train_loss, std_train_loss, avg_valid_loss, std_valid_loss, avg_train_acc, std_train_acc, avg_valid_acc, std_valid_acc) in means_and_stds.items():\n",
" if avg_train_loss < min_train_loss:\n",
" min_train_loss = avg_train_loss\n",
" min_train_loss_key = key\n",
"\n",
" if avg_valid_loss < min_valid_loss:\n",
" min_valid_loss = avg_valid_loss\n",
" min_valid_loss_key = key\n",
"\n",
" if avg_train_acc > max_train_acc:\n",
" max_train_acc = avg_train_acc\n",
" max_train_acc_key = key\n",
"\n",
" if avg_valid_acc > max_valid_acc:\n",
" max_valid_acc = avg_valid_acc\n",
" max_valid_acc_key = key\n",
"\n",
" # Drucken der Endresultate\n",
" print(f\"### Auswertung ###\")\n",
" print(f\"Niedrigster Trainingsverlust: {min_train_loss:.3f} bei {min_train_loss_key} PCA-Komponenten\")\n",
" print(f\"Niedrigster Validierungsverlust: {min_valid_loss:.3f} bei {min_valid_loss_key} PCA-Komponenten\")\n",
" print(f\"Höchste Trainingsgenauigkeit: {max_train_acc:.3f} bei {max_train_acc_key} PCA-Komponenten\")\n",
" print(f\"Höchste Validierungsgenauigkeit: {max_valid_acc:.3f} bei {max_valid_acc_key} PCA-Komponenten\")\n",
"\n",
" return means_and_stds\n",
"\n",
"\n",
"def plot_results(results: Dict, show_lines: bool = True) -> None:\n",
" \"\"\"\n",
" Stellt die Ergebnisse als Fehlerbalkendiagramme dar. Jedes Diagramm zeigt Mittelwert und\n",
" Standardabweichung von Trainings- und Validierungsverlust sowie -genauigkeit. \n",
" Fügt zusätzlich eine rote Linie für den höchsten Genauigkeitswert und den geringsten Verlustwert hinzu,\n",
" mit einer Beschriftung, die den Schlüssel des entsprechenden höchsten bzw. niedrigsten Werts anzeigt.\n",
"\n",
" Argumente:\n",
" results (Dict): Ein Dictionary von berechneten Mittelwerten und Standardabweichungen,\n",
" wie von `calculate_means_for_n_last` zurückgegeben.\n",
" show_lines (bool): Ein flag, das angibt, ob die Maximal- / Minimallinie gezeichnet werden soll.\n",
" \"\"\"\n",
" # Schlüssel sortieren\n",
" sorted_keys = sorted(results.keys(), key=lambda x: int(x))\n",
"\n",
" # Listen für das Plotten vorbereiten\n",
" mean_train_loss = [results[k][0] for k in sorted_keys]\n",
" std_train_loss = [results[k][1] for k in sorted_keys]\n",
" mean_validation_loss = [results[k][2] for k in sorted_keys]\n",
" std_validation_loss = [results[k][3] for k in sorted_keys]\n",
" mean_train_accuracy = [results[k][4] for k in sorted_keys]\n",
" std_train_accuracy = [results[k][5] for k in sorted_keys]\n",
" mean_validation_accuracy = [results[k][6] for k in sorted_keys]\n",
" std_validation_accuracy = [results[k][7] for k in sorted_keys]\n",
"\n",
" # Plotten\n",
" plt.figure(figsize=(12, 8))\n",
"\n",
" # Verluste\n",
" plt.errorbar(sorted_keys, mean_train_loss, yerr=std_train_loss, label='Trainingverlust', fmt='o', linestyle='--', alpha=0.5)\n",
" plt.errorbar(sorted_keys, mean_validation_loss, yerr=std_validation_loss, label='Validierungsverlust', fmt='o', linestyle='--', alpha=0.5)\n",
"\n",
" # Genauigkeiten\n",
" plt.errorbar(sorted_keys, mean_train_accuracy, yerr=std_train_accuracy, label='Trainingsgenauigkeit', fmt='x', linestyle='--', alpha=0.5)\n",
" plt.errorbar(sorted_keys, mean_validation_accuracy, yerr=std_validation_accuracy, label='Validierungsgenauigkeit', fmt='x', linestyle='--', alpha=0.5)\n",
"\n",
" # Gestaltung\n",
" plt.xlabel('Anzahl der PCA Komponenten')\n",
" plt.ylabel('Werte')\n",
" plt.title('Trainings- und Validierungsverlust und -genauigkeit')\n",
" plt.grid(True)\n",
"\n",
" # Höchste Genauigkeit und geringster Verlust\n",
" highest_accuracy = max(max(mean_train_accuracy), max(mean_validation_accuracy))\n",
" lowest_loss = min(min(mean_train_loss), min(mean_validation_loss))\n",
"\n",
" # Schlüssel für höchste Genauigkeit und geringsten Verlust finden\n",
" highest_acc_key = sorted_keys[mean_train_accuracy.index(max(mean_train_accuracy))] if max(mean_train_accuracy) > max(mean_validation_accuracy) else sorted_keys[mean_validation_accuracy.index(max(mean_validation_accuracy))]\n",
" lowest_loss_key = sorted_keys[mean_train_loss.index(min(mean_train_loss))] if min(mean_train_loss) < min(mean_validation_loss) else sorted_keys[mean_validation_loss.index(min(mean_validation_loss))]\n",
"\n",
" plt.legend()\n",
"\n",
" # Linien und Text für höchste Genauigkeit und geringsten Verlust\n",
" if show_lines:\n",
" plt.axhline(y=highest_accuracy, color='r', linestyle='-', alpha=0.8)\n",
" plt.text(0.95, highest_accuracy, f'Höchste Genauigkeit (PCA: {highest_acc_key})', verticalalignment='bottom', horizontalalignment='right', color='red', fontsize=10, transform=plt.gca().get_yaxis_transform())\n",
"\n",
" plt.axhline(y=lowest_loss, color='r', linestyle='-', alpha=0.8)\n",
" plt.text(0.95, lowest_loss, f'Geringster Verlust (PCA: {lowest_loss_key})', verticalalignment='top', horizontalalignment='right', color='red', fontsize=10, transform=plt.gca().get_yaxis_transform())\n",
" \n",
" plt.savefig('Experiments/Endergebnisse_mit_Linien.png')\n",
" else:\n",
" plt.savefig('Experiments/Endergebnisse_ohne_Linien.png')\n",
"\n",
" plt.show()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"### 128 PCA Komponenten ###\n",
"Trainingsverlust: 0.575 ± 0.051\n",
"Validierungsverlust: 0.591 ± 0.029\n",
"Trainingsgenauigkeit: 0.977 ± 0.052\n",
"Validierungsgenauigkeit: 0.952 ± 0.028\n",
"\n",
"### 64 PCA Komponenten ###\n",
"Trainingsverlust: 0.613 ± 0.064\n",
"Validierungsverlust: 0.628 ± 0.047\n",
"Trainingsgenauigkeit: 0.940 ± 0.066\n",
"Validierungsgenauigkeit: 0.933 ± 0.051\n",
"\n",
"### 512 PCA Komponenten ###\n",
"Trainingsverlust: 0.558 ± 0.026\n",
"Validierungsverlust: 0.606 ± 0.018\n",
"Trainingsgenauigkeit: 0.994 ± 0.026\n",
"Validierungsgenauigkeit: 0.950 ± 0.016\n",
"\n",
"### 16 PCA Komponenten ###\n",
"Trainingsverlust: 0.792 ± 0.082\n",
"Validierungsverlust: 0.704 ± 0.084\n",
"Trainingsgenauigkeit: 0.773 ± 0.089\n",
"Validierungsgenauigkeit: 0.842 ± 0.083\n",
"\n",
"### 32 PCA Komponenten ###\n",
"Trainingsverlust: 0.698 ± 0.076\n",
"Validierungsverlust: 0.642 ± 0.054\n",
"Trainingsgenauigkeit: 0.855 ± 0.082\n",
"Validierungsgenauigkeit: 0.919 ± 0.049\n",
"\n",
"### 1024 PCA Komponenten ###\n",
"Trainingsverlust: 0.558 ± 0.021\n",
"Validierungsverlust: 0.606 ± 0.018\n",
"Trainingsgenauigkeit: 0.994 ± 0.021\n",
"Validierungsgenauigkeit: 0.946 ± 0.017\n",
"\n",
"### 256 PCA Komponenten ###\n",
"Trainingsverlust: 0.563 ± 0.035\n",
"Validierungsverlust: 0.595 ± 0.020\n",
"Trainingsgenauigkeit: 0.989 ± 0.035\n",
"Validierungsgenauigkeit: 0.956 ± 0.016\n",
"\n",
"### Auswertung ###\n",
"Niedrigster Trainingsverlust: 0.558 bei 512 PCA-Komponenten\n",
"Niedrigster Validierungsverlust: 0.591 bei 128 PCA-Komponenten\n",
"Höchste Trainingsgenauigkeit: 0.994 bei 512 PCA-Komponenten\n",
"Höchste Validierungsgenauigkeit: 0.956 bei 256 PCA-Komponenten\n"
]
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA+kAAAK9CAYAAABYVS0qAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8WgzjOAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3wUZf7A8c9sy2Y32TRCGkkgoXcIHanSy0lREFFBsLfzd8epnJ6Kig099VAPyynqiRX1rCggWJDeBASkhB4IJT3ZbJvfH8sO2RTY0DYJ3/frtS8yM8/MPDP77LLfeZqiqqqKEEIIIYQQQgghgk4X7AwIIYQQQgghhBDCS4J0IYQQQgghhBCihpAgXQghhBBCCCGEqCEkSBdCCCGEEEIIIWoICdKFEEIIIYQQQogaQoJ0IYQQQgghhBCihpAgXQghhBBCCCGEqCEkSBdCCCGEEEIIIWoICdKFEEIIIYQQQogaQoJ0IYQ4DyZPnkzDhg3Pat9HHnkERVHOb4bqkD179qAoCnPnzr3o51YUhUceeURbnjt3LoqisGfPnjPu27BhQyZPnqwtL126FEVRWLp06XnPp/C6lO9x37596du3b7CzUSOcy3eGb99nn332vOTlXP5vEEJcuiRIF0LUaYqiBPS6FH/U1yV33303iqKwc+fOKtM88MADKIrCb7/9dhFzJuqSX3/9lUceeYTc3NxgZ0XUUsXFxTzyyCPyf44Q4rQMwc6AEEJcSO+++67f8jvvvMPChQsrrG/RosU5nef111/H4/Gc1b4PPvgg999//zmd/1I3ceJEZs+ezbx583jooYcqTfP+++/Tpk0b2rZte9bnue6667j66qsJCQmp9r69e/empKQEk8l01ucXwfXrr78yY8YMJk+eTGRkZLCzI6qQmppKSUkJRqMx2Fmp8H9DcXExM2bMAJCWD0KIKkmQLoSo06699lq/5RUrVrBw4cIK68srLi7GYrEEfJ5z+TFoMBgwGOTr+Fx07dqVxo0b8/7771capC9fvpzMzEyeeuqpczqPXq9Hr9ef1b46nQ6z2XxO5y+rqKgIq9V63o5Xm9ntdnn4ITSKopzXz9q5qAkPCoQQtY80dxdCXPL69u1L69atWbt2Lb1798ZisfD3v/8dgP/9738MHz6cxMREQkJCSE9P57HHHsPtdvsdo3y/w7L9Gl977TXS09MJCQmhc+fOrF692m/fyvqkK4rCnXfeyeeff07r1q0JCQmhVatWLFiwoEL+ly5dSqdOnTCbzaSnp/Pqq69WesyFCxdy2WWXERkZSVhYGM2aNdOus7qq6vtbWV/QyZMnExYWxsGDBxk1ahRhYWHExsYybdq0CvcxNzeXyZMnExERQWRkJJMmTQq4afHEiRPZtm0b69atq7Bt3rx5KIrChAkTcDgcPPTQQ2RkZBAREYHVaqVXr14sWbLkjOeorE+6qqo8/vjjNGjQAIvFQr9+/diyZUuFfau6ZytXrmTIkCFERERgsVjo06cPy5Yt80vjez9///13rrnmGqKiorjsssuAqvsin0uZBPj4449p2bIlZrOZ1q1b89lnn1Xav/aDDz4gIyOD8PBwbDYbbdq04cUXXwRgzZo1KIrC22+/XeH43333HYqi8NVXX2nrDh48yJQpU4iLi9PK/Jtvvlnpffzggw948MEHSUpKwmKxkJ+fX+EcUHFsAJ/K7tvs2bNp1aoVFouFqKgoOnXqxLx58wDve/C3v/0NgEaNGmldZU43PkGg5/Zd00cffcTMmTNp0KABZrOZyy+/vNIuHL73LzQ0lC5duvDzzz9XmYdAHT9+nOuuuw6bzaZ99jZu3Fhp3+5t27Zx5ZVXEh0djdlsplOnTnzxxRd+aXyflWXLlvGXv/yF2NhYrFYro0eP5ujRo35pA/2eDfR+VtUnPdAyXZ6qqtx8882YTCY+/fRTbf1///tfMjIyCA0NJTo6mquvvpr9+/f77Vv2+Hv27CE2NhaAGTNmaGWo7LgXQggBUpMuhBCA9wfq0KFDufrqq7n22muJi4sDvD80w8LC+Mtf/kJYWBg//PADDz30EPn5+cyaNeuMx503bx4FBQXccsstKIrCM888w5gxY9i9e/cZa1h++eUXPv30U26//XbCw8P517/+xdixY9m3bx8xMTEArF+/niFDhpCQkMCMGTNwu908+uij2g9Bny1btjBixAjatm3Lo48+SkhICDt37qwQDF4obrebwYMH07VrV5599lkWLVrEc889R3p6Orfddhvg/SF8xRVX8Msvv3DrrbfSokULPvvsMyZNmhTQOSZOnMiMGTOYN28eHTt29Dv3Rx99RK9evUhJSeHYsWO88cYbTJgwgZtuuomCggL+85//MHjwYFatWkX79u2rdW0PPfQQjz/+OMOGDWPYsGGsW7eOQYMG4XA4zrjvDz/8wNChQ8nIyODhhx9Gp9Px1ltv0b9/f37++We6dOnil/6qq66iSZMmPPHEE6iqWq18+gRSJr/++mvGjx9PmzZtePLJJ8nJyWHq1KkkJSX5HWvhwoVMmDCByy+/nKeffhqArVu3smzZMv785z/TqVMn0tLS+Oijjyq8jx9++CFRUVEMHjwYgCNHjtCtWzftAVVsbCzffvstU6dOJT8/n3vuucdv/8ceewyTycS0adMoLS0955r0119/nbvvvpsrr7ySP//5z9jtdn777TdWrlzJNddcw5gxY/jjjz94//33ef7556lXrx5Ahc/auXjqqafQ6XRMmzaNvLw8nnnmGSZOnMjKlSu1NP/5z3+45ZZb6NGjB/fccw+7d+/mT3/6E9HR0SQnJ5/VeT0eDyNHjmTVqlXcdtttNG/enP/973+Vfva2bNlCz549SUpK4v7778dqtfLRRx8xatQo5s+fz+jRo/3S33XXXURFRfHwww+zZ88eXnjhBe68804+/PBDLc25fs8GItAyXZ7b7WbKlCl8+OGHfPbZZwwfPhyAmTNn8o9//INx48Zx4403cvToUWbPnk3v3r1Zv359pd0hYmNj+fe//81tt93G6NGjGTNmDMA5dcERQtRRqhBCXELuuOMOtfxXX58+fVRAnTNnToX0xcXFFdbdcsstqsViUe12u7Zu0qRJampqqracmZmpAmpMTIx64sQJbf3//vc/FVC//PJLbd3DDz9cIU+AajKZ1J07d2rrNm7cqALq7NmztXUjR45ULRaLevDgQW3djh07VIPB4HfM559/XgXUo0ePVnpfqmvJkiUqoC5ZssRvve+633rrLW3dpEmTVEB99NFH/dJ26NBBzcjI0JY///xzFVCfeeYZbZ3L5VJ79epV4ZhV6dy5s9qgQQPV7XZr6xYsWKAC6quvvqods7S01G+/nJwcNS4uTp0yZYrfekB9+OGHteW33npLBdTMzExVVVU1OztbNZlM6vDhw1WPx6Ol+/vf/64C6qRJk7R15e+Zx+NRmzRpog4ePNhv3+LiYrVRo0bqwIEDtXW+MjJhwoQK19ynTx+1T58+FdafS5ls06aN2qBBA7WgoEBbt3TpUhXwO+af/
/xn1WazqS6Xq8L5faZPn64ajUa/c5aWlqqRkZF+93vq1KlqQkKCeuzYMb/9r776ajUiIkL7LPruY1paWoXPZ2XlMjU11e998Cl/36644gq1VatWVV6HqqrqrFmz/N7/Mwn03L58t2jRwq9svvjiiyqgbtq0SVVVVXU4HGr9+vXV9u3b+6V77bXXVKDSchCI+fPnq4D6wgsvaOvcbrfav3//Cp+9yy+/XG3Tpo3f95/H41F79OihNmnSRFvn+6wMGDDAr3z/3//9n6rX69Xc3FxtXaDfs4Hez8q+hwIt0759Z82apTqdTnX8+PFqaGio+t1332lp9uzZo+r1enXmzJl++di0aZNqMBj81pf/HB49erTC94oQQpQnzd2FEAIICQnhhhtuqLA+NDRU+7ugoIBjx47Rq1cviouL2bZt2xmPO378eKKiorTlXr16AbB79+4z7jtgwADS09O15bZt22Kz2bR93W43ixYtYtSoUSQmJmrpGjduzNChQ/2O5avV+d///nfWA9ydq1tvvdVvuVe
"text/plain": [
"<Figure size 1200x800 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"data": {
"image/png": "iVBORw0KGgoAAAANSUhEUgAAA+kAAAK9CAYAAABYVS0qAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjcuNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8WgzjOAAAACXBIWXMAAA9hAAAPYQGoP6dpAAEAAElEQVR4nOzdd3gcxeE+8Hf3etGpWVaXbLn3InfjihsuwabZYMCmBQKEJHxJAimASSg/QgiJQ02BJMSUYNNtU4yNccEy7gX3XtSsrrvT3e3O74/VrXQqtuR2J+n9PI8e3+3N7s3urc56d2ZnJCGEABERERERERGFnRzuChARERERERGRhiGdiIiIiIiIKEIwpBMRERERERFFCIZ0IiIiIiIiogjBkE5EREREREQUIRjSiYiIiIiIiCIEQzoRERERERFRhGBIJyIiIiIiIooQDOlEREREREREEYIhnYjoIpg/fz46dOhwXus+/vjjkCTp4laoFTly5AgkScIbb7xx2d9bkiQ8/vjj+vM33ngDkiThyJEj51y3Q4cOmD9/vv581apVkCQJq1atuuj1JE1bPsZjx47F2LFjw12NiHAh3xnBdZ977rmLUpcL+b+BiNouhnQiatUkSWrST1v8o741eeCBByBJEg4cONBomV//+teQJAnbt2+/jDWj1mTdunV4/PHHUVJSEu6qUAvldrvx+OOP8/8cIjorY7grQER0Kf3nP/8Jef7vf/8bX3zxRb3lPXr0uKD3+dvf/gZVVc9r3d/85jd4+OGHL+j927q5c+di4cKFWLRoER599NEGy7z11lvo06cP+vbte97vc8stt2DOnDmwWCzNXnf06NHweDwwm83n/f4UXuvWrcOCBQswf/58xMTEhLs61IjMzEx4PB6YTKZwV6Xe/w1utxsLFiwAAPZ8IKJGMaQTUat28803hzz/9ttv8cUXX9RbXpfb7Ybdbm/y+1zIH4NGoxFGI7+OL8TQoUPRuXNnvPXWWw2G9PXr1+Pw4cN45plnLuh9DAYDDAbDea0ryzKsVusFvX9tlZWVcDgcF217LZnX6+XFD9JJknRRf9cuRCRcKCCilofd3YmozRs7dix69+6NTZs2YfTo0bDb7fjVr34FAPjwww8xbdo0pKSkwGKxoFOnTvjd734HRVFCtlH3vsPa9zW+9tpr6NSpEywWCwYPHoyNGzeGrNvQPemSJOH+++/HBx98gN69e8NisaBXr15Yvnx5vfqvWrUKgwYNgtVqRadOnfDqq682uM0vvvgCV1xxBWJiYuB0OtGtWzd9P5ursXt/G7oXdP78+XA6nTh58iRmzpwJp9OJhIQEPPTQQ/WOY0lJCebPn4/o6GjExMRg3rx5Te5aPHfuXOzZswebN2+u99qiRYsgSRJuvPFG+Hw+PProo8jOzkZ0dDQcDgdGjRqFlStXnvM9GronXQiB3//+90hLS4Pdbse4ceOwa9eueus2dsw2bNiAKVOmIDo6Gna7HWPGjMHatWtDygQ/z927d+Omm25CbGwsrrjiCgCN34t8IeckAPzvf/9Dz549YbVa0bt3b7z//vsN3l/79ttvIzs7G1FRUXC5XOjTpw/+/Oc/AwC+++47SJKEf/3rX/W2/9lnn0GSJHzyySf6spMnT+L2229HYmKifs7/85//bPA4vv322/jNb36D1NRU2O12lJWV1XsPoP7YAEENHbeFCxeiV69esNvtiI2NxaBBg7Bo0SIA2mfw85//HADQsWNH/VaZs41P0NT3Du7Tu+++iyeffBJpaWmwWq248sorG7yFI/j52Ww2DBkyBN98802jdWiqM2fO4JZbboHL5dJ/97Zt29bgvd179uzBddddh7i4OFitVgwaNAgfffRRSJng78ratWvx4IMPIiEhAQ6HA7NmzUJBQUFI2aZ+zzb1eDZ2T3pTz+m6hBD44Q9/CLPZjCVLlujL33zzTWRnZ8NmsyEuLg5z5szB8ePHQ9atvf0jR44gISEBALBgwQL9HKo97gUREcCWdCIiANofqFdddRXmzJmDm2++GYmJiQC0PzSdTicefPBBOJ1OfPXVV3j00UdRVlaGP/zhD+fc7qJFi1BeXo67774bkiTh2WefxTXXXINDhw6ds4VlzZo1WLJkCe69915ERUXhL3/5C6699locO3YM8fHxAIAtW7ZgypQpSE5OxoIFC6AoCp544gn9D8GgXbt2Yfr06ejbty+eeOIJWCwWHDhwoF4YvFQURcHkyZMxdOhQPPfcc/jyyy/xxz/+EZ06dcKPfvQjANofwldffTXWrFmDe+65Bz169MD777+PefPmNek95s6diwULFmDRokUYOHBgyHu/++67GDVqFDIyMlBYWIi///3vuPHGG3HXXXehvLwc//jHPzB58mTk5OSgf//+zdq3Rx99FL///e8xdepUTJ06FZs3b8akSZPg8/nOue5XX32Fq666CtnZ2XjssccgyzJef/11jB8/Ht988w2GDBkSUv76669Hly5d8NRTT0EI0ax6BjXlnPz0008xe/Zs9OnTB08//TSKi4txxx13IDU1NWRbX3zxBW688UZceeWV+H//7/8BAL7//nusXbsWP/nJTzBo0CBkZWXh3Xffrfc5vvPOO4iNjcXkyZMBAHl5eRg2bJh+gSohIQHLli3DHXfcgbKyMvz0pz8NWf93v/sdzGYzHnroIVRVVV1wS/rf/vY3PPDAA7juuuvwk5/8BF6vF9u3b8eGDRtw00034ZprrsG+ffvw1ltv4U9/+hPatWsHAPV+1y7EM888A1mW8dBDD6G0tBTPPvss5s6diw0bNuhl/vGPf+Duu+/GiBEj8NOf/hSHDh3CD37wA8TFxSE9Pf283ldVVcyYMQM5OTn40Y9+hO7du+PDDz9s8Hdv165dGDlyJFJTU/Hwww/D4XDg3XffxcyZM7F48WLMmjUrpPyPf/xjxMbG4rHHHsORI0fwwgsv4P7778c777yjl7nQ79mmaOo5XZeiKLj99tvxzjvv4P3338e0adMAAE8++SR++9vf4oYbbsCdd96JgoICLFy4EKNHj8aWLVsavB0iISEBL7/8Mn70ox9h1qxZuOaaawDggm7BIaJWShARtSH33XefqPvVN2bMGAFAvPLKK/XKu93uesvuvvtuYbfbhdfr1ZfNmzdPZGZm6s8PHz4sAIj4+HhRVFSkL//www8FAPHxxx/ryx577LF6dQIgzGazOHDggL5s27ZtAoBYuHChvmzGjBnCbreLkydP6sv2798vjEZjyDb/9Kc/CQCioKCgwePSXCtXrhQAxMqVK0OWB/f79ddf15fNmzdPABBPPPFESNkBAwaI7Oxs/fkHH3wgAIhnn31WXxYIBMSoUaPqbbMxgwcPFmlpaUJRFH3Z8uXLBQDx6quv6tusqqoKWa+4uFgkJiaK22+/PWQ5APHYY4/pz19//XUBQBw+fFgIIUR+fr4wm81i2rRpQlVVvdyvfvUrAUDMmzdPX1b3mKmqKrp06SImT54csq7b7RYdO3YUEydO1JcFz5Ebb7yx3j6PGTNGj
Bkzpt7yCzkn+/TpI9LS0kR5ebm+bNWqVQJAyDZ/8pOfCJfLJQKBQL33D3rkkUeEyWQKec+qqioRExMTcrzvuOMOkZycLAoLC0PWnzNnjoiOjtZ/F4PHMSsrq97vZ0PnZWZmZsjnEFT3uF199dWiV69eje6HEEL84Q9/CPn8z6Wp7x2sd48ePULOzT//+c8CgNixY4cQQgifzyfat28v+vfvH1LutddeEwAaPA+aYvHixQKAeOGFF/RliqKI8ePH1/vdu/LKK0WfPn1Cvv9UVRUjRowQXbp00ZcFf1cmTJgQcn7/7Gc/EwaDQZSUlOjLmvo929Tj2dD3UFPP6eC6f/jDH4Tf7xezZ88WNptNfPbZZ3qZI0eOCIPBIJ588smQeuzYsUMYjcaQ5XV/DwsKCup9rxAR1cXu7kREACwWC2677bZ6y202m/64vLwchYWFGDVqFNxuN/bs2XPO7c6ePRuxsbH681GjRgEADh06dM51J0yYgE6dOunP+/btC5fLpa+rKAq+/PJLzJw5EykpKXq
"text/plain": [
"<Figure size 1200x800 with 1 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
}
],
"source": [
"\n",
"# Ergebnisse laden\n",
"base_path = 'Experiments'\n",
"loaded_results = load_results(base_path)\n",
"\n",
"# Ergebnisse verarbeiten und plotten\n",
"m_a_s = calculate_means_for_n_last(loaded_results, 50)\n",
"plot_results(m_a_s, show_lines=False)\n",
"plot_results(m_a_s)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# TODO MNIST datenstaz mit wget ohne tensorflow oder pytorch einlesen"
]
},
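{
"cell_type": "markdown",
"metadata": {},
"source": [
"Hedged sketch for the TODO above: download and parse the raw MNIST IDX files using only the standard library and NumPy (urllib in place of a wget subprocess, no TensorFlow/PyTorch). The mirror URL is an assumption and may need to be replaced; the parsing follows the documented IDX layout (big-endian magic number, dimension sizes, then raw bytes). The download lines are left commented out so the cell does not hit the network by accident."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import gzip\n",
"import struct\n",
"import urllib.request\n",
"import numpy as np\n",
"\n",
"MNIST_BASE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/' # assumed mirror\n",
"\n",
"def read_idx(path: str) -> np.ndarray:\n",
"    \"\"\"Parse a gzipped IDX file (MNIST format) into a NumPy array.\"\"\"\n",
"    with gzip.open(path, 'rb') as f:\n",
"        _, _, n_dims = struct.unpack('>HBB', f.read(4)) # magic: 2 zero bytes, dtype code, #dims\n",
"        shape = struct.unpack('>' + 'I' * n_dims, f.read(4 * n_dims))\n",
"        return np.frombuffer(f.read(), dtype=np.uint8).reshape(shape)\n",
"\n",
"def load_mnist_split(images_name: str, labels_name: str):\n",
"    \"\"\"Download (if needed) and parse one MNIST split.\"\"\"\n",
"    for name in (images_name, labels_name):\n",
"        urllib.request.urlretrieve(MNIST_BASE_URL + name, name)\n",
"    return read_idx(images_name), read_idx(labels_name)\n",
"\n",
"# train_images, train_labels = load_mnist_split('train-images-idx3-ubyte.gz', 'train-labels-idx1-ubyte.gz')\n",
"# print(train_images.shape, train_labels.shape) # expected: (60000, 28, 28) (60000,)"
]
},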
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "rl",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.18"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}