{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Laden der Rohdaten"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [],
"source": [
"import pickle\n",
"\n",
"# Laden der 'kirp' Liste aus der Pickle-Datei\n",
"with open('rick.pickle', 'rb') as f:\n",
" data_frame = pickle.load(f)"
]
},
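{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick sanity check on the unpickled object. This is a minimal sketch, assuming `rick.pickle` holds a pandas DataFrame with the `genome_frequencies` and `cancer_type` columns that the cells below rely on; the column names are taken from the code, not verified against the file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: inspect the loaded object before feeding it into the pipeline\n",
"# (assumes data_frame is a pandas DataFrame, see the note above).\n",
"print(type(data_frame))\n",
"print(data_frame.shape)\n",
"print(data_frame.columns.tolist())\n",
"print(data_frame['cancer_type'].value_counts())"
]
},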
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Aktiviere Cuda Support"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"CUDA is available on your system.\n"
]
}
],
"source": [
"import torch\n",
"device = \"cpu\"\n",
"if torch.cuda.is_available():\n",
" print(\"CUDA is available on your system.\")\n",
" device = \"cuda\"\n",
"else:\n",
" print(\"CUDA is not available on your system.\")"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# PCA Klasse zu Reduktion der Dimensionen"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"from torch.utils.data import Dataset\n",
"import torch\n",
"import pandas as pd\n",
"from sklearn.preprocessing import LabelEncoder\n",
"from sklearn.decomposition import PCA\n",
"from sklearn.preprocessing import StandardScaler\n",
"from sklearn.model_selection import train_test_split\n",
"from typing import List, Tuple, Dict\n",
"\n",
"\n",
"class GenomeDataset(Dataset):\n",
" \"\"\"\n",
" Eine benutzerdefinierte Dataset-Klasse, die für die Handhabung von Genomdaten konzipiert ist.\n",
" Diese Klasse wendet eine Principal Component Analysis (PCA) auf die Frequenzen der Genome an\n",
" und teilt den Datensatz in Trainings- und Validierungsteile auf.\n",
"\n",
" Attributes:\n",
" dataframe (pd.DataFrame): Ein Pandas DataFrame, der die initialen Daten enthält.\n",
" train_df (pd.DataFrame): Ein DataFrame, der den Trainingsdatensatz nach der Anwendung von PCA und der Aufteilung enthält.\n",
" val_df (pd.DataFrame): Ein DataFrame, der den Validierungsdatensatz nach der Anwendung von PCA und der Aufteilung enthält.\n",
"\n",
" Methods:\n",
" __init__(self, dataframe, n_pca_components=1034, train_size=0.8, split_random_state=42):\n",
" Konstruktor für die GenomeDataset Klasse.\n",
" _do_PCA(self, frequencies, n_components=1034):\n",
" Wendet PCA auf die gegebenen Frequenzen an.\n",
" _split_dataset(self, train_size=0.8, random_state=42):\n",
" Teilt den DataFrame in Trainings- und Validierungsdatensätze auf.\n",
" __getitem__(self, index):\n",
" Gibt ein Tupel aus transformierten Frequenzen und dem zugehörigen Krebstyp für einen gegebenen Index zurück.\n",
" __len__(self):\n",
" Gibt die Gesamtlänge der kombinierten Trainings- und Validierungsdatensätze zurück.\n",
" \"\"\"\n",
"\n",
" def __init__(self, dataframe: pd.DataFrame, n_pca_components: int = 1034, train_size: float = 0.8, split_random_state: int = 42):\n",
" \"\"\"\n",
" Konstruktor für die GenomeDataset Klasse.\n",
"\n",
" Parameters:\n",
" dataframe (pd.DataFrame): Der DataFrame, der die Genome Frequenzen und Krebsarten enthält.\n",
" n_pca_components (int): Die Anzahl der PCA-Komponenten, auf die reduziert werden soll. Standardwert ist 1034.\n",
" train_size (float): Der Anteil der Daten, der als Trainingsdaten verwendet werden soll. Standardwert ist 0.8.\n",
" split_random_state (int): Der Zufalls-Saatwert, der für die Aufteilung des Datensatzes verwendet wird. Standardwert ist 42.\n",
" \"\"\"\n",
" self.dataframe = dataframe\n",
"\n",
" # Umwandlung der Krebsarten in numerische Werte\n",
" self.label_encoder = LabelEncoder()\n",
" self.dataframe['encoded_cancer_type'] = self.label_encoder.fit_transform(dataframe['cancer_type'])\n",
"\n",
" # Anwenden der PCA auf die Frequenzen\n",
" self.dataframe['pca_frequencies'] = self._do_PCA(self.dataframe['genome_frequencies'].tolist(), n_pca_components)\n",
"\n",
" # Teilen des DataFrame in Trainings- und Validierungsdatensatz\n",
" self._split_dataset(train_size=train_size, random_state=split_random_state)\n",
"\n",
" def transform_datapoint(self, datapoint: List[float]) -> List[float]:\n",
" \"\"\"\n",
" Transformiert einen einzelnen Datenpunkt durch Standardisierung und Anwendung der PCA.\n",
"\n",
" Diese Methode nimmt einen rohen Datenpunkt (eine Liste von Frequenzen), standardisiert ihn mit dem \n",
" zuvor angepassten Scaler und wendet dann die PCA-Transformation an, um ihn in den reduzierten \n",
" Feature-Raum zu überführen, der für das Training des Modells verwendet wurde.\n",
"\n",
" Parameters:\n",
" datapoint (List[float]): Ein roher Datenpunkt, bestehend aus einer Liste von Frequenzen.\n",
"\n",
" Returns:\n",
" List[float]: Der transformierte Datenpunkt, nach Anwendung der Standardisierung und der PCA.\n",
" \"\"\"\n",
" # Standardisierung des Datenpunkts\n",
" scaled_data_point = self.scaler.transform([datapoint])\n",
"\n",
" # PCA-Transformation des standardisierten Datenpunkts\n",
" pca_transformed_point = self.pca.transform(scaled_data_point)\n",
"\n",
" return pca_transformed_point.tolist()\n",
"\n",
" def _do_PCA(self, frequencies: List[List[float]], n_components: int = 1034) -> List[List[float]]:\n",
" \"\"\"\n",
" Wendet PCA auf die gegebenen Frequenzen an.\n",
"\n",
" Parameters:\n",
" frequencies (List[List[float]]): Die Liste der Frequenzen, auf die die PCA angewendet werden soll.\n",
" n_components (int): Die Anzahl der Komponenten für die PCA. Standardwert ist 1034.\n",
"\n",
" Returns:\n",
" List[List[float]]: Eine Liste von Listen, die die transformierten Frequenzen nach der PCA darstellt.\n",
" \"\"\"\n",
"\n",
" # Standardisieren der Frequenzen\n",
" self.scaler = StandardScaler()\n",
" scaled_frequencies = self.scaler.fit_transform(frequencies)\n",
"\n",
" # PCA-Instanz erstellen und auf die gewünschte Anzahl von Komponenten reduzieren\n",
" self.pca = PCA(n_components=n_components)\n",
"\n",
" # PCA auf die Frequenzen anwenden\n",
" pca_result = self.pca.fit_transform(scaled_frequencies)\n",
"\n",
" return pca_result.tolist()\n",
"\n",
" def _split_dataset(self, train_size: float = 0.8, random_state: int = 42):\n",
" \"\"\"\n",
" Teilt den DataFrame in Trainings- und Validierungsdatensätze auf.\n",
"\n",
" Parameters:\n",
" train_size (float): Der Anteil der Daten, der als Trainingsdaten verwendet werden soll.\n",
" random_state (int): Der Zufalls-Saatwert, der für die Aufteilung des Datensatzes verwendet wird.\n",
" \"\"\"\n",
"\n",
" class SplittedDataset(Dataset):\n",
" def __init__(self, dataframe):\n",
" self.dataframe = dataframe\n",
"\n",
" # Umwandlung der Genome Frequenzen in Tensoren\n",
" self.genome_frequencies = torch.tensor(dataframe['pca_frequencies'].tolist(), dtype=torch.float32)\n",
"\n",
" # Umwandlung der Krebsarten in numerische Werte\n",
" self.label_encoder = LabelEncoder()\n",
" self.cancer_types = torch.tensor(dataframe['encoded_cancer_type'].tolist(), dtype=torch.long)\n",
"\n",
" def __getitem__(self, index):\n",
" # Rückgabe eines Tupels aus Genome Frequenzen und dem entsprechenden Krebstyp\n",
" return self.genome_frequencies[index], self.cancer_types[index]\n",
"\n",
" def __len__(self):\n",
" return len(self.dataframe)\n",
"\n",
" # Teilen des DataFrame in Trainings- und Validierungsdatensatz\n",
2024-01-05 15:16:59 +01:00
" train_df, val_df = train_test_split(self.dataframe, train_size=train_size) #, random_state=random_state)\n",
2024-01-04 14:47:29 +01:00
" self.train_df = SplittedDataset(train_df)\n",
" self.val_df = SplittedDataset(val_df)\n",
"\n",
"\n",
" def __getitem__(self, index: int) -> Tuple[torch.Tensor, int]:\n",
" \"\"\"\n",
" Gibt ein Tupel aus transformierten Frequenzen und dem entsprechenden Krebstyp für einen gegebenen Index zurück.\n",
"\n",
" Parameters:\n",
" index (int): Der Index des zu abrufenden Datenelements.\n",
"\n",
" Returns:\n",
" Tuple[torch.Tensor, int]: Ein Tupel, bestehend aus einem Tensor der transformierten Frequenzen und dem zugehörigen Krebstyp.\n",
" \"\"\"\n",
"\n",
" print(self.train_df.shape)\n",
" print(self.val_df.shape)\n",
" \n",
" if index < len(self.train_df):\n",
" row = self.train_df.iloc[index]\n",
" else:\n",
" row = self.val_df.iloc[len(self.train_df) - index]\n",
"\n",
" pca_frequencies_tensor = torch.tensor(row['pca_frequencies'], dtype=torch.float32)\n",
" cancer_type = row['encoded_cancer_type']\n",
"\n",
" return pca_frequencies_tensor, cancer_type\n",
"\n",
" def __len__(self) -> int:\n",
" \"\"\"\n",
" Gibt die Gesamtlänge der kombinierten Trainings- und Validierungsdatensätze zurück.\n",
"\n",
" Returns:\n",
" int: Die Länge der kombinierten Datensätze.\n",
" \"\"\"\n",
" \n",
" return len(self.train_df) + len(self.val_df)\n"
]
},
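{
"cell_type": "markdown",
"metadata": {},
"source": [
"A minimal usage sketch for `GenomeDataset`, assuming `data_frame` from the first cell. It builds the dataset with a small, arbitrary number of PCA components, draws one batch from the training split, and maps a raw frequency vector through `transform_datapoint`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from torch.utils.data import DataLoader\n",
"\n",
"# Sketch: build the dataset and peek at one training batch.\n",
"demo_dataset = GenomeDataset(data_frame, n_pca_components=16)\n",
"demo_loader = DataLoader(demo_dataset.train_df, batch_size=4, shuffle=True)\n",
"\n",
"features, labels = next(iter(demo_loader))\n",
"print(features.shape)  # -> torch.Size([4, 16])\n",
"print(labels)          # encoded cancer types\n",
"\n",
"# transform_datapoint maps a raw frequency vector into the fitted PCA space:\n",
"raw_point = data_frame['genome_frequencies'].iloc[0]\n",
"print(len(demo_dataset.transform_datapoint(raw_point)[0]))  # -> 16"
]
},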
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Definition des neuronalen Netzes"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import torch.nn as nn\n",
"import torch.optim as optim\n",
"import torch.nn.functional as F\n",
"\n",
"class CancerClassifierNN(nn.Module):\n",
" \"\"\"\n",
" Eine benutzerdefinierte neuronale Netzwerkklassifikator-Klasse für die Krebsklassifikation.\n",
"\n",
" Diese Klasse definiert ein mehrschichtiges Perzeptron (MLP), das für die Klassifizierung von Krebsarten\n",
" anhand genetischer Frequenzdaten verwendet wird.\n",
"\n",
" Attributes:\n",
" fc1 (nn.Linear): Die erste lineare Schicht des Netzwerks.\n",
" fc2 (nn.Linear): Die zweite lineare Schicht des Netzwerks.\n",
" fc3 (nn.Linear): Die dritte lineare Schicht des Netzwerks.\n",
" fc4 (nn.Linear): Die Ausgabeschicht des Netzwerks.\n",
" dropout (nn.Dropout): Ein Dropout-Layer zur Vermeidung von Overfitting.\n",
"\n",
" Methods:\n",
" __init__(self, input_size: int, num_classes: int):\n",
" Konstruktor für die CancerClassifierNN Klasse.\n",
" forward(self, x: torch.Tensor) -> torch.Tensor:\n",
" Definiert den Vorwärtsdurchlauf des Netzwerks.\n",
" \"\"\"\n",
"\n",
" def __init__(self, input_size: int, num_classes: int):\n",
" \"\"\"\n",
" Konstruktor für die CancerClassifierNN Klasse.\n",
"\n",
" Parameters:\n",
" input_size (int): Die Größe des Input-Features.\n",
" num_classes (int): Die Anzahl der Zielklassen.\n",
" \"\"\"\n",
" super(CancerClassifierNN, self).__init__()\n",
" # Definieren der Schichten\n",
2024-01-05 13:19:38 +01:00
" self.fc1 = nn.Linear(input_size, input_size) # Eingabeschicht\n",
" self.fc2 = nn.Linear(input_size, input_size//2) # Versteckte Schicht\n",
" self.fc3 = nn.Linear(input_size//2, input_size//4) # Weitere versteckte Schicht\n",
" self.fc4 = nn.Linear(input_size//4, num_classes) # Ausgabeschicht\n",
2024-01-04 14:47:29 +01:00
" self.dropout = nn.Dropout(p=0.5) # Dropout\n",
"\n",
" def forward(self, x: torch.Tensor) -> torch.Tensor:\n",
" \"\"\"\n",
" Definiert den Vorwärtsdurchlauf des Netzwerks.\n",
"\n",
" Parameters:\n",
" x (torch.Tensor): Der Input-Tensor für das Netzwerk.\n",
"\n",
" Returns:\n",
" torch.Tensor: Der Output-Tensor nach dem Durchlauf durch das Netzwerk.\n",
" \"\"\"\n",
" x = F.relu(self.fc1(x))\n",
" x = self.dropout(x)\n",
" x = F.relu(self.fc2(x))\n",
" x = self.dropout(x)\n",
" x = F.relu(self.fc3(x))\n",
2024-01-05 13:19:38 +01:00
" x = self.dropout(x)\n",
" x = torch.softmax(self.fc4(x), dim=1) # Oder F.log_softmax(x, dim=1) für Mehrklassenklassifikation\n",
2024-01-04 14:47:29 +01:00
" return x"
]
},
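{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick shape check for `CancerClassifierNN`: push a random dummy batch through the model and confirm there is one logit per class. The sizes (8 samples, 64 features) are arbitrary illustration values; `num_classes=3` matches the experiments below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: sanity-check the forward pass with a dummy batch.\n",
"dummy_model = CancerClassifierNN(input_size=64, num_classes=3)\n",
"dummy_batch = torch.randn(8, 64)  # 8 samples, 64 features\n",
"logits = dummy_model(dummy_batch)\n",
"print(logits.shape)  # -> torch.Size([8, 3]); raw logits for CrossEntropyLoss"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Training and Experiment Harness"
]
},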
{
"cell_type": "code",
"execution_count": 11,
"metadata": {},
"outputs": [],
"source": [
"from torch.utils.data import DataLoader\n",
"import torch.optim as optim\n",
"from IPython.display import clear_output\n",
"import matplotlib.pyplot as plt\n",
"import numpy as np\n",
"import os\n",
"\n",
"class ExperimentationalExperiments():\n",
"\n",
" def __init__(self) -> None:\n",
" self.results = None\n",
"\n",
" def run_single_experiment(self, train_loader: DataLoader, valid_loader: DataLoader, n_pca_components: int, n_epochs: int = 200, learning_rate: int = 0.0005, verbose: bool = True, experiment_num: int = None) -> Tuple:\n",
" if not isinstance(n_pca_components, int):\n",
" raise TypeError(\"n_pca_components must be an integers!\")\n",
"\n",
" model = CancerClassifierNN(input_size=n_pca_components, num_classes=3)\n",
" model.to(device=device)\n",
"\n",
" # Verlustfunktion\n",
" criterion = nn.CrossEntropyLoss()\n",
" # Optimierer\n",
" optimizer = optim.Adam(model.parameters(), lr=learning_rate)\n",
"\n",
" # Listen, um Verluste zu speichern\n",
" train_losses = []\n",
" valid_losses = []\n",
" train_accuracies = []\n",
" valid_accuracies = []\n",
"\n",
" for epoch in range(n_epochs):\n",
" model.train()\n",
" train_loss = 0.0\n",
" correct_predictions = 0\n",
" total_predictions = 0\n",
"\n",
" for i, (inputs, labels) in enumerate(train_loader):\n",
" inputs, labels = inputs.to(device), labels.to(device)\n",
" optimizer.zero_grad()\n",
" outputs = model(inputs)\n",
" loss = criterion(outputs, labels)\n",
" loss.backward()\n",
" optimizer.step()\n",
" train_loss += loss.item()\n",
"\n",
" # Berechnen der Genauigkeit\n",
" _, predicted = torch.max(outputs, 1)\n",
" correct_predictions += (predicted == labels).sum().item()\n",
" total_predictions += labels.size(0)\n",
"\n",
" # Durchschnittlicher Trainingsverlust und Genauigkeit\n",
" train_loss /= len(train_loader)\n",
" train_accuracy = correct_predictions / total_predictions\n",
" train_losses.append(train_loss)\n",
" train_accuracies.append(train_accuracy)\n",
"\n",
" # Validierungsverlust und Genauigkeit\n",
" model.eval()\n",
" valid_loss = 0.0\n",
" correct_predictions = 0\n",
" total_predictions = 0\n",
"\n",
" with torch.no_grad():\n",
" for inputs, labels in valid_loader:\n",
" inputs, labels = inputs.to(device), labels.to(device)\n",
" outputs = model(inputs)\n",
" loss = criterion(outputs, labels)\n",
" valid_loss += loss.item()\n",
"\n",
" # Berechnen der Genauigkeit\n",
" _, predicted = torch.max(outputs, 1)\n",
" correct_predictions += (predicted == labels).sum().item()\n",
" total_predictions += labels.size(0)\n",
"\n",
" # Durchschnittlicher Validierungsverlust und Genauigkeit\n",
" valid_loss /= len(valid_loader)\n",
" valid_accuracy = correct_predictions / total_predictions\n",
" valid_losses.append(valid_loss)\n",
" valid_accuracies.append(valid_accuracy)\n",
"\n",
" if valid_accuracy >= 0.999:\n",
" break\n",
"\n",
"\n",
" # Aktualisieren des Graphen\n",
" clear_output(wait=True)\n",
" fig, ax1 = plt.subplots()\n",
"\n",
" # Zeichnen der Verlustkurven\n",
" ax1.plot(train_losses, label='Trainingsverlust', color='r')\n",
" ax1.plot(valid_losses, label='Validierungsverlust', color='b')\n",
" ax1.set_xlabel('Epochen')\n",
" ax1.set_ylabel('Verlust', color='g')\n",
" ax1.tick_params(axis='y', labelcolor='g')\n",
"\n",
" # Zweite y-Achse für die Genauigkeit\n",
" ax2 = ax1.twinx()\n",
" ax2.plot(train_accuracies, label='Trainingsgenauigkeit', color='r', linestyle='dashed')\n",
" ax2.plot(valid_accuracies, label='Validierungsgenauigkeit', color='b', linestyle='dashed')\n",
" ax2.set_ylabel('Genauigkeit', color='g')\n",
" ax2.tick_params(axis='y', labelcolor='g')\n",
"\n",
" # Titel und Legende\n",
" plt.title(f'Experiment #{experiment_num}: Trainings- und Validierungsverlust und -genauigkeit über die Zeit mit \\n{n_pca_components}-Hauptkomponenten, Lernrate: {learning_rate}')\n",
" fig.tight_layout()\n",
"\n",
" # Legende außerhalb des Graphen\n",
" ax1.legend(loc='upper left', bbox_to_anchor=(1.15, 1))\n",
" ax2.legend(loc='upper left', bbox_to_anchor=(1.15, 0.85))\n",
"\n",
" # Fortschritt anzeigen, falls angegeben\n",
" if verbose:\n",
" # Plot speichern\n",
" name = str(experiment_num) + \".png\" if experiment_num is not None else \"single_experiment.png\"\n",
" if not os.path.exists(\"Experiments\"):\n",
" os.makedirs(\"Experiments\")\n",
" if not os.path.exists(f\"Experiments/{str(n_pca_components)}\"):\n",
" os.makedirs(f\"Experiments/{str(n_pca_components)}\")\n",
" plt.savefig(f\"Experiments/{str(n_pca_components)}/{name}\")\n",
"\n",
" plt.show()\n",
" print(f'Epoch [{epoch+1}/{n_epochs}], Trainingsverlust: {train_loss:.4f}, Trainingsgenauigkeit: {train_accuracies[-1]:.4f}, Validierungsverlust: {valid_loss:.4f}, Validierungsgenauigkeit: {valid_accuracies[-1]:.4f}')\n",
"\n",
" return train_losses, valid_losses, train_accuracies, valid_accuracies\n",
"\n",
" def run_single_pca_experiment(self, train_loader: DataLoader, valid_loader: DataLoader, n_pca_components: int, n_experiments: int, n_epochs: int = 200, learning_rate: int = 0.0005, verbose: bool = True) -> List:\n",
" if not isinstance(n_pca_components, int):\n",
" raise TypeError(\"n_pca_components must be an integers!\")\n",
"\n",
" results = []\n",
"\n",
" for n in range(n_experiments):\n",
" res = self.run_single_experiment(train_loader, valid_loader, n_pca_components, n_epochs=n_epochs, learning_rate=learning_rate, verbose=verbose, experiment_num=n+1)\n",
" results.append(res)\n",
"\n",
" return results\n",
" \n",
"\n",
" def run(self, n_pca_components: List[int], n_experiments: int, n_epochs: int = 200, learning_rate: int = 0.0005, batch_size: int = 64, verbose: bool = True) -> Dict:\n",
" if not isinstance(n_pca_components, list):\n",
" raise TypeError(\"n_pca_components must be a list of integers!\")\n",
"\n",
" self.n_pca_components = n_pca_components\n",
"\n",
" results = {}\n",
"\n",
" for n_pca_comps in n_pca_components:\n",
" genome_dataset = GenomeDataset(data_frame, n_pca_components=n_pca_comps)\n",
" train_dataset = genome_dataset.train_df\n",
" valid_dataset = genome_dataset.val_df\n",
"\n",
" train_loader = DataLoader(dataset=train_dataset, batch_size=batch_size, shuffle=True)\n",
" valid_loader = DataLoader(dataset=valid_dataset, batch_size=batch_size, shuffle=False)\n",
"\n",
" res = self.run_single_pca_experiment(train_loader, valid_loader, n_pca_comps, n_experiments, n_epochs=n_epochs, learning_rate=learning_rate, verbose=verbose)\n",
" results[str(n_pca_comps)] = res\n",
"\n",
" self.plot_and_save_results(res, n_pca_comps)\n",
"\n",
" self.results = results\n",
"\n",
" return results\n",
"\n",
" def plot_and_save_results(self, results: List[Tuple], n_pca_components: int) -> None:\n",
" # Mittelwerte und Standardabweichungen berechnen\n",
" train_losses, valid_losses, train_accuracies, valid_accuracies = zip(*results)\n",
"\n",
" train_losses = np.array(train_losses)\n",
" valid_losses = np.array(valid_losses)\n",
" train_accuracies = np.array(train_accuracies)\n",
" valid_accuracies = np.array(valid_accuracies)\n",
"\n",
" avg_train_losses = np.mean(train_losses, axis=0)\n",
" avg_valid_losses = np.mean(valid_losses, axis=0)\n",
" avg_train_acc = np.mean(train_accuracies, axis=0)\n",
" avg_valid_acc = np.mean(valid_accuracies, axis=0)\n",
"\n",
" std_train_losses = np.std(train_losses, axis=0)\n",
" std_valid_losses = np.std(valid_losses, axis=0)\n",
" std_train_acc = np.std(train_accuracies, axis=0)\n",
" std_valid_acc = np.std(valid_accuracies, axis=0)\n",
"\n",
" # Erstellen von Plots\n",
" epochs = range(1, len(avg_train_losses) + 1)\n",
"\n",
" # Plot für Verluste\n",
" plt.plot(epochs, avg_train_losses, label='Mittlerer Trainingsverlust', color='r')\n",
" plt.fill_between(epochs, np.subtract(avg_train_losses, std_train_losses), np.add(avg_train_losses, std_train_losses), color='r', alpha=0.2)\n",
" plt.plot(epochs, avg_valid_losses, label='Mittlerer Validierungsverlust', color='b')\n",
" plt.fill_between(epochs, np.subtract(avg_valid_losses, std_valid_losses), np.add(avg_valid_losses, std_valid_losses), color='b', alpha=0.2)\n",
" plt.title(f'Mittelwert und Standardabweichung der Verluste für {n_pca_components} PCA-Komponenten')\n",
" plt.xlabel('Experiment Nummer')\n",
" plt.ylabel('Verlust')\n",
" plt.legend()\n",
" plt.savefig(f\"Experiments/{n_pca_components}/average_losses.png\")\n",
" plt.clf()\n",
"\n",
" # Plot für Genauigkeiten\n",
" plt.plot(epochs, avg_train_acc, label='Mittlere Trainingsgenauigkeit', color='r')\n",
" plt.fill_between(epochs, np.subtract(avg_train_acc, std_train_acc), np.add(avg_train_acc, std_train_acc), color='r', alpha=0.2)\n",
" plt.plot(epochs, avg_valid_acc, label='Mittlere Validierungsgenauigkeit', color='b')\n",
" plt.fill_between(epochs, np.subtract(avg_valid_acc, std_valid_acc), np.add(avg_valid_acc, std_valid_acc), color='b', alpha=0.2)\n",
" plt.title(f'Mittelwert und Standardabweichung der Genauigkeiten für {n_pca_components} PCA-Komponenten')\n",
" plt.xlabel('Experiment Nummer')\n",
" plt.ylabel('Genauigkeit')\n",
" plt.legend()\n",
" plt.savefig(f\"Experiments/{n_pca_components}/average_accuracies.png\")\n",
" plt.clf()\n",
"\n",
" "
]
},
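{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Running the Experiments"
]
},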
{
"cell_type": "code",
"execution_count": 12,
"metadata": {},
"outputs": [
{
"data": {
"image/png": "<base64 PNG data truncated in this view: per-experiment training/validation loss and accuracy curves>",
"text/plain": [
"<Figure size 640x480 with 2 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Epoch [10/10], training loss: 0.8922, training accuracy: 0.6554, validation loss: 0.7765, validation accuracy: 0.8213\n"
]
},
{
"data": {
"text/plain": [
"{'128': [([1.1048660278320312,\n",
" 1.0021918095075166,\n",
" 0.9533032820774958,\n",
" 0.9180488632275507,\n",
" 0.8890538536585294,\n",
" 0.8560153337625357,\n",
" 0.8325601586928735,\n",
" 0.7921991394116328,\n",
" 0.7684880128273597,\n",
" 0.7482978334793677],\n",
" [1.0328907370567322,\n",
" 0.9768994599580765,\n",
" 0.9316655248403549,\n",
" 0.8864333480596542,\n",
" 0.8357236087322235,\n",
" 0.7867822349071503,\n",
" 0.7491058260202408,\n",
" 0.7292499840259552,\n",
" 0.7173636108636856,\n",
" 0.7077213227748871],\n",
" [0.3808948004836759,\n",
" 0.5646916565900847,\n",
" 0.6178960096735188,\n",
" 0.6360338573155986,\n",
" 0.660217654171705,\n",
" 0.6856106408706167,\n",
" 0.7267230955259976,\n",
" 0.7678355501813785,\n",
" 0.7980652962515115,\n",
" 0.8089480048367593],\n",
" [0.5603864734299517,\n",
" 0.5603864734299517,\n",
" 0.5700483091787439,\n",
" 0.6280193236714976,\n",
" 0.7342995169082126,\n",
" 0.7874396135265701,\n",
" 0.7971014492753623,\n",
" 0.8115942028985508,\n",
" 0.8309178743961353,\n",
" 0.8454106280193237]),\n",
" ([1.07738203727282,\n",
" 0.974720102090102,\n",
" 0.8973770370850196,\n",
" 0.8750206277920649,\n",
" 0.828568004644834,\n",
" 0.7929298465068524,\n",
" 0.776282012462616,\n",
" 0.7521205315223107,\n",
" 0.7466858258614173,\n",
" 0.7324622090046222],\n",
" [0.9812372475862503,\n",
" 0.8927455991506577,\n",
" 0.8332259058952332,\n",
" 0.780693456530571,\n",
" 0.7470725178718567,\n",
" 0.7277732938528061,\n",
" 0.7195615023374557,\n",
" 0.7130803763866425,\n",
" 0.7097116559743881,\n",
" 0.7066716551780701],\n",
" [0.4183796856106409,\n",
" 0.585247883917775,\n",
" 0.6650544135429263,\n",
" 0.6747279322853688,\n",
" 0.7291414752116082,\n",
" 0.7690447400241838,\n",
" 0.7847642079806529,\n",
" 0.8077388149939541,\n",
" 0.8077388149939541,\n",
" 0.8174123337363967],\n",
" [0.7777777777777778,\n",
" 0.7632850241545893,\n",
" 0.7729468599033816,\n",
" 0.8067632850241546,\n",
" 0.8309178743961353,\n",
" 0.8309178743961353,\n",
" 0.8357487922705314,\n",
" 0.8405797101449275,\n",
" 0.8357487922705314,\n",
" 0.8357487922705314])],\n",
" '64': [([1.1704304309991689,\n",
" 1.123612798177279,\n",
" 1.0620490037477934,\n",
" 1.0275296935668359,\n",
" 1.0067202678093543,\n",
" 0.9626164573889512,\n",
" 0.9474408672406123,\n",
" 0.9108310204285842,\n",
" 0.8937133229695834,\n",
" 0.8762712020140427],\n",
" [1.1392732560634613,\n",
" 1.0526379346847534,\n",
" 0.9852658659219742,\n",
" 0.935427650809288,\n",
" 0.8939110040664673,\n",
" 0.8603413552045822,\n",
" 0.8327844738960266,\n",
" 0.8083746880292892,\n",
" 0.7864409983158112,\n",
" 0.7707570642232895],\n",
" [0.2720677146311971,\n",
" 0.3422007255139057,\n",
" 0.4607013301088271,\n",
" 0.4703748488512696,\n",
" 0.5054413542926239,\n",
" 0.5804111245465539,\n",
" 0.5949214026602176,\n",
" 0.6348246674727932,\n",
" 0.6517533252720678,\n",
" 0.6771463119709794],\n",
" [0.2463768115942029,\n",
" 0.4927536231884058,\n",
" 0.6280193236714976,\n",
" 0.6714975845410628,\n",
" 0.6956521739130435,\n",
" 0.7198067632850241,\n",
" 0.7342995169082126,\n",
" 0.748792270531401,\n",
" 0.7584541062801933,\n",
" 0.7632850241545893]),\n",
" ([1.2071779507857103,\n",
" 1.1123931407928467,\n",
" 1.0899395575890174,\n",
" 1.0431747711621797,\n",
" 0.99530978844716,\n",
" 0.986173152923584,\n",
" 0.9467862248420715,\n",
" 0.9212788801926833,\n",
" 0.9122511377701392,\n",
" 0.8921931798641498],\n",
" [1.1124519407749176,\n",
" 1.0438154637813568,\n",
" 0.9916806370019913,\n",
" 0.9486012309789658,\n",
" 0.9076720178127289,\n",
" 0.8729775846004486,\n",
" 0.8453357666730881,\n",
" 0.819965660572052,\n",
" 0.7980590611696243,\n",
" 0.7765364795923233],\n",
" [0.2551390568319226,\n",
" 0.36638452237001207,\n",
" 0.4159613059250302,\n",
" 0.48972188633615477,\n",
" 0.5538089480048367,\n",
" 0.5767835550181378,\n",
" 0.6021765417170496,\n",
" 0.6432889963724304,\n",
" 0.6348246674727932,\n",
" 0.6553808948004837],\n",
" [0.4057971014492754,\n",
" 0.6183574879227053,\n",
" 0.6763285024154589,\n",
" 0.6956521739130435,\n",
" 0.6956521739130435,\n",
" 0.7101449275362319,\n",
" 0.7294685990338164,\n",
" 0.7584541062801933,\n",
" 0.7777777777777778,\n",
" 0.821256038647343])]}"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
},
{
"data": {
"text/plain": [
"<Figure size 640x480 with 0 Axes>"
]
},
"metadata": {},
"output_type": "display_data"
2024-01-04 14:47:29 +01:00
}
],
"source": [
"e = ExperimentationalExperiments()\n",
"e.run([1024, 512, 256, 128, 64, 32, 16], 10, n_epochs=500)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "rl",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.18"
},
"orig_nbformat": 4
},
"nbformat": 4,
"nbformat_minor": 2
}