Inception code

파이썬정복
|2024. 7. 17. 04:58
Inception V3

import os
os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"]="3"   
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, models, transforms
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
import time
import copy
from tqdm import tqdm
from sklearn.utils.class_weight import compute_class_weight

# Training hyperparameters for fine-tuning Inception V3.
BATCH_SIZE = 32  # samples per mini-batch
LEARNING_RATE = 1e-6  # SGD step size; deliberately tiny for fine-tuning a pretrained net
MOMENTUM = 0.9  # SGD momentum factor
NUM_EPOCHS = 50  # full passes over the training set

# Data transformations
# Data transformations.
# Inception V3 takes 299x299 inputs. The torchvision pretrained weights were
# trained with ImageNet normalization, so the same statistics must be applied
# here — feeding raw [0, 1] tensors to the pretrained backbone degrades it.
IMAGENET_MEAN = [0.485, 0.456, 0.406]
IMAGENET_STD = [0.229, 0.224, 0.225]
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD)
    ]),
    'val': transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD)
    ]),
    'test': transforms.Compose([
        transforms.Resize((299, 299)),
        transforms.ToTensor(),
        transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD)
    ]),
}

# Datasets.
# ImageFolder does not expand "~" itself — a literal "~/datasets/..." path
# would simply not be found — so expand the user's home directory explicitly.
train_dataset = datasets.ImageFolder(os.path.expanduser("~/datasets/train"), transform=data_transforms['train'])
test_dataset = datasets.ImageFolder(os.path.expanduser("~/datasets/test"), transform=data_transforms['test'])
val_dataset = datasets.ImageFolder(os.path.expanduser("~/datasets/val"), transform=data_transforms['val'])

# Dataloaders.
# Only the training loader shuffles and drops the last partial batch.
# Shuffling val/test gains nothing, and drop_last=True on them silently
# discards up to BATCH_SIZE-1 samples while the metrics below divide by
# len(dataset) — which skews the reported accuracy.
train_dataloader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True, num_workers=4, drop_last=True)
test_dataloader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)
val_dataloader = DataLoader(val_dataset, batch_size=BATCH_SIZE, shuffle=False, num_workers=4)

# Device configuration: first visible GPU if available, else CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Balanced class weights from the training-label distribution, used to
# counteract class imbalance in the cross-entropy loss.
# (The label list is built once instead of twice; the former unused
# class_counts/np.bincount computation is removed.)
classes = train_dataset.classes
train_labels = [label for _, label in train_dataset.samples]
class_weights = compute_class_weight(class_weight='balanced',
                                     classes=np.arange(len(classes)),
                                     y=train_labels)
class_weights = torch.tensor(class_weights, dtype=torch.float).to(device)

# Load pretrained Inception V3 and disable the auxiliary classifier; with
# aux_logits False the forward pass returns a single logits tensor.
model = models.inception_v3(pretrained=True)
model.aux_logits = False

# Replace the final fully-connected layer so its output width matches the
# number of classes discovered in the training set (rather than a
# hard-coded 5, which would break silently if the dataset changes).
model.fc = nn.Linear(model.fc.in_features, len(classes))
model = model.to(device)

# Class-weighted cross-entropy plus plain SGD with momentum.
criterion = nn.CrossEntropyLoss(weight=class_weights)
optimizer = optim.SGD(model.parameters(), lr=LEARNING_RATE, momentum=MOMENTUM)

# Training function with tqdm
def _save_metric_plot(model_name, train_vals, val_vals, metric, colors, filename):
    """Plot one training/validation metric pair and save it as *filename*."""
    plt.figure(figsize=(10, 5))
    plt.plot(train_vals, label=f'Training {metric}', color=colors[0])
    plt.plot(val_vals, label=f'Validation {metric}', color=colors[1])
    plt.xlabel('Epochs')
    plt.ylabel(metric)
    plt.legend()
    plt.title(f'{model_name} Training and Validation {metric}\nEpochs: {NUM_EPOCHS}, LR: {LEARNING_RATE}')
    plt.savefig(filename)  # Save the plot
    plt.clf()  # Clear the plot


def train_model(model, criterion, optimizer, num_epochs=25):
    """Fine-tune *model* and return it loaded with the best-validation weights.

    Reads the module-level ``train_dataloader``/``val_dataloader`` and
    ``device``. Side effects: prints per-epoch metrics and saves loss and
    accuracy curves as PNG files in the working directory.

    Args:
        model: the network to train (already on ``device``).
        criterion: loss function applied to (logits, labels).
        optimizer: optimizer over ``model.parameters()``.
        num_epochs: number of full training passes.

    Returns:
        The model with the state dict that achieved the best validation accuracy.
    """
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0

    train_losses, val_losses = [], []
    train_accuracies, val_accuracies = [], []

    for epoch in range(num_epochs):
        print(f'Epoch {epoch}/{num_epochs - 1}')
        print('-' * 10)

        # ---- Training phase ----
        model.train()
        running_loss = 0.0
        running_corrects = 0
        # Count samples actually seen: with drop_last=True the loader may
        # skip a final partial batch, so len(train_dataset) is the wrong
        # denominator for the epoch metrics.
        train_seen = 0

        for inputs, labels in tqdm(train_dataloader, desc='Training', leave=False):
            inputs = inputs.to(device)
            labels = labels.to(device)

            optimizer.zero_grad()

            outputs = model(inputs)
            _, preds = torch.max(outputs, 1)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            running_loss += loss.item() * inputs.size(0)
            running_corrects += torch.sum(preds == labels.data)
            train_seen += inputs.size(0)

        epoch_loss = running_loss / train_seen
        epoch_acc = running_corrects.double() / train_seen

        train_losses.append(epoch_loss)
        train_accuracies.append(epoch_acc.cpu().numpy())

        print(f'Train Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

        # ---- Validation phase ----
        model.eval()
        running_loss = 0.0
        running_corrects = 0
        val_seen = 0  # same sample-count fix as in the training phase

        with torch.no_grad():
            for inputs, labels in tqdm(val_dataloader, desc='Validation', leave=False):
                inputs = inputs.to(device)
                labels = labels.to(device)

                outputs = model(inputs)
                _, preds = torch.max(outputs, 1)
                loss = criterion(outputs, labels)

                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
                val_seen += inputs.size(0)

        epoch_loss = running_loss / val_seen
        epoch_acc = running_corrects.double() / val_seen

        val_losses.append(epoch_loss)
        val_accuracies.append(epoch_acc.cpu().numpy())

        print(f'Val Loss: {epoch_loss:.4f} Acc: {epoch_acc:.4f}')

        # Keep a deep copy of the weights with the best validation accuracy.
        if epoch_acc > best_acc:
            best_acc = epoch_acc
            best_model_wts = copy.deepcopy(model.state_dict())

        print()

    time_elapsed = time.time() - since
    print(f'Training complete in {time_elapsed // 60:.0f}m {time_elapsed % 60:.0f}s')
    print(f'Best val Acc: {best_acc:.4f}')

    # Load best model weights
    model.load_state_dict(best_model_wts)

    model_name = "Inception_V3"

    # Save the loss and accuracy curves.
    _save_metric_plot(model_name, train_losses, val_losses,
                      'Loss', ('green', 'red'), 'training_validation_loss_e6.png')
    _save_metric_plot(model_name, train_accuracies, val_accuracies,
                      'Accuracy', ('blue', 'orange'), 'training_validation_accuracy_e6.png')

    return model

# Train the model
model = train_model(model, criterion, optimizer, NUM_EPOCHS)

# Evaluate on the held-out test set.
model.eval()
running_corrects = 0
# Count evaluated samples: with drop_last=True the test loader may skip a
# final partial batch, so dividing by len(test_dataset) would understate
# the accuracy. Use the number of samples actually scored.
total_seen = 0

with torch.no_grad():
    for inputs, labels in tqdm(test_dataloader, desc='Testing', leave=False):
        inputs = inputs.to(device)
        labels = labels.to(device)

        outputs = model(inputs)
        _, preds = torch.max(outputs, 1)
        running_corrects += torch.sum(preds == labels.data)
        total_seen += labels.size(0)

test_acc = running_corrects.double() / total_seen
print(f'Test Acc: {test_acc:.4f}')

'해외경험 > UNLV' 카테고리의 다른 글

ResNet code  (0) 2024.07.17
ResNET50이 뭘까  (0) 2024.07.11
3주차  (0) 2024.07.11
3주차 수요일  (0) 2024.07.11
2주차  (0) 2024.07.06