mmp_wise2526_franksim/mmp/a3/main.py

import argparse

import torch

from a2.main import MmpNet, get_criterion_optimizer, train_epoch, eval_epoch
from a3.dataset import get_dataloader


def main():
    """Train and evaluate MmpNet for Exercise 3.3."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--tensorboard', action='store_true',
                        help='Enable TensorBoard logging')
    args = parser.parse_args()

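    # Run on the GPU when one is available.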
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    train_epochs = 10
    model = MmpNet(num_classes=2).to(device=device)

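    # Train and eval loaders share the same dataset root; is_train selects the split.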
    dataloader_train = get_dataloader(
        path_to_data="/home/ubuntu/mmp_wise2526_franksim/.data/mmp-public-3.2",
        image_size=244, batch_size=32, num_workers=6, is_train=True,
    )
    dataloader_eval = get_dataloader(
        path_to_data="/home/ubuntu/mmp_wise2526_franksim/.data/mmp-public-3.2",
        image_size=244, batch_size=32, num_workers=6, is_train=False,
    )

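    # Loss function and optimizer come from the Exercise 2 setup in a2.main.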
    criterion, optimizer = get_criterion_optimizer(model=model)

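    # Create the TensorBoard writer only when requested; the import is deferred
    # so torch.utils.tensorboard is not needed otherwise.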
    writer = None
    if args.tensorboard:
        from torch.utils.tensorboard import SummaryWriter
        writer = SummaryWriter(log_dir="runs/a3_mmpnet")

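    # Train for a fixed number of epochs, evaluating after each one.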
    for epoch in range(train_epochs):
        train_loss = train_epoch(
            model=model,
            loader=dataloader_train,
            optimizer=optimizer,
            device=device,
            criterion=criterion,
        )
        val_acc = eval_epoch(
            model=model,
            loader=dataloader_eval,
            device=device,
        )
        print(
            f"Epoch [{epoch+1}/{train_epochs}] - Train Loss: {train_loss:.4f} - Val Acc: {val_acc:.4f}"
        )
        if writer is not None:
            writer.add_scalar("Loss/train", train_loss, epoch)
            writer.add_scalar("Accuracy/val", val_acc, epoch)

    if writer is not None:
        writer.close()


if __name__ == "__main__":
    main()