"""font-identifier / train_font_identifier.py

Gabor Cselle

Train a Font Identifier using ResNet18.
"""
import copy
import os
import time
import torch
import torch.optim as optim
from torch.optim import lr_scheduler
from torchvision import datasets, models, transforms
from tqdm import tqdm
# Directory with organized font images
data_dir = './train_test_images'
# Define transformations for the image data
data_transforms = {
    'train': transforms.Compose([
        transforms.Resize((224, 224)),  # Resize to the input size expected by the model
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])  # ImageNet standards
    ]),
    'test': transforms.Compose([
        transforms.Resize((224, 224)),  # Resize to the input size expected by the model
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
    ]),
}
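# A possible extension (an assumption, not part of the original script): light data
# augmentation for the training split. Swapping this Compose in for
# data_transforms['train'] may help the model generalize across renderings.
augmented_train_transforms = transforms.Compose([
    transforms.Resize((224, 224)),
    transforms.RandomAffine(degrees=5, translate=(0.05, 0.05)),  # small geometric jitter
    transforms.ColorJitter(brightness=0.2, contrast=0.2),        # lighting variation
    transforms.ToTensor(),
    transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])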
# Create datasets
image_datasets = {
    x: datasets.ImageFolder(os.path.join(data_dir, x), data_transforms[x])
    for x in ['train', 'test']
}
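# Sanity check (not in the original script): list the font classes that
# ImageFolder discovered from the subdirectory names, and the size of each split.
class_names = image_datasets['train'].classes
print(f"Found {len(class_names)} font classes: {class_names}")
print({x: len(image_datasets[x]) for x in ['train', 'test']})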
# Create dataloaders
dataloaders = {
    # Shuffle the training data so each batch mixes font classes
    # (ImageFolder otherwise yields samples grouped by class).
    'train': torch.utils.data.DataLoader(image_datasets['train'], batch_size=4, shuffle=True),
    'test': torch.utils.data.DataLoader(image_datasets['test'], batch_size=4)
}
# Define the model
model = models.resnet18(weights=models.ResNet18_Weights.DEFAULT)
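# The pretrained ResNet18 head has 1000 ImageNet outputs. A typical adaptation
# (an assumption here, not shown in the script above) is to replace the final
# fully connected layer so it predicts one logit per font class.
num_classes = len(image_datasets['train'].classes)
model.fc = torch.nn.Linear(model.fc.in_features, num_classes)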
# Define the loss function
criterion = torch.nn.CrossEntropyLoss()
# Optimizer (you can replace 'model.parameters()' with specific parameters to optimize if needed)
optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
# Decay LR by a factor of 0.1 every 7 epochs
exp_lr_scheduler = lr_scheduler.StepLR(optimizer, step_size=7, gamma=0.1)
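# A possible variant (an assumption, not part of the original script): freeze the
# pretrained backbone and fine-tune only the classifier head by building the
# optimizer over model.fc.parameters() instead of model.parameters():
#     for param in model.parameters():
#         param.requires_grad = False
#     for param in model.fc.parameters():
#         param.requires_grad = True
#     optimizer = optim.SGD(model.fc.parameters(), lr=0.001, momentum=0.9)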
# Number of epochs to train for
num_epochs = 25
def train_model(model, criterion, optimizer, scheduler, num_epochs=25):
    since = time.time()
    best_model_wts = copy.deepcopy(model.state_dict())
    best_acc = 0.0
    for epoch in range(num_epochs):
        print('Epoch {}/{}'.format(epoch, num_epochs - 1))
        print('-' * 10)
        # Each epoch has a training and a validation phase
        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()  # Set model to training mode
            else:
                model.eval()  # Set model to evaluate mode
            running_loss = 0.0
            running_corrects = 0
            # Iterate over data.
            # Here we wrap the dataloader with tqdm for a progress bar
            for inputs, labels in tqdm(dataloaders[phase], desc=f"Epoch {epoch} - {phase}"):
                # Zero the parameter gradients
                optimizer.zero_grad()
                # Forward
                # Track history only in train
                with torch.set_grad_enabled(phase == 'train'):
                    outputs = model(inputs)
                    _, preds = torch.max(outputs, 1)
                    loss = criterion(outputs, labels)
                    # Backward + optimize only if in training phase
                    if phase == 'train':
                        loss.backward()
                        optimizer.step()
                # Statistics
                running_loss += loss.item() * inputs.size(0)
                running_corrects += torch.sum(preds == labels.data)
            if phase == 'train':
                scheduler.step()
            epoch_loss = running_loss / len(image_datasets[phase])
            epoch_acc = running_corrects.double() / len(image_datasets[phase])
            print('{} Loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))
            # Deep copy the model if it is the best seen so far on the test set
            if phase == 'test' and epoch_acc > best_acc:
                best_acc = epoch_acc
                best_model_wts = copy.deepcopy(model.state_dict())
        print()
    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))
    print('Best test Acc: {:.4f}'.format(best_acc))
    # Load best model weights
    model.load_state_dict(best_model_wts)
    return model
# Train the model
model = train_model(model, criterion, optimizer, exp_lr_scheduler, num_epochs=num_epochs)
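# Persist the fine-tuned weights so they can be reloaded for inference
# (the output filename is an assumption; the original script does not specify one).
torch.save(model.state_dict(), 'font_identifier_resnet18.pth')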