# source
# - https://github.com/horovod/horovod/blob/master/examples/pytorch/pytorch_synthetic_benchmark.py
import argparse
import time

import torch
import torch.backends.cudnn as cudnn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import models

# Benchmark settings
parser = argparse.ArgumentParser(
    description='PyTorch Synthetic Benchmark',
    formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-i', '--images', type=int, default=1024,
                    help='image number')
parser.add_argument('--batch-size', type=int, default=32,
                    help='input batch size')
parser.add_argument('-e', '--epochs', type=int, default=1,
                    help='epochs')
parser.add_argument('--model', type=str, default='resnet50',
                    help='model to benchmark')


def main():
    """Benchmark a torchvision model on one synthetic batch and report img/sec.

    The same random batch is reused for every step, so the numbers measure
    raw forward/backward/update throughput with no data-loading cost.
    """
    args = parser.parse_args()

    # model: look the architecture up by name on torchvision.models
    model = getattr(models, args.model)()
    model.cuda()
    # placeholder for distributed LR scaling; a single worker scales by 1
    lr_scaler = 1
    optimizer = optim.SGD(model.parameters(), lr=0.01 * lr_scaler)
    cudnn.benchmark = True  # let cuDNN auto-tune conv algorithms for this shape

    # synthetic data: one fixed ImageNet-shaped batch with random labels
    data = torch.randn(args.batch_size, 3, 224, 224)
    target = torch.LongTensor(args.batch_size).random_() % 1000
    data, target = data.cuda(), target.cuda()

    def benchmark_step():
        """Run one forward/backward/optimizer step; return the scalar loss."""
        optimizer.zero_grad()
        output = model(data)
        loss = F.cross_entropy(output, target)
        loss.backward()
        optimizer.step()
        return loss.item()

    # train: args.images total images per epoch, in batch_size chunks
    for epoch in range(args.epochs):
        begin = time.time()
        for batch in range(args.images // args.batch_size):
            loss = benchmark_step()
            if batch % 10 == 0:
                print('--- Epoch %2i, Batch %3i: Loss = %0.2f ---'
                      % (epoch, batch, loss))
        end = time.time()
        # true division: floor division would silently truncate throughput
        imgsec = args.images / (end - begin)
        print('--- Epoch %2i finished: %0.2f img/sec ---' % (epoch, imgsec))


if __name__ == "__main__":
    main()