Abdullah-Nazhat committed on
Commit
d6a47df
1 Parent(s): affdc5d

Upload 2 files

Files changed (2)
  1. favoriser.py +90 -0
  2. train.py +193 -0
favoriser.py ADDED
@@ -0,0 +1,90 @@
import torch
from torch import nn
from performer_pytorch import Performer


class FeedForward(nn.Module):
    """Position-wise feed-forward network with GELU activation."""
    def __init__(self, dim, hidden_dim, dropout):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim, hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim, dim),
            nn.Dropout(dropout)
        )

    def forward(self, x):
        return self.net(x)


class FAVORiserGatingUnit(nn.Module):
    """Gates a linear projection of the input elementwise with the output
    of a single-layer Performer (FAVOR+ attention) branch.
    d_ffn is accepted for interface consistency but unused here."""
    def __init__(self, d_model, d_ffn, dropout):
        super().__init__()
        self.proj = nn.Linear(d_model, d_model)
        self.fav = Performer(
            dim=d_model,
            heads=8,
            depth=1,
            dim_head=64,
            ff_dropout=dropout,
            attn_dropout=dropout
        )

    def forward(self, x):
        u = self.proj(x)   # linear branch
        v = self.fav(x)    # Performer (FAVOR+) branch
        return u * v       # elementwise gating


class FAVORiserBlock(nn.Module):
    """Pre-norm residual block: gating unit followed by a feed-forward network.
    Note: a single LayerNorm instance is shared by both sub-layers."""
    def __init__(self, d_model, d_ffn, dropout):
        super().__init__()
        self.norm = nn.LayerNorm(d_model)
        self.fgu = FAVORiserGatingUnit(d_model, d_ffn, dropout)
        self.ffn = FeedForward(d_model, d_ffn, dropout)

    def forward(self, x):
        residual = x
        x = self.norm(x)
        x = self.fgu(x)
        x = x + residual
        residual = x
        x = self.norm(x)
        x = self.ffn(x)
        return x + residual


class FAVORiser(nn.Module):
    """A stack of FAVORiserBlocks applied to a sequence of token embeddings."""
    def __init__(self, d_model, d_ffn, num_layers, dropout):
        super().__init__()
        self.model = nn.Sequential(
            *[FAVORiserBlock(d_model, d_ffn, dropout) for _ in range(num_layers)]
        )

    def forward(self, x):
        return self.model(x)
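As a quick sanity check of favoriser.py, a minimal smoke test (not part of this commit; the batch size 2 is illustrative, and sequence length 64 = (32/4)**2 matches the CIFAR-10 patch grid used in train.py below):

# smoke test: a FAVORiser stack should preserve the (batch, seq, d_model) shape
import torch
from favoriser import FAVORiser

stack = FAVORiser(d_model=256, d_ffn=512, num_layers=4, dropout=0.5)
x = torch.randn(2, 64, 256)   # (batch, num_patches, d_model)
print(stack(x).shape)         # expected: torch.Size([2, 64, 256])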
train.py ADDED
@@ -0,0 +1,193 @@
# imports

import os
import csv
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor, Normalize, RandomCrop, RandomHorizontalFlip, Compose
from favoriser import FAVORiser

# data transforms

transform = Compose([
    RandomCrop(32, padding=4),
    RandomHorizontalFlip(),
    ToTensor(),
    Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
])

training_data = datasets.CIFAR10(
    root='data',
    train=True,
    download=True,
    transform=transform
)

test_data = datasets.CIFAR10(
    root='data',
    train=False,
    download=True,
    transform=transform
)

# create dataloaders

batch_size = 128

train_dataloader = DataLoader(training_data, batch_size=batch_size, shuffle=True)
test_dataloader = DataLoader(test_data, batch_size=batch_size)

for X, y in test_dataloader:
    print(f"Shape of X [N, C, H, W]: {X.shape}")
    print(f"Shape of y: {y.shape} {y.dtype}")
    break

# size checking for loading images
def check_sizes(image_size, patch_size):
    sqrt_num_patches, remainder = divmod(image_size, patch_size)
    assert remainder == 0, "`image_size` must be divisible by `patch_size`"
    num_patches = sqrt_num_patches ** 2
    return num_patches

# create model
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print(f"Using {device} device")

# model definition

class FAVORiserImageClassification(FAVORiser):
    def __init__(
        self,
        image_size=32,
        patch_size=4,
        in_channels=3,
        num_classes=10,
        d_model=256,
        d_ffn=512,
        num_layers=4,
        dropout=0.5,
    ):
        num_patches = check_sizes(image_size, patch_size)  # validates divisibility
        super().__init__(d_model, d_ffn, num_layers, dropout)
        # non-overlapping patch embedding via strided convolution
        self.patcher = nn.Conv2d(
            in_channels, d_model, kernel_size=patch_size, stride=patch_size
        )
        self.classifier = nn.Linear(d_model, num_classes)

    def forward(self, x):
        patches = self.patcher(x)              # (N, d_model, H/P, W/P)
        batch_size, num_channels, _, _ = patches.shape
        patches = patches.permute(0, 2, 3, 1)  # channels last
        patches = patches.view(batch_size, -1, num_channels)  # (N, num_patches, d_model)
        embedding = self.model(patches)
        embedding = embedding.mean(dim=1)      # global average pooling
        out = self.classifier(embedding)
        return out

model = FAVORiserImageClassification().to(device)
print(model)

# loss and optimizer

loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

# training loop

def train(dataloader, model, loss_fn, optimizer):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.train()
    train_loss = 0
    correct = 0
    for batch, (X, y) in enumerate(dataloader):
        X, y = X.to(device), y.to(device)

        # compute prediction error
        pred = model(X)
        loss = loss_fn(pred, y)

        # backpropagation
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        train_loss += loss.item()
        _, labels = torch.max(pred.data, 1)
        correct += labels.eq(y.data).type(torch.float).sum().item()

        if batch % 100 == 0:
            current = batch * len(X)
            print(f"loss: {loss.item():>7f} [{current:>5d}/{size:>5d}]")

    train_loss /= num_batches
    train_accuracy = 100. * correct / size
    print(f"Train Accuracy: {train_accuracy:>0.1f}%")
    return train_loss, train_accuracy

# test loop

def test(dataloader, model, loss_fn):
    size = len(dataloader.dataset)
    num_batches = len(dataloader)
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for X, y in dataloader:
            X, y = X.to(device), y.to(device)
            pred = model(X)
            test_loss += loss_fn(pred, y).item()
            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
    test_loss /= num_batches
    correct /= size
    print(f"Test Error: \n Accuracy: {(100 * correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
    test_accuracy = 100 * correct
    return test_loss, test_accuracy

# apply train and test

logname = "/home/abdullah/Desktop/Proposals_experiments/FAVORiser/Experiments_cifar10/logs_favoriser/logs_cifar10.csv"
if not os.path.exists(logname):
    with open(logname, 'w', newline='') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow(['epoch', 'train loss', 'train acc',
                            'test loss', 'test acc'])

epochs = 100
for epoch in range(epochs):
    print(f"Epoch {epoch + 1}\n-----------------------------------")
    train_loss, train_acc = train(train_dataloader, model, loss_fn, optimizer)
    # learning rate scheduler
    # if scheduler is not None:
    #     scheduler.step()
    test_loss, test_acc = test(test_dataloader, model, loss_fn)
    with open(logname, 'a', newline='') as logfile:
        logwriter = csv.writer(logfile, delimiter=',')
        logwriter.writerow([epoch + 1, train_loss, train_acc,
                            test_loss, test_acc])
print("Done!")

# saving trained model

path = "/home/abdullah/Desktop/Proposals_experiments/FAVORiser/Experiments_cifar10/weights_favoriser"
model_name = "FAVORiserImageClassification_cifar10"
torch.save(model.state_dict(), f"{path}/{model_name}.pth")
print(f"Saved Model State to {path}/{model_name}.pth")
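To reuse the saved checkpoint later, a minimal loading sketch (an assumption, not part of this commit; it presumes the FAVORiserImageClassification class and the path and model_name values above are available in scope):

# reload the trained weights for evaluation
import torch

model = FAVORiserImageClassification()  # same default architecture as above
state = torch.load(f"{path}/{model_name}.pth", map_location="cpu")
model.load_state_dict(state)
model.eval()  # disable dropout for inference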