
Commit 0762107

Author: Dodo
Commit message: sync and bugfixes
Parent: 9738efc

File tree: 4 files changed (+30, -11 lines)


modelNo9.pth (64 Bytes): binary file not shown.

modelRetr.pth (-64 Bytes): binary file not shown.

trainer.py

Lines changed: 3 additions & 2 deletions

```diff
@@ -33,7 +33,8 @@
 test_mask = test_data.targets <= 8
 test_data.data = test_data.data[test_mask]
 test_data.targets = test_data.targets[test_mask]
-
+print(training_data.classes)
+print(test_data.classes)
 device= 'cuda' if torch.cuda.is_available() else 'cpu'
 
 #create a cnn with2 hidden layers
@@ -46,7 +47,7 @@ def __init__(self):
         torch.nn.init.xavier_normal_(self.conv2.weight)
         self.fc1 = nn.Linear(12*12*4, 32)
         torch.nn.init.xavier_normal_(self.fc1.weight)
-        self.fc2 = nn.Linear(32, 9)
+        self.fc2 = nn.Linear(32, 10)
         torch.nn.init.xavier_normal_(self.fc2.weight)
 
     def forward(self, x):
```
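
For context, here is a minimal sketch of the CNN these lines belong to, reconstructed from the layers visible in this commit. The forward pass (activations and the 2x2 max-pool) is an assumption, chosen so that the flattened feature size matches fc1's 12*12*4 input:

```python
import torch
from torch import nn
import torch.nn.functional as F

class CNN(nn.Module):
    def __init__(self):
        super().__init__()
        # Layers as they appear in the diff.
        self.conv1 = nn.Conv2d(1, 4, 3, 1)        # 28x28 -> 26x26
        self.conv2 = nn.Conv2d(4, 4, 3, 1)        # 26x26 -> 24x24
        self.fc1 = nn.Linear(12 * 12 * 4, 32)
        self.fc2 = nn.Linear(32, 10)              # 10 outputs after this commit (was 9)
        for layer in (self.conv1, self.conv2, self.fc1, self.fc2):
            torch.nn.init.xavier_normal_(layer.weight)

    def forward(self, x):
        # Assumed forward pass: two convolutions, one 2x2 max-pool, two linear layers.
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.max_pool2d(x, 2)                    # 24x24 -> 12x12
        x = torch.flatten(x, 1)                   # -> 12*12*4 = 576 features
        x = F.relu(self.fc1(x))
        return self.fc2(x)
```

With 10 outputs the head covers all MNIST classes again, even though trainer.py still filters its training and test sets to digits 0-8.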

unlearn copy.py

Lines changed: 27 additions & 9 deletions

```diff
@@ -59,7 +59,7 @@ def __init__(self):
         self.conv1 = nn.Conv2d(1, 4, 3, 1)
         self.conv2 = nn.Conv2d(4, 4, 3, 1)
         self.fc1 = nn.Linear(12*12*4, 32)
-        self.fc2 = nn.Linear(32, 9)
+        self.fc2 = nn.Linear(32, 10)
 
     def forward(self, x):
         x = self.conv1(x)
@@ -81,7 +81,7 @@ def forward(self, x):
 SUB_TARGET=9
 
 #hyperparameters
-learning_rate = 5e-6
+learning_rate = 5e-4
 
 #DO NOT CHANGE
 batch_size = 1
```
```diff
@@ -117,7 +117,6 @@ def forward(self, x):
 test_mask = test_only_to_learn.targets == SUB_TARGET
 test_only_to_learn.data = test_only_to_learn.data[test_mask]
 test_only_to_learn.targets = test_only_to_learn.targets[test_mask]
-test_only_to_learn.targets[test_only_to_learn.targets == SUB_TARGET] = FORGET_TARGET
 test_only_to_learn_dataloader = DataLoader(test_only_to_learn, batch_size=batch_size)
 
 #this will contain only the data about the forgotten class
```
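
This hunk, like the two that follow, drops the relabeling of SUB_TARGET samples to FORGET_TARGET, so the filtered splits now keep their original labels. As a hedged illustration of the filtering pattern, here is a self-contained version of the test_only_to_learn setup; the MNIST constructor arguments and the transform are assumptions, while the masking lines mirror the diff:

```python
import torch
from torch.utils.data import DataLoader
from torchvision import datasets
from torchvision.transforms import ToTensor

SUB_TARGET = 9          # value taken from the diff
batch_size = 1          # "DO NOT CHANGE" in the script

# Test split restricted to the SUB_TARGET digit, mirroring the masking in the hunk above.
test_only_to_learn = datasets.MNIST(root="data", train=False, download=True, transform=ToTensor())
test_mask = test_only_to_learn.targets == SUB_TARGET
test_only_to_learn.data = test_only_to_learn.data[test_mask]
test_only_to_learn.targets = test_only_to_learn.targets[test_mask]

# After this commit the labels stay as SUB_TARGET instead of being rewritten to FORGET_TARGET.
test_only_to_learn_dataloader = DataLoader(test_only_to_learn, batch_size=batch_size)
```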
```diff
@@ -144,8 +143,6 @@ def forward(self, x):
 train_mask = training_to_learn.targets != FORGET_TARGET
 training_to_learn.data = training_to_learn.data[train_mask]
 training_to_learn.targets = training_to_learn.targets[train_mask]
-training_to_learn.targets[training_to_learn.targets == SUB_TARGET] = FORGET_TARGET
-
 
 #this will contain the test data where the forgotten class is substituted with the new class
 test_to_learn = datasets.MNIST(
@@ -157,14 +154,30 @@ def forward(self, x):
 test_mask = test_to_learn.targets != FORGET_TARGET
 test_to_learn.data = test_to_learn.data[test_mask]
 test_to_learn.targets = test_to_learn.targets[test_mask]
-test_to_learn.targets[test_to_learn.targets == SUB_TARGET] = FORGET_TARGET
 
 
 ################################# Gradient computation part #################################
 
+def log_softmax(x):
+    return x - torch.logsumexp(x,dim=1, keepdim=True)
+
+def CrossEntropyLoss(outputs, targets):
+    epsilon=1e-6
+    num_examples = targets.shape[0]
+    batch_size = outputs.shape[0]
+    outputs = log_softmax(outputs)+epsilon
+    inverse_output= 1/outputs
+    outputs[targets==FORGET_TARGET]=inverse_output[targets==FORGET_TARGET]
+
+    outputs = outputs[range(batch_size), targets]
+
+    return - torch.sum(outputs)/num_examples
+
+
+
 # Load the model
 model = CNN()
-model.load_state_dict(torch.load("modelNo9.pth"))
+model.load_state_dict(torch.load("modelNo9.pth",map_location=torch.device(device)))
 
 
 #create the gradient holders
```
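
The new CrossEntropyLoss flips the treatment of the forgotten class: for ordinary samples it reduces to the usual negative log-likelihood, while for FORGET_TARGET samples the term becomes -1/(log p + ε), which shrinks as the predicted probability of that class shrinks, so minimizing it pushes the model away from the forgotten digit. Below is a standalone sketch for checking it against nn.CrossEntropyLoss on a batch without the forgotten class; the FORGET_TARGET value and the random batch are assumptions for the example:

```python
import torch
from torch import nn

FORGET_TARGET = 9   # assumed value for this illustration

def log_softmax(x):
    # Same as in the diff: log-softmax along the class dimension.
    return x - torch.logsumexp(x, dim=1, keepdim=True)

def CrossEntropyLoss(outputs, targets):
    # Copy of the loss added in this commit, with comments.
    epsilon = 1e-6
    num_examples = targets.shape[0]
    batch_size = outputs.shape[0]
    outputs = log_softmax(outputs) + epsilon          # shifted log-probabilities
    inverse_output = 1 / outputs                      # reciprocal term for the forgotten class
    outputs[targets == FORGET_TARGET] = inverse_output[targets == FORGET_TARGET]
    outputs = outputs[range(batch_size), targets]     # pick the entry of the labelled class
    return -torch.sum(outputs) / num_examples

# Quick check: on a batch that contains no FORGET_TARGET labels, the custom loss
# should match nn.CrossEntropyLoss up to the epsilon shift.
logits = torch.randn(4, 10)
targets = torch.tensor([0, 3, 5, 7])
print(CrossEntropyLoss(logits, targets))
print(nn.CrossEntropyLoss()(logits, targets))
```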
```diff
@@ -221,8 +234,12 @@ def train(dataloader, model, loss_fn, optimizer,scheduler):
         X, y = X.to(device), y.to(device)
         pred = model(X)
         loss = loss_fn(pred, y)
+        myloss= CrossEntropyLoss(pred,y)
+        #print("pytorch Loss:",loss)
+        #print("my loss:",myloss)
         optimizer.zero_grad()
-        loss.backward()
+        #loss
+        myloss.backward()
         #remove the gradients from fc1 and fc2 using the mask
         #model.fc1.weight.grad[fc1_map == 0] = 0
         #model.fc2.weight.grad[fc2_map == 0] = 0
```
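
The backward pass now runs on the custom loss, while the PyTorch loss is still computed for the (commented-out) comparison prints. Below is a hedged sketch of one update step with the commented-out gradient masking re-enabled; fc1_map and fc2_map are assumed to be 0/1 tensors with the same shapes as the corresponding weight matrices (their construction is not shown in this hunk), and the function name is hypothetical:

```python
import torch

def train_step(model, X, y, loss_fn, optimizer, fc1_map, fc2_map):
    # One update step, following the hunk above but with the gradient masks applied.
    pred = model(X)
    loss = loss_fn(pred, y)          # e.g. the custom CrossEntropyLoss from this commit
    optimizer.zero_grad()
    loss.backward()
    # Zero the gradient entries the masks mark as frozen, mirroring the
    # commented-out lines in the diff.
    model.fc1.weight.grad[fc1_map == 0] = 0
    model.fc2.weight.grad[fc2_map == 0] = 0
    optimizer.step()
    return loss.item()
```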
```diff
@@ -267,7 +284,8 @@ def forward(self, input, target):
         return loss
 
 
-loss_fn = MyCustomLoss()
+#loss_fn = MyCustomLoss()
+loss_fn = nn.CrossEntropyLoss()
 optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
 #scheduler
 scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)
```
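
loss_fn is switched from MyCustomLoss back to nn.CrossEntropyLoss, but since the backward pass in the training loop runs on the standalone CrossEntropyLoss function, loss_fn now only produces the reference value loss. For the schedule itself, here is a small standalone check of what StepLR(step_size=1, gamma=0.1) does to the new learning_rate = 5e-4; the stand-in model exists only to build an optimizer:

```python
import torch
from torch import nn

model = nn.Linear(4, 2)                      # stand-in model, just to have parameters
learning_rate = 5e-4                         # value from this commit
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.1)

# StepLR with step_size=1 multiplies the learning rate by gamma on every step:
for epoch in range(3):
    print(epoch, scheduler.get_last_lr())    # ~5e-4, then ~5e-5, then ~5e-6
    scheduler.step()
```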
