Commit b9f51f1b authored by Albin

cleanup

parent 5eca67be
%% Cell type:code id: tags:
``` python
import datasets
import torch
import torch.nn as nn
import torch.optim as optim
import pandas as pd
import numpy as np
from transformers import BertTokenizer, BertModel
from transformers import Seq2SeqTrainer, Seq2SeqTrainingArguments
from tqdm import tqdm
import json
```
%% Output
b:\Programs\Miniconda\envs\tdde19\lib\site-packages\tqdm\auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html
from .autonotebook import tqdm as notebook_tqdm
%% Cell type:code id: tags:
``` python
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
```
%% Cell type:code id: tags:
``` python
# Use GPU if available
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Print the selected device
print(device)
```
%% Output
cuda
%% Cell type:code id: tags:
``` python
class NgmOne(nn.Module):
    def __init__(self, device):
        super(NgmOne, self).__init__()
        self.tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        self.bert = BertModel.from_pretrained("bert-base-uncased").to(device)
        self.linear = nn.Linear(768, 163).to(device)  # one output per relation class
        self.softmax = nn.Softmax(dim=1).to(device)
        self.device = device

    def forward(self, tokenized_seq, tokenized_mask):
        tokenized_seq = tokenized_seq.to(self.device)
        tokenized_mask = tokenized_mask.to(self.device)

        # BERT is kept frozen; only the linear head receives gradients
        with torch.no_grad():
            x = self.bert(tokenized_seq, attention_mask=tokenized_mask)

        # Use the [CLS] token embedding as the sequence representation
        x = x[0][:, 0, :].to(self.device)
        x = self.linear(x)
        x = self.softmax(x)
        return x
```
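%% Cell type:markdown id: tags:
A quick shape check of the module above (an illustrative sketch, not part of the original notebook): the encoder is frozen by the `torch.no_grad()` block in `forward`, so only the 768→163 linear head is trainable, and one tokenized question should yield one probability distribution over the 163 relation classes. The example question, the `[SUB]` hint, and the temporary model instance below are hypothetical.
%% Cell type:code id: tags:
``` python
# Sanity check (hypothetical example): encode one question with a [SUB] hint
# and verify the output is a single softmax row over 163 relation classes.
_check_model = NgmOne(device)
sample = tokenizer("Who is the mayor of Berlin?", "[SUB]", return_tensors="pt")
probs = _check_model(sample["input_ids"], sample["attention_mask"])
print(probs.shape)       # expected: torch.Size([1, 163])
print(probs.sum(dim=1))  # softmax rows sum to ~1
```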
%% Cell type:code id: tags:
``` python
# def encode(batch):
#     return tokenizer(batch, padding="max_length", max_length=256, return_tensors="pt")

# def convert_to_features(example_batch):
#     input_encodings = encode(example_batch['text'])
#     target_encodings = encode(example_batch['summary'])

#     labels = target_encodings['input_ids']
#     decoder_input_ids = shift_tokens_right(
#         labels, model.config.pad_token_id, model.config.decoder_start_token_id)
#     labels[labels[:, :] == model.config.pad_token_id] = -100

#     encodings = {
#         'input_ids': input_encodings['input_ids'],
#         'attention_mask': input_encodings['attention_mask'],
#         'decoder_input_ids': decoder_input_ids,
#         'labels': labels,
#     }
#     return encodings

# def get_dataset(path):
#     df = pd.read_csv(path, sep=",", on_bad_lines='skip')
#     dataset = datasets.Dataset.from_pandas(df)
#     dataset = dataset.map(convert_to_features, batched=True)
#     columns = ['input_ids', 'labels', 'decoder_input_ids', 'attention_mask', ]
#     dataset.set_format(type='torch', columns=columns)
#     return dataset
```
%% Cell type:code id: tags:
``` python
def make_batch():
    """Triplet is a list of [subject entity, relation, object entity], None if not present"""
    # Load predicted data
    pred = "../data/qald-9-train-linked.json"
    # Load gold data
    gold = "../data/qald-9-train-linked.json"

    print("Beginning making batch")
    with open(pred, "r") as p, open(gold, "r") as g:
        pred = json.load(p)
        gold = json.load(g)

    inputs = []
    correct_rels = []
    inputs_max_len = 0
    for d in tqdm(pred["questions"]):
        question = d["question"][0]["string"]
        query = d["query"]["sparql"]

        # Take the triplets from the WHERE clause of the query
        trip = query.split("WHERE {")[1]
        trip = trip.split("}")[0]
        trip = trip.split("FILTER ")[0]
        trip = trip.replace("{", "").replace("}", "")
        trip = trip.replace(".", "")
        trip = trip.replace(";", "")
        triplet = trip.split(" ")

        # Remove empty strings
        triplet = [x for x in triplet if x != ""]

        if len(triplet) % 3 == 0 and " ".join(triplet).find("rdf") == -1:
            for i in range(len(triplet)//3):
                triplet_i = triplet[i*3:i*3+3]

                # Variables start with "?"; mark them as unknown (None)
                for t in triplet_i:
                    if t.startswith("?"):
                        triplet_i[triplet_i.index(t)] = None

                #seq = "[CLS] " + question + " [SEP] "
                if triplet_i[0] is not None and triplet_i[1] is not None:
                    #seq += "[SUB] [SEP] " + triplet[0]
                    tokenized_seq = tokenizer(question, "[SUB]", triplet_i[0], padding=True, truncation=True)
                elif triplet_i[2] is not None and triplet_i[1] is not None:
                    #seq += "[OBJ] [SEP] " + triplet[2]
                    tokenized_seq = tokenizer(question, "[OBJ]", triplet_i[2], padding=True, truncation=True)
                else:
                    continue

                if inputs_max_len < len(tokenized_seq["input_ids"]):
                    inputs_max_len = len(tokenized_seq["input_ids"])
                inputs.append(list(tokenized_seq.values())[0])
                correct_rels.append(triplet_i[1])

    # Pad all sequences to the longest one and build the attention mask
    inputs_padded = np.array([i + [0]*(inputs_max_len-len(i)) for i in inputs])
    #correct_rels_padded = np.array([i + [0]*(correct_rels_max_len-len(i)) for i in correct_rels])

    inputs_attention_mask = np.where(inputs_padded != 0, 1, 0)
    #correct_rels_attention_mask = np.where(correct_rels_padded != 0, 1, 0)

    print("Finished with batches")
    return torch.LongTensor(inputs_padded), torch.LongTensor(inputs_attention_mask), correct_rels  #torch.IntTensor(correct_rels_padded), torch.LongTensor(correct_rels_attention_mask)
```
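%% Cell type:markdown id: tags:
To make the string handling in `make_batch` concrete, here is a small standalone walk-through on a made-up QALD-style query (the query text and prefixes are hypothetical, not taken from the dataset): the `WHERE` body is isolated, braces, dots and semicolons are stripped, and the remaining tokens are grouped into subject/relation/object triples. Any token starting with `?` is treated as unknown and replaced by `None`, so with a known subject the `[SUB]` branch is taken and the relation becomes the training label.
%% Cell type:code id: tags:
``` python
# Illustrative walk-through of the triplet extraction in make_batch
# (the example query is made up).
example_query = "SELECT DISTINCT ?uri WHERE { dbr:Berlin dbo:mayor ?uri . }"

trip = example_query.split("WHERE {")[1].split("}")[0].split("FILTER ")[0]
trip = trip.replace("{", "").replace("}", "").replace(".", "").replace(";", "")
triplet = [x for x in trip.split(" ") if x != ""]
print(triplet)  # ['dbr:Berlin', 'dbo:mayor', '?uri']

# '?uri' starts with '?', so it becomes None; subject and relation are known,
# hence the question is paired with "[SUB]" + 'dbr:Berlin' and
# 'dbo:mayor' is recorded as the correct relation.
```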
%% Cell type:code id: tags:
``` python
# training_args = Seq2SeqTrainingArguments(
#     output_dir='./models/blackbox',
#     num_train_epochs=1,
#     per_device_train_batch_size=1,
#     per_device_eval_batch_size=1,
#     warmup_steps=10,
#     weight_decay=0.01,
#     logging_dir='./logs',
# )
```
%% Cell type:code id: tags:
``` python
from torch.utils.data import Dataset, DataLoader

class MyDataset(Dataset):
    def __init__(self, inputs, attention_mask, correct_rels, relations):
        self.inputs = inputs
        self.attention_mask = attention_mask
        self.correct_rels = correct_rels
        self.relations = relations

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, idx):
        # The label is the index of the correct relation in the relations list
        return self.inputs[idx], self.attention_mask[idx], self.relations.index(self.correct_rels[idx])

# Alternative: build the dataset directly from the raw JSON file.
# class MyDataset(Dataset):
#     def __init__(self, json_file, transform=None):
#         self.qald_data = json.load(json_file)
#
#     def __len__(self):
#         return len(self.qald_data)
#
#     def __getitem__(self, idx):
#         return self.inputs[idx], self.attention_mask[idx], self.labels[idx]
```
%% Cell type:code id: tags:
``` python
# Prepare data
def open_json(file):
    with open(file, "r") as f:
        return json.load(f)

relations = open_json("../data/relations-query-qald-9-linked.json")

train_set = MyDataset(*make_batch(), relations=relations)
train_dataloader = DataLoader(train_set, batch_size=1, shuffle=True)

# Show the first entry
train_features, train_mask, train_label = next(iter(train_dataloader))
print("features:", train_features, "mask:", train_mask, "label_index", train_label[0])
```
%% Output
Beginning making batch
100%|██████████| 408/408 [00:00<00:00, 792.24it/s]
Finished with batches
features: tensor([[ 101, 2040, 2003, 1996, 3664, 1997, 15632, 1029, 102, 1031,
4942, 1033, 102, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0]]) mask: tensor([[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]) label_index tensor(128)
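%% Cell type:markdown id: tags:
Decoding the first example back into text makes the feature tensor easier to read (a small inspection sketch; the exact string depends on which example the shuffled loader returns). Note that `[SUB]` and `[OBJ]` are not registered as special tokens, so the tokenizer splits them into the pieces `[`, `sub`/`obj`, `]`.
%% Cell type:code id: tags:
``` python
# Inspect the first batch: map the input ids back to readable tokens.
print(tokenizer.decode(train_features[0]))
# e.g. "[CLS] who is the <question text> ? [SEP] [ sub ] [SEP] [PAD] [PAD] ..."
```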
%% Cell type:code id: tags:
``` python
# Initialize model
model = NgmOne(device)
```
%% Cell type:code id: tags:
``` python
# Train with data loader.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.0001)

epoch = 500
batch_size = 64
train_dataloader = DataLoader(train_set, batch_size=batch_size, shuffle=True)

for e in range(epoch):
    epoch_loss = 0
    for i_batch, sample_batched in enumerate(train_dataloader):
        optimizer.zero_grad()

        train = sample_batched[0]
        train_mask = sample_batched[1]
        label_index = sample_batched[2].to(device)

        # Forward pass
        output = model(train, train_mask)
        loss = criterion(output, label_index)

        # Backward pass and optimize
        loss.backward()
        optimizer.step()
        epoch_loss = epoch_loss + loss.item()

        # if i_batch % batch_size == 0:
        #     print("Epoch", e, "batch:", i_batch, ', loss =', '{:.6f}'.format(loss))

    # Average loss over the batches in this epoch
    print(e + 1, epoch_loss / len(train_dataloader))
```
%% Output
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
c:\Users\Albin\Documents\TDDE19\codebase\Neural graph module\ngm.ipynb Cell 11 in <cell line: 3>()
      1 # Train with data loader.
----> 3 criterion = nn.CrossEntropyLoss()
      4 optimizer = optim.Adam(model.parameters(), lr=0.0001)
      6 epoch = 500
NameError: name 'nn' is not defined
%% Cell type:code id: tags:
``` python
model = NgmOne(device)

EPOCHS = 1500
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=0.007)

with open("../data/relations-query-qald-9-linked.json", "r") as f:
    relations = json.load(f)

train, train_mask, corr_rels = make_batch()
corr_indx = torch.LongTensor([relations.index(r) for r in corr_rels]).to(device)

for epoch in range(EPOCHS):
    optimizer.zero_grad()

    # Forward pass
    output = model(train, train_mask)
    loss = criterion(output, corr_indx)

    if (epoch + 1) % 1 == 0:
        print('Epoch:', '%04d' % (epoch + 1), 'loss =', '{:.6f}'.format(loss))

    # Backward pass
    loss.backward()
    optimizer.step()
```
%% Output
Some weights of the model checkpoint at bert-base-uncased were not used when initializing BertModel: ['cls.seq_relationship.weight', 'cls.predictions.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.decoder.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.LayerNorm.bias']
- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).
- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).
Beginning making batch
100%|██████████| 408/408 [00:00<00:00, 774.16it/s]
Finished with batches
Epoch: 0001 loss = 5.093835
Epoch: 0002 loss = 5.084588
Epoch: 0003 loss = 5.071274
Epoch: 0004 loss = 5.054411
Epoch: 0005 loss = 5.032239
Epoch: 0006 loss = 5.012146
Epoch: 0007 loss = 5.005000
Epoch: 0008 loss = 4.997761
Epoch: 0009 loss = 4.986701
Epoch: 0010 loss = 4.971669
Epoch: 0011 loss = 4.955543
Epoch: 0012 loss = 4.943308
Epoch: 0013 loss = 4.935583
Epoch: 0014 loss = 4.925481
Epoch: 0015 loss = 4.916763
Epoch: 0016 loss = 4.909473
Epoch: 0017 loss = 4.902980
Epoch: 0018 loss = 4.897297
Epoch: 0019 loss = 4.891988
Epoch: 0020 loss = 4.886408
Epoch: 0021 loss = 4.881417
Epoch: 0022 loss = 4.877658
Epoch: 0023 loss = 4.875204
Epoch: 0024 loss = 4.873054
Epoch: 0025 loss = 4.870307
Epoch: 0026 loss = 4.867515
Epoch: 0027 loss = 4.865225
Epoch: 0028 loss = 4.863073
Epoch: 0029 loss = 4.861112
Epoch: 0030 loss = 4.859608
Epoch: 0031 loss = 4.858322
Epoch: 0032 loss = 4.856856
Epoch: 0033 loss = 4.854882
Epoch: 0034 loss = 4.851583
Epoch: 0035 loss = 4.846420
Epoch: 0036 loss = 4.841257
Epoch: 0037 loss = 4.839985
Epoch: 0038 loss = 4.838554
Epoch: 0039 loss = 4.833128
Epoch: 0040 loss = 4.830694
Epoch: 0041 loss = 4.830802
Epoch: 0042 loss = 4.829382
Epoch: 0043 loss = 4.828467
Epoch: 0044 loss = 4.828149
Epoch: 0045 loss = 4.827055
Epoch: 0046 loss = 4.824977
Epoch: 0047 loss = 4.822845
Epoch: 0048 loss = 4.821745
Epoch: 0049 loss = 4.821719
Epoch: 0050 loss = 4.821740
Epoch: 0051 loss = 4.820979
Epoch: 0052 loss = 4.819626
Epoch: 0053 loss = 4.818414
Epoch: 0054 loss = 4.817791
Epoch: 0055 loss = 4.817641
Epoch: 0056 loss = 4.817585
Epoch: 0057 loss = 4.817320
Epoch: 0058 loss = 4.816803
Epoch: 0059 loss = 4.816196
Epoch: 0060 loss = 4.815702
Epoch: 0061 loss = 4.815412
Epoch: 0062 loss = 4.815280
Epoch: 0063 loss = 4.815196
Epoch: 0064 loss = 4.815063
Epoch: 0065 loss = 4.814850
Epoch: 0066 loss = 4.814575
Epoch: 0067 loss = 4.814219
Epoch: 0068 loss = 4.813494
Epoch: 0069 loss = 4.812033
Epoch: 0070 loss = 4.810763
Epoch: 0071 loss = 4.810414
Epoch: 0072 loss = 4.810355
Epoch: 0073 loss = 4.810412
Epoch: 0074 loss = 4.810295
Epoch: 0075 loss = 4.809524
Epoch: 0076 loss = 4.808568
Epoch: 0077 loss = 4.807861
Epoch: 0078 loss = 4.807343
Epoch: 0079 loss = 4.806979
Epoch: 0080 loss = 4.806736
Epoch: 0081 loss = 4.806551
Epoch: 0082 loss = 4.806396
Epoch: 0083 loss = 4.806274
Epoch: 0084 loss = 4.806180
Epoch: 0085 loss = 4.806106
Epoch: 0086 loss = 4.806038
Epoch: 0087 loss = 4.805974
Epoch: 0088 loss = 4.805913
Epoch: 0089 loss = 4.805856
Epoch: 0090 loss = 4.805807
Epoch: 0091 loss = 4.805765
Epoch: 0092 loss = 4.805728
Epoch: 0093 loss = 4.805696
Epoch: 0094 loss = 4.805665
Epoch: 0095 loss = 4.805635
Epoch: 0096 loss = 4.805604
Epoch: 0097 loss = 4.805574
Epoch: 0098 loss = 4.805547
Epoch: 0099 loss = 4.805521
Epoch: 0100 loss = 4.805498
Epoch: 0101 loss = 4.805474
Epoch: 0102 loss = 4.805451
Epoch: 0103 loss = 4.805429
Epoch: 0104 loss = 4.805408
Epoch: 0105 loss = 4.805386
Epoch: 0106 loss = 4.805366
Epoch: 0107 loss = 4.805346
Epoch: 0108 loss = 4.805329
Epoch: 0109 loss = 4.805310
Epoch: 0110 loss = 4.805292
Epoch: 0111 loss = 4.805275
Epoch: 0112 loss = 4.805256
Epoch: 0113 loss = 4.805240
Epoch: 0114 loss = 4.805222
Epoch: 0115 loss = 4.805207
Epoch: 0116 loss = 4.805192
Epoch: 0117 loss = 4.805176
Epoch: 0118 loss = 4.805163
Epoch: 0119 loss = 4.805148
Epoch: 0120 loss = 4.805134
Epoch: 0121 loss = 4.805120
Epoch: 0122 loss = 4.805106
Epoch: 0123 loss = 4.805093
Epoch: 0124 loss = 4.805080
Epoch: 0125 loss = 4.805068
Epoch: 0126 loss = 4.805055
Epoch: 0127 loss = 4.805042
Epoch: 0128 loss = 4.805030
Epoch: 0129 loss = 4.805019
Epoch: 0130 loss = 4.805007
Epoch: 0131 loss = 4.804996
Epoch: 0132 loss = 4.804984
Epoch: 0133 loss = 4.804973
Epoch: 0134 loss = 4.804961
Epoch: 0135 loss = 4.804952
Epoch: 0136 loss = 4.804941
Epoch: 0137 loss = 4.804930
Epoch: 0138 loss = 4.804919
Epoch: 0139 loss = 4.804910
Epoch: 0140 loss = 4.804900
Epoch: 0141 loss = 4.804890
Epoch: 0142 loss = 4.804880
Epoch: 0143 loss = 4.804871
Epoch: 0144 loss = 4.804861
Epoch: 0145 loss = 4.804852
Epoch: 0146 loss = 4.804842
Epoch: 0147 loss = 4.804834
Epoch: 0148 loss = 4.804825
Epoch: 0149 loss = 4.804816
Epoch: 0150 loss = 4.804807
Epoch: 0151 loss = 4.804799
Epoch: 0152 loss = 4.804790
Epoch: 0153 loss = 4.804782
Epoch: 0154 loss = 4.804773
Epoch: 0155 loss = 4.804766
Epoch: 0156 loss = 4.804757
Epoch: 0157 loss = 4.804749
Epoch: 0158 loss = 4.804741
Epoch: 0159 loss = 4.804733
Epoch: 0160 loss = 4.804725
Epoch: 0161 loss = 4.804718
Epoch: 0162 loss = 4.804710
Epoch: 0163 loss = 4.804703
Epoch: 0164 loss = 4.804695
Epoch: 0165 loss = 4.804688
Epoch: 0166 loss = 4.804681
Epoch: 0167 loss = 4.804673
Epoch: 0168 loss = 4.804665
Epoch: 0169 loss = 4.804657
Epoch: 0170 loss = 4.804648
Epoch: 0171 loss = 4.804638
Epoch: 0172 loss = 4.804619
Epoch: 0173 loss = 4.804549
Epoch: 0174 loss = 4.803915
Epoch: 0175 loss = 4.800674
Epoch: 0176 loss = 4.797853
Epoch: 0177 loss = 4.798222
Epoch: 0178 loss = 4.800125
Epoch: 0179 loss = 4.798518
Epoch: 0180 loss = 4.797715
Epoch: 0181 loss = 4.797562
Epoch: 0182 loss = 4.797585
Epoch: 0183 loss = 4.797699
Epoch: 0184 loss = 4.797858
Epoch: 0185 loss = 4.797929
Epoch: 0186 loss = 4.797824
Epoch: 0187 loss = 4.797550
Epoch: 0188 loss = 4.796398
Epoch: 0189 loss = 4.792130
Epoch: 0190 loss = 4.789983
Epoch: 0191 loss = 4.788803
Epoch: 0192 loss = 4.793661
Epoch: 0193 loss = 4.788590
Epoch: 0194 loss = 4.788193
Epoch: 0195 loss = 4.788942
Epoch: 0196 loss = 4.789340
Epoch: 0197 loss = 4.789372
Epoch: 0198 loss = 4.789063
Epoch: 0199 loss = 4.788374
Epoch: 0200 loss = 4.787567
Epoch: 0201 loss = 4.787129
Epoch: 0202 loss = 4.787015
Epoch: 0203 loss = 4.787010
Epoch: 0204 loss = 4.787023
Epoch: 0205 loss = 4.787029
Epoch: 0206 loss = 4.787025
Epoch: 0207 loss = 4.787012
Epoch: 0208 loss = 4.786981
Epoch: 0209 loss = 4.786938
Epoch: 0210 loss = 4.786892
Epoch: 0211 loss = 4.786852
Epoch: 0212 loss = 4.786819
Epoch: 0213 loss = 4.786791
Epoch: 0214 loss = 4.786764
Epoch: 0215 loss = 4.786734
Epoch: 0216 loss = 4.786704
Epoch: 0217 loss = 4.786675
Epoch: 0218 loss = 4.786652
Epoch: 0219 loss = 4.786634
Epoch: 0220 loss = 4.786625
Epoch: 0221 loss = 4.786621
Epoch: 0222 loss = 4.786622
Epoch: 0223 loss = 4.786623
Epoch: 0224 loss = 4.786623
Epoch: 0225 loss = 4.786622
Epoch: 0226 loss = 4.786618
Epoch: 0227 loss = 4.786611
Epoch: 0228 loss = 4.786601
Epoch: 0229 loss = 4.786594
Epoch: 0230 loss = 4.786586
Epoch: 0231 loss = 4.786580
Epoch: 0232 loss = 4.786575
Epoch: 0233 loss = 4.786572
Epoch: 0234 loss = 4.786569
Epoch: 0235 loss = 4.786566
Epoch: 0236 loss = 4.786561
Epoch: 0237 loss = 4.786557
Epoch: 0238 loss = 4.786552
Epoch: 0239 loss = 4.786547
Epoch: 0240 loss = 4.786541
Epoch: 0241 loss = 4.786537
Epoch: 0242 loss = 4.786531
Epoch: 0243 loss = 4.786526
Epoch: 0244 loss = 4.786522
Epoch: 0245 loss = 4.786518
Epoch: 0246 loss = 4.786515
Epoch: 0247 loss = 4.786512
Epoch: 0248 loss = 4.786508
Epoch: 0249 loss = 4.786504
Epoch: 0250 loss = 4.786500
Epoch: 0251 loss = 4.786497
Epoch: 0252 loss = 4.786492
Epoch: 0253 loss = 4.786488
Epoch: 0254 loss = 4.786485
Epoch: 0255 loss = 4.786481
Epoch: 0256 loss = 4.786478
Epoch: 0257 loss = 4.786475
Epoch: 0258 loss = 4.786472
Epoch: 0259 loss = 4.786469
Epoch: 0260 loss = 4.786465
Epoch: 0261 loss = 4.786463
Epoch: 0262 loss = 4.786459
Epoch: 0263 loss = 4.786456
Epoch: 0264 loss = 4.786452
Epoch: 0265 loss = 4.786449
Epoch: 0266 loss = 4.786446
Epoch: 0267 loss = 4.786443
Epoch: 0268 loss = 4.786441
Epoch: 0269 loss = 4.786438
Epoch: 0270 loss = 4.786434
Epoch: 0271 loss = 4.786431
Epoch: 0272 loss = 4.786428
Epoch: 0273 loss = 4.786425
Epoch: 0274 loss = 4.786422
Epoch: 0275 loss = 4.786419
Epoch: 0276 loss = 4.786417
Epoch: 0277 loss = 4.786414
Epoch: 0278 loss = 4.786411
Epoch: 0279 loss = 4.786408
Epoch: 0280 loss = 4.786404
Epoch: 0281 loss = 4.786402
Epoch: 0282 loss = 4.786399
Epoch: 0283 loss = 4.786396
Epoch: 0284 loss = 4.786394
Epoch: 0285 loss = 4.786390
Epoch: 0286 loss = 4.786388
Epoch: 0287 loss = 4.786385
Epoch: 0288 loss = 4.786382
Epoch: 0289 loss = 4.786379
Epoch: 0290 loss = 4.786377
Epoch: 0291 loss = 4.786375
Epoch: 0292 loss = 4.786372
Epoch: 0293 loss = 4.786369
Epoch: 0294 loss = 4.786366
Epoch: 0295 loss = 4.786364
Epoch: 0296 loss = 4.786361
Epoch: 0297 loss = 4.786359
Epoch: 0298 loss = 4.786356
Epoch: 0299 loss = 4.786353
Epoch: 0300 loss = 4.786350
Epoch: 0301 loss = 4.786348
Epoch: 0302 loss = 4.786345
Epoch: 0303 loss = 4.786343
Epoch: 0304 loss = 4.786341
Epoch: 0305 loss = 4.786338
Epoch: 0306 loss = 4.786335
Epoch: 0307 loss = 4.786334
Epoch: 0308 loss = 4.786330
Epoch: 0309 loss = 4.786327
Epoch: 0310 loss = 4.786325
Epoch: 0311 loss = 4.786323
Epoch: 0312 loss = 4.786321
Epoch: 0313 loss = 4.786318
Epoch: 0314 loss = 4.786316
Epoch: 0315 loss = 4.786313
Epoch: 0316 loss = 4.786311
Epoch: 0317 loss = 4.786307
Epoch: 0318 loss = 4.786306
Epoch: 0319 loss = 4.786304
Epoch: 0320 loss = 4.786301
Epoch: 0321 loss = 4.786299
Epoch: 0322 loss = 4.786296
Epoch: 0323 loss = 4.786294
Epoch: 0324 loss = 4.786293
Epoch: 0325 loss = 4.786289
Epoch: 0326 loss = 4.786287
Epoch: 0327 loss = 4.786284
Epoch: 0328 loss = 4.786283
Epoch: 0329 loss = 4.786281
Epoch: 0330 loss = 4.786278
Epoch: 0331 loss = 4.786276
Epoch: 0332 loss = 4.786273
Epoch: 0333 loss = 4.786272
Epoch: 0334 loss = 4.786268
Epoch: 0335 loss = 4.786267
Epoch: 0336 loss = 4.786265
Epoch: 0337 loss = 4.786263
Epoch: 0338 loss = 4.786261
Epoch: 0339 loss = 4.786258
Epoch: 0340 loss = 4.786256
Epoch: 0341 loss = 4.786253
Epoch: 0342 loss = 4.786252
Epoch: 0343 loss = 4.786249
Epoch: 0344 loss = 4.786247
Epoch: 0345 loss = 4.786245
Epoch: 0346 loss = 4.786243
Epoch: 0347 loss = 4.786241
Epoch: 0348 loss = 4.786238
Epoch: 0349 loss = 4.786237
Epoch: 0350 loss = 4.786234
Epoch: 0351 loss = 4.786232
Epoch: 0352 loss = 4.786230
Epoch: 0353 loss = 4.786228
Epoch: 0354 loss = 4.786226
Epoch: 0355 loss = 4.786223
Epoch: 0356 loss = 4.786222
Epoch: 0357 loss = 4.786221
Epoch: 0358 loss = 4.786217
Epoch: 0359 loss = 4.786215
Epoch: 0360 loss = 4.786213
Epoch: 0361 loss = 4.786211
Epoch: 0362 loss = 4.786209
Epoch: 0363 loss = 4.786207
Epoch: 0364 loss = 4.786203
Epoch: 0365 loss = 4.786200
Epoch: 0366 loss = 4.786190
Epoch: 0367 loss = 4.786129
Epoch: 0368 loss = 4.785011
Epoch: 0369 loss = 4.781053
Epoch: 0370 loss = 4.779747
Epoch: 0371 loss = 4.783240
Epoch: 0372 loss = 4.779751
Epoch: 0373 loss = 4.779325
Epoch: 0374 loss = 4.779717
Epoch: 0375 loss = 4.780351
Epoch: 0376 loss = 4.780504
Epoch: 0377 loss = 4.780063
Epoch: 0378 loss = 4.779569
Epoch: 0379 loss = 4.779338
Epoch: 0380 loss = 4.779263
Epoch: 0381 loss = 4.779239
Epoch: 0382 loss = 4.779237
Epoch: 0383 loss = 4.779246
Epoch: 0384 loss = 4.779265
Epoch: 0385 loss = 4.779281
Epoch: 0386 loss = 4.779284
Epoch: 0387 loss = 4.779267
Epoch: 0388 loss = 4.779233
Epoch: 0389 loss = 4.779195
Epoch: 0390 loss = 4.779158
Epoch: 0391 loss = 4.779129
Epoch: 0392 loss = 4.779109
Epoch: 0393 loss = 4.779094
Epoch: 0394 loss = 4.779084
Epoch: 0395 loss = 4.779078
Epoch: 0396 loss = 4.779074
Epoch: 0397 loss = 4.779072
Epoch: 0398 loss = 4.779070
Epoch: 0399 loss = 4.779066
Epoch: 0400 loss = 4.779061
Epoch: 0401 loss = 4.779056
Epoch: 0402 loss = 4.779051
Epoch: 0403 loss = 4.779047
Epoch: 0404 loss = 4.779043
Epoch: 0405 loss = 4.779040
Epoch: 0406 loss = 4.779038
Epoch: 0407 loss = 4.779037
Epoch: 0408 loss = 4.779036
Epoch: 0409 loss = 4.779035
Epoch: 0410 loss = 4.779033
Epoch: 0411 loss = 4.779032
Epoch: 0412 loss = 4.779031
Epoch: 0413 loss = 4.779028
Epoch: 0414 loss = 4.779026
Epoch: 0415 loss = 4.779021
Epoch: 0416 loss = 4.779011
Epoch: 0417 loss = 4.778957
Epoch: 0418 loss = 4.778023
Epoch: 0419 loss = 4.775119
Epoch: 0420 loss = 4.772221
Epoch: 0421 loss = 4.774364
Epoch: 0422 loss = 4.772817
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
c:\Users\Albin\Documents\TDDE19\codebase\Neural graph module\ngm.ipynb Cell 11 in <cell line: 13>()
     18 loss = criterion(output, corr_indx)
     20 if (epoch + 1) % 1 == 0:
---> 21     print('Epoch:', '%04d' % (epoch + 1), 'loss =', '{:.6f}'.format(loss))
     22 # Backward pass
     23 loss.backward()
File b:\Programs\Miniconda\envs\tdde19\lib\site-packages\torch\_tensor.py:659, in Tensor.__format__(self, format_spec)
    657     return handle_torch_function(Tensor.__format__, (self,), self, format_spec)
    658 if self.dim() == 0 and not self.is_meta:
--> 659     return self.item().__format__(format_spec)
    660 return object.__format__(self, format_spec)
KeyboardInterrupt:
%% Cell type:code id: tags:
``` python
# Predict
train, train_mask, corr_rels = make_batch()

with torch.no_grad():
    output = model(train, train_mask)
    output = output.detach().cpu().numpy()

prediction = [relations[np.argmax(pred).item()] for pred in output]
probability = [pred[np.argmax(pred)] for pred in output]
correct_pred = [corr_rels[i] for i in range(len(output))]

print("lowest confidence", min(probability))

def accuracy_score(y_true, y_pred):
    corr_preds = 0
    wrong_preds = 0
    for pred, correct in zip(y_pred, y_true):
        if pred == correct:
            corr_preds += 1
        else:
            wrong_preds += 1
    return corr_preds / (corr_preds + wrong_preds)

print("Accuracy:", accuracy_score(correct_pred, prediction))
```
%% Output
---------------------------------------------------------------------------
NameError                                 Traceback (most recent call last)
c:\Users\Albin\Documents\TDDE19\codebase\Neural graph module\ngm.ipynb Cell 13 in <cell line: 2>()
      1 # Predict
----> 2 train, train_mask, corr_rels = make_batch()
      3 with torch.no_grad():
      4     output = model(train, train_mask)
NameError: name 'make_batch' is not defined
%% Cell type:code id: tags:
``` python
```