Update modeling_bertchunker.py
modeling_bertchunker.py (+2 -2)
@@ -41,7 +41,7 @@ class BertChunker(PreTrainedModel):
         # slide context window
         MAX_TOKENS=255
         tokens=tokenizer(text, return_tensors="pt",truncation=False)
-        input_ids=tokens['input_ids']
+        input_ids=tokens['input_ids'].to(self.device)
         attention_mask=tokens['attention_mask'][:,0:MAX_TOKENS]
         attention_mask=attention_mask.to(self.device)
         CLS=input_ids[:,0].unsqueeze(0)
@@ -60,7 +60,7 @@ class BertChunker(PreTrainedModel):
 
         ids=ids.to(self.device)
 
-        output=self(input_ids=ids,attention_mask=torch.ones(1, ids.shape[1]))
+        output=self(input_ids=ids,attention_mask=torch.ones(1, ids.shape[1],device=self.device))
         logits = output['logits'][:, 1:-1,:]
 
         chunk_probabilities = F.softmax(logits, dim=-1)[:,:,1]
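Both hunks fix the same class of bug: tensors created by the tokenizer or by torch.ones live on the CPU by default, so once the model has been moved to a GPU the forward pass raises a device-mismatch RuntimeError. A minimal standalone sketch of the failure and the two fix patterns (the Linear layer here is a hypothetical stand-in for BertChunker, not code from this repo):

import torch

# Stand-in module; in this commit the real model is BertChunker on self.device.
model = torch.nn.Linear(4, 2)
if torch.cuda.is_available():
    model = model.to("cuda")

x = torch.ones(1, 4)  # allocated on CPU by default, like the tokenizer output above

try:
    model(x)  # raises RuntimeError when the model's weights are on a GPU
except RuntimeError as err:
    print(err)  # "Expected all tensors to be on the same device ..."

device = next(model.parameters()).device
model(x.to(device))                      # fix 1: move an existing tensor, as in input_ids.to(self.device)
model(torch.ones(1, 4, device=device))   # fix 2: allocate directly on the device, as in the second hunk

Passing device= at construction time, as the second hunk does for the attention mask, also avoids allocating the tensor on the CPU and copying it to the GPU afterwards.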