emanuelaboros committed
Commit e234a9b · 1 Parent(s): 8e5dcff
Files changed (2)
  1. generic_ner.py +3 -1
  2. modeling_stacked.py +3 -0
generic_ner.py CHANGED
@@ -718,7 +718,9 @@ class MultitaskTokenClassificationPipeline(Pipeline):
         attention_mask = torch.tensor([inputs["attention_mask"]], dtype=torch.long).to(
             self.model.device
         )
-        print(f"Let's check the model: {self.model}")
+        # print(f"Let's check the model: {self.model}")
+        # check get floret model
+        print(f"Let's check the model: {self.model.get_floret_model()}")
         with torch.no_grad():
             outputs = self.model(input_ids, attention_mask)
         return outputs, text_sentences, text
modeling_stacked.py CHANGED
@@ -60,6 +60,9 @@ class ExtendedMultitaskModelForTokenClassification(PreTrainedModel):
         # Initialize weights and apply final processing
         self.post_init()
 
+    def get_floret_model(self):
+        return self.model_floret
+
     def forward(
         self,
         input_ids: Optional[torch.Tensor] = None,
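
Taken together, the two changes add a public get_floret_model() accessor to the stacked model and call it from the pipeline's debug print. Below is a minimal sketch of how the new accessor might be exercised on a loaded checkpoint; the checkpoint id is a placeholder and the auto_map registration under AutoModelForTokenClassification is an assumption, only get_floret_model() and model_floret come from this diff.

# Minimal sketch, not the repository's documented API.
# Assumptions: the custom ExtendedMultitaskModelForTokenClassification class is
# registered for AutoModelForTokenClassification via auto_map, and
# "org/checkpoint-name" is a placeholder for the actual Hub checkpoint id.
from transformers import AutoModelForTokenClassification

model = AutoModelForTokenClassification.from_pretrained(
    "org/checkpoint-name",  # placeholder checkpoint id
    trust_remote_code=True,  # required to load the custom modeling_stacked.py code
)

# New accessor introduced in this commit: returns the underlying self.model_floret.
floret = model.get_floret_model()
print(floret)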