Update modeling_phi3_v.py for compatibility with transformers 4.49

#37
Files changed (2)
  1. modeling_phi3_v.py +1 -1
  2. processing_phi3_v.py +1 -1
modeling_phi3_v.py CHANGED
@@ -1658,7 +1658,7 @@ class Phi3VForCausalLM(Phi3VPreTrainedModel):
1658
  if isinstance(past_key_values, Cache):
1659
  cache_length = past_key_values.get_seq_length()
1660
  past_length = past_key_values.seen_tokens
1661
- max_cache_length = past_key_values.get_max_length()
1662
  else:
1663
  cache_length = past_length = past_key_values[0][0].shape[2]
1664
  max_cache_length = None
 
1658
  if isinstance(past_key_values, Cache):
1659
  cache_length = past_key_values.get_seq_length()
1660
  past_length = past_key_values.seen_tokens
1661
+ max_cache_length = past_key_values.get_max_length() if hasattr(past_key_values, "get_max_length") else past_key_values.get_max_cache_shape()
1662
  else:
1663
  cache_length = past_length = past_key_values[0][0].shape[2]
1664
  max_cache_length = None
processing_phi3_v.py CHANGED
@@ -310,7 +310,7 @@ class Phi3VProcessor(ProcessorMixin):
310
  tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
311
  special_image_token = "<|image|>"
312
 
313
- def __init__(self, image_processor, tokenizer):
314
  self.image_processor = image_processor
315
  self.tokenizer = tokenizer
316
  self.num_img_tokens = image_processor.num_img_tokens
 
310
  tokenizer_class = ("LlamaTokenizer", "LlamaTokenizerFast")
311
  special_image_token = "<|image|>"
312
 
313
+ def __init__(self, image_processor, tokenizer, **kwargs):
314
  self.image_processor = image_processor
315
  self.tokenizer = tokenizer
316
  self.num_img_tokens = image_processor.num_img_tokens