"""Unpadded EuroBERT variants.

When a flash-attention backend is active, padding tokens are removed before the
encoder stack (sequence packing driven by cumulative sequence lengths) and the
hidden states are scattered back to their padded positions afterwards, so
downstream heads still see the usual (batch, seq_len, hidden) layout.
"""

from typing import Unpack

import torch
from transformers import DataCollatorWithFlattening
from transformers.masking_utils import create_bidirectional_mask
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.eurobert import (
    EuroBertForMaskedLM,
    EuroBertModel,
    EuroBertForSequenceClassification,
    EuroBertForTokenClassification,
)
from transformers.utils import TransformersKwargs


def _unpad_input(input_ids: torch.Tensor, attention_mask: torch.Tensor):
    """Flatten a padded batch into one packed sequence plus flash-attention kwargs."""
    collator = DataCollatorWithFlattening(return_flash_attn_kwargs=True)
    features = collator(
        [{"input_ids": i[a.bool()].tolist()} for i, a in zip(input_ids, attention_mask)]
    )
    return features


def _pad_output(
    inputs: torch.Tensor,
    indices: torch.Tensor,
    batch: int,
    seqlen: int,
) -> torch.Tensor:
    """Scatter packed outputs back to their original (batch, seqlen, ...) positions."""
    if inputs.dim() == 3:
        # Packed hidden states arrive as (1, total_tokens, hidden); drop only the dummy batch dim.
        inputs = inputs.squeeze(0)
    if inputs.dim() == 1:
        output = torch.zeros(batch * seqlen, dtype=inputs.dtype, device=inputs.device)
        output[indices] = inputs
        padded_inputs = output.view(batch, seqlen)
    else:
        _, *rest = inputs.shape
        output = torch.zeros(batch * seqlen, *rest, dtype=inputs.dtype, device=inputs.device)
        output[indices] = inputs
        padded_inputs = output.view(batch, seqlen, *rest)
    return padded_inputs


class UnpadEuroBertModel(EuroBertModel):
    def __init__(self, config):
        super().__init__(config)

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutput:
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")

        if input_ids is not None:
            device = input_ids.device
            seq_length = input_ids.shape[1]
            batch_size = input_ids.size(0)
        else:
            device = inputs_embeds.device
            seq_length = inputs_embeds.shape[1]
            batch_size = inputs_embeds.size(0)

        indices = None
        if self.config._attn_implementation.startswith("flash_attention"):
            if input_ids is None or attention_mask is None:
                raise ValueError("Unpadding requires both input_ids and attention_mask")
            with torch.no_grad():
                # Flat positions of the non-padding tokens, used to re-pad the output later.
                indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
            features = _unpad_input(input_ids, attention_mask)
            input_ids = features["input_ids"].to(device=device)
            position_ids = features["position_ids"].to(device=device)
            # Sequence boundaries are carried by the cu_seq_lens kwargs instead of a mask.
            attention_mask = None
            kwargs["cu_seq_lens_k"] = features["cu_seq_lens_k"].to(device=device)
            kwargs["cu_seq_lens_q"] = features["cu_seq_lens_q"].to(device=device)
            kwargs["max_length_k"] = features["max_length_k"]
            kwargs["max_length_q"] = features["max_length_q"]

        if inputs_embeds is None:
            inputs_embeds: torch.Tensor = self.embed_tokens(input_ids)

        if position_ids is None:
            position_ids = torch.arange(inputs_embeds.shape[1], device=inputs_embeds.device).unsqueeze(0)

        bidirectional_mask = create_bidirectional_mask(
            config=self.config,
            inputs_embeds=inputs_embeds,
            attention_mask=attention_mask,
        )

        hidden_states = inputs_embeds
        position_embeddings = self.rotary_emb(hidden_states, position_ids=position_ids)

        for encoder_layer in self.layers[: self.config.num_hidden_layers]:
            hidden_states = encoder_layer(
                hidden_states,
                attention_mask=bidirectional_mask,
                position_embeddings=position_embeddings,
                position_ids=position_ids,
                **kwargs,
            )

        hidden_states = self.norm(hidden_states)

        if self.config._attn_implementation.startswith("flash_attention"):
            # Restore the padded (batch, seq_len, hidden) layout expected by the heads.
            hidden_states = _pad_output(
                inputs=hidden_states, indices=indices, batch=batch_size, seqlen=seq_length
            )

        return BaseModelOutput(
            last_hidden_state=hidden_states,
        )


class UnpadEuroBertForMaskedLM(EuroBertForMaskedLM):
    def __init__(self, config):
        super().__init__(config)
        self.model = UnpadEuroBertModel(config)
        self.post_init()


class UnpadEuroBertForSequenceClassification(EuroBertForSequenceClassification):
    def __init__(self, config):
        super().__init__(config)
        self.model = UnpadEuroBertModel(config)
        self.post_init()


class UnpadEuroBertForTokenClassification(EuroBertForTokenClassification):
    def __init__(self, config):
        super().__init__(config)
        self.model = UnpadEuroBertModel(config)
        self.post_init()


def enable_eurobert_unpadding():
    """Monkey-patch the stock EuroBertModel so existing code picks up the unpadded forward."""
    EuroBertModel.forward = UnpadEuroBertModel.forward
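

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative, not part of the module's API above).
# Assumptions: the "EuroBERT/EuroBERT-210m" checkpoint name, flash-attn being
# installed, and a CUDA device are example choices; any EuroBERT checkpoint and
# any flash-attention backend supported by transformers should behave the same.
# Unpadding only triggers when config._attn_implementation starts with
# "flash_attention"; with other backends the forward pass matches the stock
# EuroBertModel.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("EuroBERT/EuroBERT-210m")
    model = (
        UnpadEuroBertForMaskedLM.from_pretrained(
            "EuroBERT/EuroBERT-210m",
            attn_implementation="flash_attention_2",
            torch_dtype=torch.bfloat16,  # flash attention requires fp16/bf16
        )
        .to("cuda")
        .eval()
    )

    # Padded batch in, padded logits out: unpadding/re-padding happens internally.
    batch = tokenizer(
        ["EuroBERT is a multilingual encoder.", "Short one."],
        padding=True,
        return_tensors="pt",
    ).to("cuda")
    with torch.no_grad():
        logits = model(**batch).logits
    print(logits.shape)  # (2, padded_seq_len, vocab_size)

    # Alternatively, patch the stock class in place so existing pipelines pick up
    # the unpadded forward without swapping model classes:
    # enable_eurobert_unpadding()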