| | """PyTorch OpenAI GPT-2 model.""" |
| |
|
| | import math |
| | import os |
| | import warnings |
| | from dataclasses import dataclass |
| | from typing import Optional, Tuple, Union |
| |
|
| | import torch |
| | import torch.utils.checkpoint |
| | from packaging import version |
| | from torch import nn |
| | from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss |
| |
|
| | from transformers.activations import ACT2FN |
| | from transformers.generation import GenerationMixin |
| | from transformers.modeling_attn_mask_utils import ( |
| | _prepare_4d_attention_mask_for_sdpa, |
| | _prepare_4d_causal_attention_mask_for_sdpa, |
| | ) |
| | from transformers.modeling_outputs import ( |
| | BaseModelOutputWithPastAndCrossAttentions, |
| | CausalLMOutputWithCrossAttentions, |
| | QuestionAnsweringModelOutput, |
| | SequenceClassifierOutputWithPast, |
| | TokenClassifierOutput, |
| | ) |
| | from transformers.modeling_utils import PreTrainedModel, SequenceSummary |
| | from transformers.pytorch_utils import ( |
| | Conv1D, |
| | find_pruneable_heads_and_indices, |
| | prune_conv1d_layer, |
| | ) |
| | from transformers.utils import ( |
| | ModelOutput, |
| | add_code_sample_docstrings, |
| | add_start_docstrings, |
| | add_start_docstrings_to_model_forward, |
| | get_torch_version, |
| | is_flash_attn_2_available, |
| | is_flash_attn_greater_or_equal_2_10, |
| | logging, |
| | replace_return_docstrings, |
| | ) |
| | from transformers.utils.model_parallel_utils import assert_device_map, get_device_map |
| | from .configuration_gpt2 import GPT2Config |
| |
|
| |
|
| | if is_flash_attn_2_available(): |
| | from transformers.modeling_flash_attention_utils import _flash_attention_forward |
| |
|
| |
|
| | logger = logging.get_logger(__name__) |
| |
|
| | _CHECKPOINT_FOR_DOC = "openai-community/gpt2" |
| | _CONFIG_FOR_DOC = "GPT2Config" |
| |
|
| |
|
| | def load_tf_weights_in_gpt2(model, config, gpt2_checkpoint_path): |
| | """Load tf checkpoints in a pytorch model""" |
| | try: |
| | import re |
| |
|
| | import tensorflow as tf |
| | except ImportError: |
| | logger.error( |
| | "Loading a TensorFlow model in PyTorch, requires TensorFlow to be installed. Please see " |
| | "https://www.tensorflow.org/install/ for installation instructions." |
| | ) |
| | raise |
| | tf_path = os.path.abspath(gpt2_checkpoint_path) |
| | logger.info(f"Converting TensorFlow checkpoint from {tf_path}") |
| | |
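| | # Load the raw variables from the TF checkpoint |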
| | init_vars = tf.train.list_variables(tf_path) |
| | names = [] |
| | arrays = [] |
| | for name, shape in init_vars: |
| | logger.info(f"Loading TF weight {name} with shape {shape}") |
| | array = tf.train.load_variable(tf_path, name) |
| | names.append(name) |
| | arrays.append(array.squeeze()) |
| |
|
| | for name, array in zip(names, arrays): |
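| | # Strip the leading "model/" prefix from the TF variable name |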
| | name = name[6:] |
| | name = name.split("/") |
| | pointer = model |
| | for m_name in name: |
| | if re.fullmatch(r"[A-Za-z]+\d+", m_name): |
| | scope_names = re.split(r"(\d+)", m_name) |
| | else: |
| | scope_names = [m_name] |
| | if scope_names[0] == "w" or scope_names[0] == "g": |
| | pointer = getattr(pointer, "weight") |
| | elif scope_names[0] == "b": |
| | pointer = getattr(pointer, "bias") |
| | elif scope_names[0] == "wpe" or scope_names[0] == "wte": |
| | pointer = getattr(pointer, scope_names[0]) |
| | pointer = getattr(pointer, "weight") |
| | else: |
| | pointer = getattr(pointer, scope_names[0]) |
| | if len(scope_names) >= 2: |
| | num = int(scope_names[1]) |
| | pointer = pointer[num] |
| | try: |
| | if pointer.shape != array.shape: |
| | raise ValueError( |
| | f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched" |
| | ) |
| | except ValueError as e: |
| | e.args += (pointer.shape, array.shape) |
| | raise |
| | logger.info(f"Initialize PyTorch weight {name}") |
| | pointer.data = torch.from_numpy(array) |
| | return model |
| |
|
| |
|
| | class GPT2Attention(nn.Module): |
| | def __init__(self, config, is_cross_attention=False, layer_idx=None): |
| | super().__init__() |
| | self.config = config |
| | max_positions = config.max_position_embeddings |
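| | # Causal mask: a lower-triangular boolean matrix cached as a non-persistent buffer |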
| | self.register_buffer( |
| | "bias", |
| | torch.tril( |
| | torch.ones((max_positions, max_positions), dtype=torch.bool) |
| | ).view(1, 1, max_positions, max_positions), |
| | persistent=False, |
| | ) |
| | self.register_buffer("masked_bias", torch.tensor(-1e4), persistent=False) |
| |
|
| | self.embed_dim = config.hidden_size |
| | self.num_heads = config.num_attention_heads |
| | self.head_dim = self.embed_dim // self.num_heads |
| | self.split_size = self.embed_dim |
| | if self.head_dim * self.num_heads != self.embed_dim: |
| | raise ValueError( |
| | f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:" |
| | f" {self.num_heads})." |
| | ) |
| |
|
| | self.scale_attn_weights = config.scale_attn_weights |
| | self.is_cross_attention = is_cross_attention |
| |
|
| | |
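| | # Layer-wise attention scaling, reordering, and upcasting options |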
| | self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx |
| | self.layer_idx = layer_idx |
| | self.reorder_and_upcast_attn = config.reorder_and_upcast_attn |
| |
|
| | if self.is_cross_attention: |
| | self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim) |
| | self.q_attn = Conv1D(self.embed_dim, self.embed_dim) |
| | else: |
| | self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim) |
| | self.c_proj = Conv1D(self.embed_dim, self.embed_dim) |
| |
|
| | self.attn_dropout = nn.Dropout(config.attn_pdrop) |
| | self.resid_dropout = nn.Dropout(config.resid_pdrop) |
| | self.is_causal = True |
| |
|
| | self.pruned_heads = set() |
| |
|
| | def prune_heads(self, heads): |
| | if len(heads) == 0: |
| | return |
| | heads, index = find_pruneable_heads_and_indices( |
| | heads, self.num_heads, self.head_dim, self.pruned_heads |
| | ) |
| | index_attn = torch.cat( |
| | [index, index + self.split_size, index + (2 * self.split_size)] |
| | ) |
| |
|
| | |
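| | # Prune the fused QKV projection along its output dim and the output projection along its input dim |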
| | self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1) |
| | self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0) |
| |
|
| | |
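| | # Update hyper-parameters to account for the removed heads |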
| | self.split_size = (self.split_size // self.num_heads) * ( |
| | self.num_heads - len(heads) |
| | ) |
| | self.num_heads = self.num_heads - len(heads) |
| | self.pruned_heads = self.pruned_heads.union(heads) |
| |
|
| | def _attn(self, query, key, value, attention_mask=None, head_mask=None): |
| | attn_weights = torch.matmul(query, key.transpose(-1, -2)) |
| |
|
| | if self.scale_attn_weights: |
| | attn_weights = attn_weights / torch.full( |
| | [], |
| | value.size(-1) ** 0.5, |
| | dtype=attn_weights.dtype, |
| | device=attn_weights.device, |
| | ) |
| |
|
| | |
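| | # Optionally scale attention weights by 1 / (layer_idx + 1) (layer-wise attention scaling) |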
| | if self.scale_attn_by_inverse_layer_idx: |
| | attn_weights = attn_weights / float(self.layer_idx + 1) |
| |
|
| | if not self.is_cross_attention: |
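| | # Apply the cached causal mask; only self-attention layers are causally masked |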
| | |
| | query_length, key_length = query.size(-2), key.size(-2) |
| | causal_mask = self.bias[ |
| | :, :, key_length - query_length : key_length, :key_length |
| | ] |
| | mask_value = torch.finfo(attn_weights.dtype).min |
| | |
| | |
| | mask_value = torch.full( |
| | [], mask_value, dtype=attn_weights.dtype, device=attn_weights.device |
| | ) |
| | attn_weights = torch.where( |
| | causal_mask, attn_weights.to(attn_weights.dtype), mask_value |
| | ) |
| |
|
| | if attention_mask is not None: |
| | |
| | attn_weights = attn_weights + attention_mask |
| |
|
| | attn_weights = nn.functional.softmax(attn_weights, dim=-1) |
| |
|
| | |
| | attn_weights = attn_weights.type(value.dtype) |
| | attn_weights = self.attn_dropout(attn_weights) |
| |
|
| | |
| | if head_mask is not None: |
| | attn_weights = attn_weights * head_mask |
| |
|
| | attn_output = torch.matmul(attn_weights, value) |
| |
|
| | return attn_output, attn_weights |
| |
|
| | def _upcast_and_reordered_attn( |
| | self, query, key, value, attention_mask=None, head_mask=None |
| | ): |
| | |
| | bsz, num_heads, q_seq_len, dk = query.size() |
| | _, _, k_seq_len, _ = key.size() |
| |
|
| | |
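| | # Preallocate the attention-weight buffer in fp32 for `torch.baddbmm` |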
| | attn_weights = torch.empty( |
| | bsz * num_heads, |
| | q_seq_len, |
| | k_seq_len, |
| | dtype=torch.float32, |
| | device=query.device, |
| | ) |
| |
|
| | |
| | scale_factor = 1.0 |
| | if self.scale_attn_weights: |
| | scale_factor /= float(value.size(-1)) ** 0.5 |
| |
|
| | if self.scale_attn_by_inverse_layer_idx: |
| | scale_factor /= float(self.layer_idx + 1) |
| |
|
| | |
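| | # Compute attention scores with autocast disabled so the matmul stays in fp32 |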
| | with torch.amp.autocast(query.device.type, enabled=False): |
| | q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape( |
| | -1, dk, k_seq_len |
| | ) |
| | attn_weights = torch.baddbmm( |
| | attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor |
| | ) |
| | attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len) |
| |
|
| | if not self.is_cross_attention: |
| | |
| | query_length, key_length = query.size(-2), key.size(-2) |
| | causal_mask = self.bias[ |
| | :, :, key_length - query_length : key_length, :key_length |
| | ] |
| | mask_value = torch.finfo(attn_weights.dtype).min |
| | |
| | |
| | mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to( |
| | attn_weights.device |
| | ) |
| | attn_weights = torch.where(causal_mask, attn_weights, mask_value) |
| |
|
| | if attention_mask is not None: |
| | |
| | attn_weights = attn_weights + attention_mask |
| |
|
| | attn_weights = nn.functional.softmax(attn_weights, dim=-1) |
| |
|
| | |
| | if attn_weights.dtype != torch.float32: |
| | raise RuntimeError( |
| | "Error with upcasting, attn_weights does not have dtype torch.float32" |
| | ) |
| | attn_weights = attn_weights.type(value.dtype) |
| | attn_weights = self.attn_dropout(attn_weights) |
| |
|
| | |
| | if head_mask is not None: |
| | attn_weights = attn_weights * head_mask |
| |
|
| | attn_output = torch.matmul(attn_weights, value) |
| |
|
| | return attn_output, attn_weights |
| |
|
| | def _split_heads(self, tensor, num_heads, attn_head_size): |
| | """ |
| | Splits hidden_size dim into attn_head_size and num_heads |
| | """ |
| | new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) |
| | tensor = tensor.view(new_shape) |
| | return tensor.permute(0, 2, 1, 3) |
| |
|
| | def _merge_heads(self, tensor, num_heads, attn_head_size): |
| | """ |
| | Merges attn_head_size dim and num_attn_heads dim into hidden_size |
| | """ |
| | tensor = tensor.permute(0, 2, 1, 3).contiguous() |
| | new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,) |
| | return tensor.view(new_shape) |
| |
|
| | def forward( |
| | self, |
| | hidden_states: Optional[Tuple[torch.FloatTensor]], |
| | layer_past: Optional[Tuple[torch.Tensor]] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | encoder_hidden_states: Optional[torch.Tensor] = None, |
| | encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| | use_cache: Optional[bool] = False, |
| | output_attentions: Optional[bool] = False, |
| | ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: |
| | if encoder_hidden_states is not None: |
| | if not hasattr(self, "q_attn"): |
| | raise ValueError( |
| | "If class is used as cross attention, the weights `q_attn` have to be defined. " |
| | "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." |
| | ) |
| |
|
| | query = self.q_attn(hidden_states) |
| | key, value = self.c_attn(encoder_hidden_states).split( |
| | self.split_size, dim=2 |
| | ) |
| | attention_mask = encoder_attention_mask |
| | else: |
| | query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) |
| |
|
| | query = self._split_heads(query, self.num_heads, self.head_dim) |
| | key = self._split_heads(key, self.num_heads, self.head_dim) |
| | value = self._split_heads(value, self.num_heads, self.head_dim) |
| |
|
| | if layer_past is not None: |
| | past_key, past_value = layer_past |
| | key = torch.cat((past_key, key), dim=-2) |
| | value = torch.cat((past_value, value), dim=-2) |
| |
|
| | if use_cache is True: |
| | present = (key, value) |
| | else: |
| | present = None |
| |
|
| | if self.reorder_and_upcast_attn: |
| | attn_output, attn_weights = self._upcast_and_reordered_attn( |
| | query, key, value, attention_mask, head_mask |
| | ) |
| | else: |
| | attn_output, attn_weights = self._attn( |
| | query, key, value, attention_mask, head_mask |
| | ) |
| |
|
| | attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim) |
| | attn_output = self.c_proj(attn_output) |
| | attn_output = self.resid_dropout(attn_output) |
| |
|
| | outputs = (attn_output, present) |
| | if output_attentions: |
| | outputs += (attn_weights,) |
| |
|
| | return outputs |
| |
|
| |
|
| | class GPT2FlashAttention2(GPT2Attention): |
| | """ |
| | GPT2 flash attention module. This module inherits from `GPT2Attention` as the weights of the module stay |
| | untouched. The only required change would be on the forward pass where it needs to correctly call the public API of |
| | flash attention and deal with padding tokens in case the input contains any of them. |
| | """ |
| |
|
| | |
| | def __init__(self, *args, **kwargs): |
| | super().__init__(*args, **kwargs) |
| |
|
| | |
| | |
| | |
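| | # flash_attn < 2.1 produces a top-left aligned causal mask; 2.1+ defaults to bottom-right alignment |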
| | self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10() |
| |
|
| | def forward( |
| | self, |
| | hidden_states: Optional[Tuple[torch.FloatTensor]], |
| | layer_past: Optional[Tuple[torch.Tensor]] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | encoder_hidden_states: Optional[torch.Tensor] = None, |
| | encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| | use_cache: Optional[bool] = False, |
| | output_attentions: Optional[bool] = False, |
| | ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: |
| | bsz, _, _ = hidden_states.size() |
| | if encoder_hidden_states is not None: |
| | if not hasattr(self, "q_attn"): |
| | raise ValueError( |
| | "If class is used as cross attention, the weights `q_attn` have to be defined. " |
| | "Please make sure to instantiate class with `GPT2Attention(..., is_cross_attention=True)`." |
| | ) |
| |
|
| | query = self.q_attn(hidden_states) |
| | key, value = self.c_attn(encoder_hidden_states).split( |
| | self.split_size, dim=2 |
| | ) |
| | attention_mask = encoder_attention_mask |
| | else: |
| | query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) |
| |
|
| | query = self._split_heads(query, self.num_heads, self.head_dim) |
| | key = self._split_heads(key, self.num_heads, self.head_dim) |
| | value = self._split_heads(value, self.num_heads, self.head_dim) |
| |
|
| | if layer_past is not None: |
| | past_key = layer_past[0] |
| | past_value = layer_past[1] |
| | key = torch.cat((past_key, key), dim=-2) |
| | value = torch.cat((past_value, value), dim=-2) |
| |
|
| | present = None |
| | if use_cache is True: |
| | present = (key, value) |
| |
|
| | query_length = query.shape[2] |
| | tgt_len = key.shape[2] |
| |
|
| | |
| | |
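| | # Flash Attention expects tensors of shape (batch, seq_len, num_heads, head_dim), so move the head dim after seq_len |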
| | query = query.transpose(1, 2).view( |
| | bsz, query_length, self.num_heads, self.head_dim |
| | ) |
| | key = key.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) |
| | value = value.transpose(1, 2).view(bsz, tgt_len, self.num_heads, self.head_dim) |
| |
|
| | attn_dropout = self.attn_dropout.p if self.training else 0.0 |
| |
|
| | |
| | |
| | |
| | |
| | |
| |
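| | # If layer norms / embeddings were upcast (e.g. for PEFT training stability), the hidden states may have |
| | # been silently cast to float32; cast q/k/v back to the expected compute dtype before flash attention |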
|
| | if query.dtype == torch.float32: |
| | if torch.is_autocast_enabled(): |
| | target_dtype = torch.get_autocast_gpu_dtype() |
| | |
| | elif hasattr(self.config, "_pre_quantization_dtype"): |
| | target_dtype = self.config._pre_quantization_dtype |
| | else: |
| | target_dtype = self.c_proj.weight.dtype |
| |
|
| | logger.warning_once( |
| | f"The input hidden states seems to be silently casted in float32, this might be related to" |
| | f" the fact you have upcasted embedding or layer norm layers in float32. We will cast back the input in" |
| | f" {target_dtype}." |
| | ) |
| |
|
| | query = query.to(target_dtype) |
| | key = key.to(target_dtype) |
| | value = value.to(target_dtype) |
| |
|
| | attn_output = _flash_attention_forward( |
| | query, |
| | key, |
| | value, |
| | attention_mask, |
| | query_length, |
| | dropout=attn_dropout, |
| | is_causal=self.is_causal, |
| | use_top_left_mask=self._flash_attn_uses_top_left_mask, |
| | ) |
| |
|
| | attn_weights_reshaped = attn_output.reshape( |
| | bsz, query_length, self.num_heads * self.head_dim |
| | ) |
| | attn_output = self.c_proj(attn_weights_reshaped) |
| | attn_output = self.resid_dropout(attn_output) |
| |
|
| | outputs = (attn_output, present) |
| | if output_attentions: |
| | outputs += (attn_weights_reshaped,) |
| |
|
| | return outputs |
| |
|
| |
|
| | class GPT2SdpaAttention(GPT2Attention): |
| | """ |
| | GPT2 attention module using torch.nn.functional.scaled_dot_product_attention. This module inherits from |
| | `GPT2Attention` as the weights of the module stay untouched. The only changes are on the forward pass |
| | to adapt to the SDPA API. |
| | """ |
| |
|
| | def __init__(self, *args, **kwargs): |
| | super().__init__(*args, **kwargs) |
| |
|
| | |
| | |
| | |
| | |
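| | # SDPA's memory-efficient backend is broken on non-contiguous inputs with a custom attn_mask for torch < 2.2 |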
| | self.require_contiguous_qkv = version.parse( |
| | get_torch_version() |
| | ) < version.parse("2.2.0") |
| |
|
| | def forward( |
| | self, |
| | hidden_states: Optional[Tuple[torch.FloatTensor]], |
| | layer_past: Optional[Tuple[torch.Tensor]] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | encoder_hidden_states: Optional[torch.Tensor] = None, |
| | encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| | use_cache: Optional[bool] = False, |
| | output_attentions: Optional[bool] = False, |
| | ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: |
| | if output_attentions or head_mask is not None: |
| | logger.warning_once( |
| | "`GPT2SdpaAttention` is used but `torch.nn.functional.scaled_dot_product_attention` does not support " |
| | "`output_attentions=True` or `head_mask`. Falling back to the manual attention implementation, but " |
| | "specifying the manual implementation will be required from Transformers version v5.0.0 onwards. " |
| | 'This warning can be removed using the argument `attn_implementation="eager"` when loading the model.' |
| | ) |
| | return super().forward( |
| | hidden_states=hidden_states, |
| | layer_past=layer_past, |
| | attention_mask=attention_mask, |
| | head_mask=head_mask, |
| | encoder_hidden_states=encoder_hidden_states, |
| | encoder_attention_mask=encoder_attention_mask, |
| | use_cache=use_cache, |
| | output_attentions=output_attentions, |
| | ) |
| |
|
| | bsz, q_len, _ = hidden_states.size() |
| |
|
| | |
| | is_cross_attention = encoder_hidden_states is not None |
| | if is_cross_attention: |
| | if not hasattr(self, "q_attn"): |
| | raise ValueError( |
| | "If class is used as cross attention, the weights `q_attn` have to be defined. " |
| | "Please make sure to instantiate class with `GPT2SdpaAttention(..., is_cross_attention=True)`." |
| | ) |
| |
|
| | query = self.q_attn(hidden_states) |
| | key, value = self.c_attn(encoder_hidden_states).split( |
| | self.split_size, dim=2 |
| | ) |
| | attention_mask = encoder_attention_mask |
| | else: |
| | query, key, value = self.c_attn(hidden_states).split(self.split_size, dim=2) |
| |
|
| | query = self._split_heads(query, self.num_heads, self.head_dim) |
| | key = self._split_heads(key, self.num_heads, self.head_dim) |
| | value = self._split_heads(value, self.num_heads, self.head_dim) |
| |
|
| | |
| | if layer_past is not None: |
| | past_key = layer_past[0] |
| | past_value = layer_past[1] |
| | key = torch.cat((past_key, key), dim=-2) |
| | value = torch.cat((past_value, value), dim=-2) |
| |
|
| | present = None |
| | if use_cache is True: |
| | present = (key, value) |
| |
|
| | |
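| | # Work around the non-contiguous-input SDPA issue on CUDA for older torch versions |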
| | if ( |
| | self.require_contiguous_qkv |
| | and query.device.type == "cuda" |
| | and attention_mask is not None |
| | ): |
| | query = query.contiguous() |
| | key = key.contiguous() |
| | value = value.contiguous() |
| |
|
| | |
| | |
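| | # Let SDPA build the causal mask itself only when no explicit mask is given; a single decoded token |
| | # (q_len == 1) and cross-attention never need causal masking |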
| | is_causal = ( |
| | True |
| | if attention_mask is None and q_len > 1 and not is_cross_attention |
| | else False |
| | ) |
| |
|
| | attn_output = torch.nn.functional.scaled_dot_product_attention( |
| | query, |
| | key, |
| | value, |
| | attn_mask=attention_mask, |
| | dropout_p=self.attn_dropout.p if self.training else 0.0, |
| | is_causal=is_causal, |
| | ) |
| |
|
| | |
| | attn_output = attn_output.transpose(1, 2).contiguous() |
| | attn_output = attn_output.view(bsz, q_len, self.embed_dim) |
| |
|
| | |
| | attn_output = self.c_proj(attn_output) |
| | attn_output = self.resid_dropout(attn_output) |
| |
|
| | return attn_output, present, None |
| |
|
| |
|
| | class GPT2MLP(nn.Module): |
| | def __init__(self, intermediate_size, config): |
| | super().__init__() |
| | embed_dim = config.hidden_size |
| | self.c_fc = Conv1D(intermediate_size, embed_dim) |
| | self.c_proj = Conv1D(embed_dim, intermediate_size) |
| | self.act = ACT2FN[config.activation_function] |
| | self.dropout = nn.Dropout(config.resid_pdrop) |
| |
|
| | def forward( |
| | self, hidden_states: Optional[Tuple[torch.FloatTensor]] |
| | ) -> torch.FloatTensor: |
| | hidden_states = self.c_fc(hidden_states) |
| | hidden_states = self.act(hidden_states) |
| | hidden_states = self.c_proj(hidden_states) |
| | hidden_states = self.dropout(hidden_states) |
| | return hidden_states |
| |
|
| |
|
| | GPT2_ATTENTION_CLASSES = { |
| | "eager": GPT2Attention, |
| | "flash_attention_2": GPT2FlashAttention2, |
| | "sdpa": GPT2SdpaAttention, |
| | } |
| |
|
| |
|
| | class GPT2Block(nn.Module): |
| | def __init__(self, config, layer_idx=None): |
| | super().__init__() |
| | hidden_size = config.hidden_size |
| | inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size |
| | attention_class = GPT2_ATTENTION_CLASSES[config._attn_implementation] |
| |
|
| | self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) |
| | self.attn = attention_class(config=config, layer_idx=layer_idx) |
| | self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) |
| |
|
| | if config.add_cross_attention: |
| | self.crossattention = attention_class( |
| | config=config, is_cross_attention=True, layer_idx=layer_idx |
| | ) |
| | self.ln_cross_attn = nn.LayerNorm( |
| | hidden_size, eps=config.layer_norm_epsilon |
| | ) |
| |
|
| | self.mlp = GPT2MLP(inner_dim, config) |
| |
|
| | def forward( |
| | self, |
| | hidden_states: Optional[Tuple[torch.FloatTensor]], |
| | layer_past: Optional[Tuple[torch.Tensor]] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | encoder_hidden_states: Optional[torch.Tensor] = None, |
| | encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| | use_cache: Optional[bool] = False, |
| | output_attentions: Optional[bool] = False, |
| | ) -> Union[ |
| | Tuple[torch.Tensor], |
| | Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]], |
| | ]: |
| | residual = hidden_states |
| | hidden_states = self.ln_1(hidden_states) |
| | attn_outputs = self.attn( |
| | hidden_states, |
| | layer_past=layer_past, |
| | attention_mask=attention_mask, |
| | head_mask=head_mask, |
| | use_cache=use_cache, |
| | output_attentions=output_attentions, |
| | ) |
| | attn_output = attn_outputs[0] |
| | outputs = attn_outputs[1:] |
| | |
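| | # Residual connection around the self-attention block |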
| | hidden_states = attn_output + residual |
| |
|
| | if encoder_hidden_states is not None: |
| | |
| | if not hasattr(self, "crossattention"): |
| | raise ValueError( |
| | f"If `encoder_hidden_states` are passed, {self} has to be instantiated with " |
| | "cross-attention layers by setting `config.add_cross_attention=True`" |
| | ) |
| | residual = hidden_states |
| | hidden_states = self.ln_cross_attn(hidden_states) |
| | cross_attn_outputs = self.crossattention( |
| | hidden_states, |
| | attention_mask=attention_mask, |
| | head_mask=head_mask, |
| | encoder_hidden_states=encoder_hidden_states, |
| | encoder_attention_mask=encoder_attention_mask, |
| | output_attentions=output_attentions, |
| | ) |
| | attn_output = cross_attn_outputs[0] |
| | |
| | hidden_states = residual + attn_output |
| | outputs = ( |
| | outputs + cross_attn_outputs[2:] |
| | ) |
| |
|
| | residual = hidden_states |
| | hidden_states = self.ln_2(hidden_states) |
| | feed_forward_hidden_states = self.mlp(hidden_states) |
| | |
| | hidden_states = residual + feed_forward_hidden_states |
| |
|
| | if use_cache: |
| | outputs = (hidden_states,) + outputs |
| | else: |
| | outputs = (hidden_states,) + outputs[1:] |
| |
|
| | return outputs |
| |
|
| |
|
| | class GPT2PreTrainedModel(PreTrainedModel): |
| | """ |
| | An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained |
| | models. |
| | """ |
| |
|
| | config_class = GPT2Config |
| | load_tf_weights = load_tf_weights_in_gpt2 |
| | base_model_prefix = "transformer" |
| | is_parallelizable = True |
| | supports_gradient_checkpointing = True |
| | _no_split_modules = ["GPT2Block"] |
| | _skip_keys_device_placement = "past_key_values" |
| | _supports_flash_attn_2 = True |
| | _supports_sdpa = True |
| |
|
| | def __init__(self, *inputs, **kwargs): |
| | super().__init__(*inputs, **kwargs) |
| |
|
| | def _init_weights(self, module): |
| | """Initialize the weights.""" |
| | if isinstance(module, (nn.Linear, Conv1D)): |
| | |
| | |
| | module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| | if module.bias is not None: |
| | module.bias.data.zero_() |
| | elif isinstance(module, nn.Embedding): |
| | module.weight.data.normal_(mean=0.0, std=self.config.initializer_range) |
| | if module.padding_idx is not None: |
| | module.weight.data[module.padding_idx].zero_() |
| | elif isinstance(module, nn.LayerNorm): |
| | module.bias.data.zero_() |
| | module.weight.data.fill_(1.0) |
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
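| | # Per the GPT-2 paper, scale residual-path projections (c_proj) at init by 1/sqrt(2 * n_layer) to |
| | # account for accumulation along the residual stream |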
| | for name, p in module.named_parameters(): |
| | if name == "c_proj.weight": |
| | |
| | p.data.normal_( |
| | mean=0.0, |
| | std=( |
| | self.config.initializer_range |
| | / math.sqrt(2 * self.config.n_layer) |
| | ), |
| | ) |
| |
|
| |
|
| | @dataclass |
| | class GPT2DoubleHeadsModelOutput(ModelOutput): |
| | """ |
| | Base class for outputs of [`GPT2DoubleHeadsModel`], which has a language modeling head and a multiple-choice |
| | classification head on top of the transformer. |
| | |
| | Args: |
| | loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided): |
| | Language modeling loss. |
| | mc_loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `mc_labels` is provided): |
| | Multiple choice classification loss. |
| | logits (`torch.FloatTensor` of shape `(batch_size, num_choices, sequence_length, config.vocab_size)`): |
| | Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax). |
| | mc_logits (`torch.FloatTensor` of shape `(batch_size, num_choices)`): |
| | Prediction scores of the multiple choice classification head (scores for each choice before SoftMax). |
| | past_key_values (`Tuple[Tuple[torch.Tensor]]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`): |
| | Tuple of length `config.n_layers`, containing tuples of tensors of shape `(batch_size, num_heads, |
| | sequence_length, embed_size_per_head)`. |
| | |
| | Contains pre-computed hidden-states (key and values in the attention blocks) that can be used (see |
| | `past_key_values` input) to speed up sequential decoding. |
| | hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`): |
| | Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of |
| | shape `(batch_size, sequence_length, hidden_size)`. |
| | |
| | Hidden-states of the model at the output of each layer plus the initial embedding outputs. |
| | attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`): |
| | Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length, |
| | sequence_length)`. |
| | |
| | Attention weights after the attention softmax, used to compute the weighted average in the |
| | self-attention heads. |
| | """ |
| |
|
| | loss: Optional[torch.FloatTensor] = None |
| | mc_loss: Optional[torch.FloatTensor] = None |
| | logits: torch.FloatTensor = None |
| | mc_logits: torch.FloatTensor = None |
| | past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None |
| | hidden_states: Optional[Tuple[torch.FloatTensor]] = None |
| | attentions: Optional[Tuple[torch.FloatTensor]] = None |
| |
|
| |
|
| | GPT2_START_DOCSTRING = r""" |
| | |
| | This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the |
| | library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads |
| | etc.) |
| | |
| | This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. |
| | Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage |
| | and behavior. |
| | |
| | Parameters: |
| | config ([`GPT2Config`]): Model configuration class with all the parameters of the model. |
| | Initializing with a config file does not load the weights associated with the model, only the |
| | configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. |
| | """ |
| |
|
| | GPT2_INPUTS_DOCSTRING = r""" |
| | Args: |
| | input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`): |
| | `input_ids_length` = `sequence_length` if `past_key_values` is `None` else |
| | `past_key_values[0][0].shape[-2]` (`sequence_length` of input past key value states). Indices of input |
| | sequence tokens in the vocabulary. |
| | |
| | If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as |
| | `input_ids`. |
| | |
| | Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and |
| | [`PreTrainedTokenizer.__call__`] for details. |
| | |
| | [What are input IDs?](../glossary#input-ids) |
| | past_key_values (`Tuple[Tuple[torch.Tensor]]` of length `config.n_layers`): |
| | Contains precomputed hidden-states (key and values in the attention blocks) as computed by the model (see |
| | `past_key_values` output below). Can be used to speed up sequential decoding. The `input_ids` which have |
| | their past given to this model should not be passed as `input_ids` as they have already been computed. |
| | attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`: |
| | |
| | - 1 for tokens that are **not masked**, |
| | - 0 for tokens that are **masked**. |
| | |
| | If `past_key_values` is used, `attention_mask` needs to contain the masking strategy that was used for |
| | `past_key_values`. In other words, the `attention_mask` always has to have the length: |
| | `len(past_key_values) + len(input_ids)` |
| | |
| | [What are attention masks?](../glossary#attention-mask) |
| | token_type_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*): |
| | Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0, |
| | 1]`: |
| | |
| | - 0 corresponds to a *sentence A* token, |
| | - 1 corresponds to a *sentence B* token. |
| | |
| | [What are token type IDs?](../glossary#token-type-ids) |
| | position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0, |
| | config.max_position_embeddings - 1]`. |
| | |
| | [What are position IDs?](../glossary#position-ids) |
| | head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*): |
| | Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`: |
| | |
| | - 1 indicates the head is **not masked**, |
| | - 0 indicates the head is **masked**. |
| | |
| | inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*): |
| | Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This |
| | is useful if you want more control over how to convert `input_ids` indices into associated vectors than the |
| | model's internal embedding lookup matrix. |
| | |
| | If `past_key_values` is used, optionally only the last `inputs_embeds` have to be input (see |
| | `past_key_values`). |
| | use_cache (`bool`, *optional*): |
| | If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see |
| | `past_key_values`). |
| | output_attentions (`bool`, *optional*): |
| | Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned |
| | tensors for more detail. |
| | output_hidden_states (`bool`, *optional*): |
| | Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for |
| | more detail. |
| | return_dict (`bool`, *optional*): |
| | Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. |
| | """ |
| | PARALLELIZE_DOCSTRING = r""" |
| | This is an experimental feature and is subject to change at a moment's notice. |
| | |
| | Uses a device map to distribute attention modules of the model across several devices. If no device map is given, |
| | it will evenly distribute blocks across all devices. |
| | |
| | Args: |
| | device_map (`Dict[int, list]`, *optional*): |
| | A dictionary that maps attention modules to devices. Note that the embedding module and LMHead are always |
| | automatically mapped to the first device (for esoteric reasons). That means that the first device should |
| | have fewer attention modules mapped to it than other devices. For reference, the gpt2 models have the |
| | following number of attention modules: |
| | |
| | - openai-community/gpt2: 12 |
| | - openai-community/gpt2-medium: 24 |
| | - openai-community/gpt2-large: 36 |
| | - openai-community/gpt2-xl: 48 |
| | |
| | Example: |
| | |
| | ```python |
| | # Here is an example of a device map on a machine with 4 GPUs using gpt2-xl, which has a total of 48 attention modules: |
| | model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-xl") |
| | device_map = { |
| | 0: [0, 1, 2, 3, 4, 5, 6, 7, 8], |
| | 1: [9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21], |
| | 2: [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34], |
| | 3: [35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47], |
| | } |
| | model.parallelize(device_map) |
| | ``` |
| | """ |
| | DEPARALLELIZE_DOCSTRING = r""" |
| | Moves the model to cpu from a model parallel state. |
| | |
| | Example: |
| | |
| | ```python |
| | # On a 4 GPU machine with openai-community/gpt2-large: |
| | model = GPT2LMHeadModel.from_pretrained("openai-community/gpt2-large") |
| | device_map = { |
| | 0: [0, 1, 2, 3, 4, 5, 6, 7], |
| | 1: [8, 9, 10, 11, 12, 13, 14, 15], |
| | 2: [16, 17, 18, 19, 20, 21, 22, 23], |
| | 3: [24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35], |
| | } |
| | model.parallelize(device_map) # Splits the model across several devices |
| | model.deparallelize() # Put the model back on cpu and cleans memory by calling torch.cuda.empty_cache() |
| | ``` |
| | """ |
| |
|
| |
|
| | @add_start_docstrings( |
| | "The bare GPT2 Model transformer outputting raw hidden-states without any specific head on top.", |
| | GPT2_START_DOCSTRING, |
| | ) |
| | class GPT2Model(GPT2PreTrainedModel): |
| | _supports_param_buffer_assignment = False |
| |
|
| | def __init__(self, config): |
| | super().__init__(config) |
| |
|
| | self.embed_dim = config.hidden_size |
| |
|
| | self.wte = nn.Embedding(config.vocab_size, self.embed_dim) |
| | self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim) |
| |
|
| | self.drop = nn.Dropout(config.embd_pdrop) |
| | self.h = nn.ModuleList( |
| | [GPT2Block(config, layer_idx=i) for i in range(config.num_hidden_layers)] |
| | ) |
| | self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon) |
| |
|
| | |
| | self.model_parallel = False |
| | self.device_map = None |
| | self.gradient_checkpointing = False |
| | self._attn_implementation = config._attn_implementation |
| |
|
| | |
| | self.post_init() |
| |
|
| | @add_start_docstrings(PARALLELIZE_DOCSTRING) |
| | def parallelize(self, device_map=None): |
| | |
| | warnings.warn( |
| | "`GPT2Model.parallelize` is deprecated and will be removed in v5 of Transformers, you should load your" |
| | " model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" |
| | " `device_map` but it needs to be a dictionary module_name to device, so for instance {'h.0': 0, 'h.1': 1," |
| | " ...}", |
| | FutureWarning, |
| | ) |
| | self.device_map = ( |
| | get_device_map(len(self.h), range(torch.cuda.device_count())) |
| | if device_map is None |
| | else device_map |
| | ) |
| | assert_device_map(self.device_map, len(self.h)) |
| | self.model_parallel = True |
| | self.first_device = ( |
| | "cpu" |
| | if "cpu" in self.device_map.keys() |
| | else "cuda:" + str(min(self.device_map.keys())) |
| | ) |
| | self.last_device = "cuda:" + str(max(self.device_map.keys())) |
| | self.wte = self.wte.to(self.first_device) |
| | self.wpe = self.wpe.to(self.first_device) |
| | |
| | for k, v in self.device_map.items(): |
| | for block in v: |
| | cuda_device = "cuda:" + str(k) |
| | self.h[block] = self.h[block].to(cuda_device) |
| | |
| | self.ln_f = self.ln_f.to(self.last_device) |
| |
|
| | @add_start_docstrings(DEPARALLELIZE_DOCSTRING) |
| | def deparallelize(self): |
| | warnings.warn( |
| | "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", |
| | FutureWarning, |
| | ) |
| | self.model_parallel = False |
| | self.device_map = None |
| | self.first_device = "cpu" |
| | self.last_device = "cpu" |
| | self.wte = self.wte.to("cpu") |
| | self.wpe = self.wpe.to("cpu") |
| | for index in range(len(self.h)): |
| | self.h[index] = self.h[index].to("cpu") |
| | self.ln_f = self.ln_f.to("cpu") |
| | torch.cuda.empty_cache() |
| |
|
| | def get_input_embeddings(self): |
| | return self.wte |
| |
|
| | def set_input_embeddings(self, new_embeddings): |
| | self.wte = new_embeddings |
| |
|
| | def _prune_heads(self, heads_to_prune): |
| | """ |
| | Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} |
| | """ |
| | for layer, heads in heads_to_prune.items(): |
| | self.h[layer].attn.prune_heads(heads) |
| |
|
| | @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) |
| | @add_code_sample_docstrings( |
| | checkpoint=_CHECKPOINT_FOR_DOC, |
| | output_type=BaseModelOutputWithPastAndCrossAttentions, |
| | config_class=_CONFIG_FOR_DOC, |
| | ) |
| | def forward( |
| | self, |
| | input_ids: Optional[torch.LongTensor] = None, |
| | past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | token_type_ids: Optional[torch.LongTensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | encoder_hidden_states: Optional[torch.Tensor] = None, |
| | encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| | use_cache: Optional[bool] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: |
| | output_attentions = ( |
| | output_attentions |
| | if output_attentions is not None |
| | else self.config.output_attentions |
| | ) |
| | output_hidden_states = ( |
| | output_hidden_states |
| | if output_hidden_states is not None |
| | else self.config.output_hidden_states |
| | ) |
| | use_cache = use_cache if use_cache is not None else self.config.use_cache |
| | return_dict = ( |
| | return_dict if return_dict is not None else self.config.use_return_dict |
| | ) |
| |
|
| | if input_ids is not None and inputs_embeds is not None: |
| | raise ValueError( |
| | "You cannot specify both input_ids and inputs_embeds at the same time" |
| | ) |
| | elif input_ids is not None: |
| | self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask) |
| | input_shape = input_ids.size() |
| | input_ids = input_ids.view(-1, input_shape[-1]) |
| | batch_size = input_ids.shape[0] |
| | elif inputs_embeds is not None: |
| | input_shape = inputs_embeds.size()[:-1] |
| | batch_size = inputs_embeds.shape[0] |
| | else: |
| | raise ValueError("You have to specify either input_ids or inputs_embeds") |
| |
|
| | device = input_ids.device if input_ids is not None else inputs_embeds.device |
| |
|
| | if token_type_ids is not None: |
| | token_type_ids = token_type_ids.view(-1, input_shape[-1]) |
| |
|
| | if past_key_values is None: |
| | past_length = 0 |
| | past_key_values = tuple([None] * len(self.h)) |
| | else: |
| | past_length = past_key_values[0][0].size(-2) |
| | if position_ids is None: |
| | position_ids = torch.arange( |
| | past_length, |
| | input_shape[-1] + past_length, |
| | dtype=torch.long, |
| | device=device, |
| | ) |
| | position_ids = position_ids.unsqueeze(0) |
| |
|
| | if inputs_embeds is None: |
| | inputs_embeds = self.wte(input_ids) |
| | position_embeds = self.wpe(position_ids) |
| | hidden_states = inputs_embeds + position_embeds |
| |
|
| | |
| | _use_sdpa = ( |
| | self._attn_implementation == "sdpa" |
| | and output_attentions is False |
| | and head_mask is None |
| | ) |
| | attention_mask = ( |
| | attention_mask.view(batch_size, -1) if attention_mask is not None else None |
| | ) |
| | if self._attn_implementation == "flash_attention_2": |
| | attention_mask = ( |
| | attention_mask |
| | if (attention_mask is not None and 0 in attention_mask) |
| | else None |
| | ) |
| | elif _use_sdpa: |
| | attention_mask = _prepare_4d_causal_attention_mask_for_sdpa( |
| | attention_mask=attention_mask, |
| | input_shape=(batch_size, input_shape[-1]), |
| | inputs_embeds=inputs_embeds, |
| | past_key_values_length=past_length, |
| | ) |
| | else: |
| | if attention_mask is not None: |
| | |
| | |
| | |
| | |
| | |
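| | # Broadcast the 2D padding mask to shape [batch_size, 1, 1, seq_length] so it can be added to the attention scores |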
| | attention_mask = attention_mask[:, None, None, :] |
| |
|
| | |
| | |
| | |
| | |
| | |
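| | # Turn 1.0/0.0 keep/mask values into additive biases: 0.0 for kept positions, dtype-min for masked ones |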
| | attention_mask = attention_mask.to( |
| | dtype=self.dtype |
| | ) |
| | attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min |
| |
|
| | |
| | |
| | if self.config.add_cross_attention and encoder_hidden_states is not None: |
| | encoder_batch_size, encoder_sequence_length, _ = ( |
| | encoder_hidden_states.size() |
| | ) |
| | encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length) |
| | if encoder_attention_mask is None: |
| | encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device) |
| | if _use_sdpa: |
| | encoder_attention_mask = _prepare_4d_attention_mask_for_sdpa( |
| | mask=encoder_attention_mask, |
| | dtype=inputs_embeds.dtype, |
| | tgt_len=input_shape[-1], |
| | ) |
| | elif not self._attn_implementation == "flash_attention_2": |
| | encoder_attention_mask = self.invert_attention_mask( |
| | encoder_attention_mask |
| | ) |
| | else: |
| | encoder_attention_mask = None |
| |
|
| | |
| | |
| | |
| | |
| | head_mask = self.get_head_mask(head_mask, self.config.n_layer) |
| |
|
| | if token_type_ids is not None: |
| | token_type_embeds = self.wte(token_type_ids) |
| | hidden_states = hidden_states + token_type_embeds |
| |
|
| | hidden_states = self.drop(hidden_states) |
| |
|
| | output_shape = (-1,) + input_shape[1:] + (hidden_states.size(-1),) |
| |
|
| | if self.gradient_checkpointing and self.training: |
| | if use_cache: |
| | logger.warning_once( |
| | "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..." |
| | ) |
| | use_cache = False |
| |
|
| | presents = () if use_cache else None |
| | all_self_attentions = () if output_attentions else None |
| | all_cross_attentions = ( |
| | () if output_attentions and self.config.add_cross_attention else None |
| | ) |
| | all_hidden_states = () if output_hidden_states else None |
| | for i in range(len(self.h)): |
| | block, layer_past = self.h[i], past_key_values[i] |
| | |
| | if self.model_parallel: |
| | torch.cuda.set_device(hidden_states.device) |
| | |
| | if layer_past is not None: |
| | layer_past = tuple( |
| | past_state.to(hidden_states.device) for past_state in layer_past |
| | ) |
| | |
| | if attention_mask is not None: |
| | attention_mask = attention_mask.to(hidden_states.device) |
| | if isinstance(head_mask, torch.Tensor): |
| | head_mask = head_mask.to(hidden_states.device) |
| | if output_hidden_states: |
| | all_hidden_states = all_hidden_states + (hidden_states,) |
| |
|
| | if self.gradient_checkpointing and self.training: |
| | outputs = self._gradient_checkpointing_func( |
| | block.__call__, |
| | hidden_states, |
| | None, |
| | attention_mask, |
| | head_mask[i], |
| | encoder_hidden_states, |
| | encoder_attention_mask, |
| | use_cache, |
| | output_attentions, |
| | ) |
| | else: |
| | outputs = block( |
| | hidden_states, |
| | layer_past=layer_past, |
| | attention_mask=attention_mask, |
| | head_mask=head_mask[i], |
| | encoder_hidden_states=encoder_hidden_states, |
| | encoder_attention_mask=encoder_attention_mask, |
| | use_cache=use_cache, |
| | output_attentions=output_attentions, |
| | ) |
| |
|
| | hidden_states = outputs[0] |
| | if use_cache is True: |
| | presents = presents + (outputs[1],) |
| |
|
| | if output_attentions: |
| | all_self_attentions = all_self_attentions + ( |
| | outputs[2 if use_cache else 1], |
| | ) |
| | if self.config.add_cross_attention: |
| | all_cross_attentions = all_cross_attentions + ( |
| | outputs[3 if use_cache else 2], |
| | ) |
| |
|
| | |
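| | # Model parallelism: when this is the last block on its device, move hidden states to the next device |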
| | if self.model_parallel: |
| | for k, v in self.device_map.items(): |
| | if i == v[-1] and "cuda:" + str(k) != self.last_device: |
| | hidden_states = hidden_states.to("cuda:" + str(k + 1)) |
| |
|
| | hidden_states = self.ln_f(hidden_states) |
| |
|
| | hidden_states = hidden_states.view(output_shape) |
| | |
| | if output_hidden_states: |
| | all_hidden_states = all_hidden_states + (hidden_states,) |
| |
|
| | if not return_dict: |
| | return tuple( |
| | v |
| | for v in [ |
| | hidden_states, |
| | presents, |
| | all_hidden_states, |
| | all_self_attentions, |
| | all_cross_attentions, |
| | ] |
| | if v is not None |
| | ) |
| |
|
| | return BaseModelOutputWithPastAndCrossAttentions( |
| | last_hidden_state=hidden_states, |
| | past_key_values=presents, |
| | hidden_states=all_hidden_states, |
| | attentions=all_self_attentions, |
| | cross_attentions=all_cross_attentions, |
| | ) |
| |
|
| |
|
| | @add_start_docstrings( |
| | """ |
| | The GPT2 Model transformer with a language modeling head on top (linear layer with weights tied to the input |
| | embeddings). |
| | """, |
| | GPT2_START_DOCSTRING, |
| | ) |
| | class GPT2LMHeadModel(GPT2PreTrainedModel, GenerationMixin): |
| | _tied_weights_keys = ["lm_head.weight"] |
| |
|
| | def __init__(self, config): |
| | super().__init__(config) |
| | self.transformer = GPT2Model(config) |
| | self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) |
| |
|
| | |
| | self.model_parallel = False |
| | self.device_map = None |
| |
|
| | |
| | self.post_init() |
| |
|
| | @add_start_docstrings(PARALLELIZE_DOCSTRING) |
| | def parallelize(self, device_map=None): |
| | warnings.warn( |
| | "`GPT2LMHeadModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should load" |
| | " your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your own" |
| | " `device_map` but it needs to be a dictionary module_name to device, so for instance {'transformer.h.0':" |
| | " 0, 'transformer.h.1': 1, ...}", |
| | FutureWarning, |
| | ) |
| | self.device_map = ( |
| | get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) |
| | if device_map is None |
| | else device_map |
| | ) |
| | assert_device_map(self.device_map, len(self.transformer.h)) |
| | self.transformer.parallelize(self.device_map) |
| | self.lm_head = self.lm_head.to(self.transformer.first_device) |
| | self.model_parallel = True |
| |
|
| | @add_start_docstrings(DEPARALLELIZE_DOCSTRING) |
| | def deparallelize(self): |
| | warnings.warn( |
| | "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", |
| | FutureWarning, |
| | ) |
| | self.transformer.deparallelize() |
| | self.transformer = self.transformer.to("cpu") |
| | self.lm_head = self.lm_head.to("cpu") |
| | self.model_parallel = False |
| | torch.cuda.empty_cache() |
| |
|
| | def get_output_embeddings(self): |
| | return self.lm_head |
| |
|
| | def set_output_embeddings(self, new_embeddings): |
| | self.lm_head = new_embeddings |
| |
|
| | @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) |
| | @add_code_sample_docstrings( |
| | checkpoint=_CHECKPOINT_FOR_DOC, |
| | output_type=CausalLMOutputWithCrossAttentions, |
| | config_class=_CONFIG_FOR_DOC, |
| | ) |
| | def forward( |
| | self, |
| | input_ids: Optional[torch.LongTensor] = None, |
| | past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | token_type_ids: Optional[torch.LongTensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | encoder_hidden_states: Optional[torch.Tensor] = None, |
| | encoder_attention_mask: Optional[torch.FloatTensor] = None, |
| | labels: Optional[torch.LongTensor] = None, |
| | use_cache: Optional[bool] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, CausalLMOutputWithCrossAttentions]: |
| | r""" |
| | labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set |
| | `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to `-100` |
| | are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. |
| | """ |
| | return_dict = ( |
| | return_dict if return_dict is not None else self.config.use_return_dict |
| | ) |
| |
|
| | transformer_outputs = self.transformer( |
| | input_ids, |
| | past_key_values=past_key_values, |
| | attention_mask=attention_mask, |
| | token_type_ids=token_type_ids, |
| | position_ids=position_ids, |
| | head_mask=head_mask, |
| | inputs_embeds=inputs_embeds, |
| | encoder_hidden_states=encoder_hidden_states, |
| | encoder_attention_mask=encoder_attention_mask, |
| | use_cache=use_cache, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| | hidden_states = transformer_outputs[0] |
| |
|
| | |
| | if self.model_parallel: |
| | torch.cuda.set_device(self.transformer.first_device) |
| | hidden_states = hidden_states.to(self.lm_head.weight.device) |
| |
|
| | lm_logits = self.lm_head(hidden_states) |
| |
|
| | loss = None |
| | if labels is not None: |
| | |
| | labels = labels.to(lm_logits.device) |
| | |
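| | # Shift so that tokens < n predict token n, then flatten for the cross-entropy loss |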
| | shift_logits = lm_logits[..., :-1, :].contiguous() |
| | shift_labels = labels[..., 1:].contiguous() |
| | |
| | loss_fct = CrossEntropyLoss() |
| | loss = loss_fct( |
| | shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1) |
| | ) |
| |
|
| | if not return_dict: |
| | output = (lm_logits,) + transformer_outputs[1:] |
| | return ((loss,) + output) if loss is not None else output |
| |
|
| | return CausalLMOutputWithCrossAttentions( |
| | loss=loss, |
| | logits=lm_logits, |
| | past_key_values=transformer_outputs.past_key_values, |
| | hidden_states=transformer_outputs.hidden_states, |
| | attentions=transformer_outputs.attentions, |
| | cross_attentions=transformer_outputs.cross_attentions, |
| | ) |
| |
|
| | @staticmethod |
| | def _reorder_cache( |
| | past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor |
| | ) -> Tuple[Tuple[torch.Tensor]]: |
| | """ |
| | This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or |
| | [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct |
| | beam_idx at every generation step. |
| | """ |
| | return tuple( |
| | tuple( |
| | past_state.index_select(0, beam_idx.to(past_state.device)) |
| | for past_state in layer_past |
| | ) |
| | for layer_past in past_key_values |
| | ) |
| |
|
| |
|
| | @add_start_docstrings( |
| | """ |
| | The GPT2 Model transformer with a language modeling and a multiple-choice classification head on top e.g. for |
| | RocStories/SWAG tasks. The two heads are two linear layers. The language modeling head has its weights tied to the |
| | input embeddings; the classification head takes as input the hidden state at the specified classification token index |
| | in the input sequence. |
| | """, |
| | GPT2_START_DOCSTRING, |
| | ) |
| | class GPT2DoubleHeadsModel(GPT2PreTrainedModel, GenerationMixin): |
| | _tied_weights_keys = ["lm_head.weight"] |
| |
|
| | def __init__(self, config): |
| | super().__init__(config) |
| | config.num_labels = 1 |
| | self.transformer = GPT2Model(config) |
| | self.lm_head = nn.Linear(config.n_embd, config.vocab_size, bias=False) |
| | self.multiple_choice_head = SequenceSummary(config) |
| |
|
| | |
| | self.model_parallel = False |
| | self.device_map = None |
| |
|
| | |
| | self.post_init() |
| |
|
| | @add_start_docstrings(PARALLELIZE_DOCSTRING) |
| | def parallelize(self, device_map=None): |
| | warnings.warn( |
| | "`GPT2DoubleHeadsModel.parallelize` is deprecated and will be removed in v5 of Transformers, you should" |
| | " load your model with `device_map='balanced'` in the call to `from_pretrained`. You can also provide your" |
| | " own `device_map` but it needs to be a dictionary module_name to device, so for instance" |
| | " {'transformer.h.0': 0, 'transformer.h.1': 1, ...}", |
| | FutureWarning, |
| | ) |
| | self.device_map = ( |
| | get_device_map(len(self.transformer.h), range(torch.cuda.device_count())) |
| | if device_map is None |
| | else device_map |
| | ) |
| | assert_device_map(self.device_map, len(self.transformer.h)) |
| | self.transformer.parallelize(self.device_map) |
| | self.lm_head = self.lm_head.to(self.transformer.first_device) |
| | self.multiple_choice_head = self.multiple_choice_head.to( |
| | self.transformer.first_device |
| | ) |
| | self.model_parallel = True |
| |
|
| | @add_start_docstrings(DEPARALLELIZE_DOCSTRING) |
| | def deparallelize(self): |
| | warnings.warn( |
| | "Like `parallelize`, `deparallelize` is deprecated and will be removed in v5 of Transformers.", |
| | FutureWarning, |
| | ) |
| | self.transformer.deparallelize() |
| | self.transformer = self.transformer.to("cpu") |
| | self.lm_head = self.lm_head.to("cpu") |
| | self.multiple_choice_head = self.multiple_choice_head.to("cpu") |
| | self.model_parallel = False |
| | torch.cuda.empty_cache() |
| |
|
| | def get_output_embeddings(self): |
| | return self.lm_head |
| |
|
| | def set_output_embeddings(self, new_embeddings): |
| | self.lm_head = new_embeddings |
| |
|
| | @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) |
| | @replace_return_docstrings( |
| | output_type=GPT2DoubleHeadsModelOutput, config_class=_CONFIG_FOR_DOC |
| | ) |
| | def forward( |
| | self, |
| | input_ids: Optional[torch.LongTensor] = None, |
| | past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | token_type_ids: Optional[torch.LongTensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | mc_token_ids: Optional[torch.LongTensor] = None, |
| | labels: Optional[torch.LongTensor] = None, |
| | mc_labels: Optional[torch.LongTensor] = None, |
| | use_cache: Optional[bool] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | **kwargs, |
| | ) -> Union[Tuple, GPT2DoubleHeadsModelOutput]: |
| | r""" |
| | mc_token_ids (`torch.LongTensor` of shape `(batch_size, num_choices)`, *optional*, defaults to index of the last token of the input): |
| | Index of the classification token in each input sequence. Selected in the range `[0, input_ids.size(-1) - |
| | 1]`. |
| | labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set |
| | `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size - 1]`. All labels set to |
| | `-100` are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size - 1]`. |
| | mc_labels (`torch.LongTensor` of shape `(batch_size)`, *optional*): |
| | Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices - 1]`, |
| | where *num_choices* is the size of the second dimension of the input tensors (see *input_ids* above). |
| | |
| | Return: |
| | |
| | Example: |
| | |
| | ```python |
| | >>> import torch |
| | >>> from transformers import AutoTokenizer, GPT2DoubleHeadsModel |
| | |
| | >>> tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2") |
| | >>> model = GPT2DoubleHeadsModel.from_pretrained("openai-community/gpt2") |
| | |
| | >>> # Add a [CLS] to the vocabulary (we should train it also!) |
| | >>> num_added_tokens = tokenizer.add_special_tokens({"cls_token": "[CLS]"}) |
| | >>> # Update the model embeddings with the new vocabulary size |
| | >>> embedding_layer = model.resize_token_embeddings(len(tokenizer)) |
| | |
| | >>> choices = ["Hello, my dog is cute [CLS]", "Hello, my cat is cute [CLS]"] |
| | >>> encoded_choices = [tokenizer.encode(s) for s in choices] |
| | >>> cls_token_location = [tokens.index(tokenizer.cls_token_id) for tokens in encoded_choices] |
| | |
| | >>> input_ids = torch.tensor(encoded_choices).unsqueeze(0) # Batch size: 1, number of choices: 2 |
| | >>> mc_token_ids = torch.tensor([cls_token_location]) # Batch size: 1 |
| | |
| | >>> outputs = model(input_ids, mc_token_ids=mc_token_ids) |
| | >>> lm_logits = outputs.logits |
| | >>> mc_logits = outputs.mc_logits |
| | ```""" |
| | return_dict = ( |
| | return_dict if return_dict is not None else self.config.use_return_dict |
| | ) |
| |
|
| | transformer_outputs = self.transformer( |
| | input_ids, |
| | past_key_values=past_key_values, |
| | attention_mask=attention_mask, |
| | token_type_ids=token_type_ids, |
| | position_ids=position_ids, |
| | head_mask=head_mask, |
| | inputs_embeds=inputs_embeds, |
| | use_cache=use_cache, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | hidden_states = transformer_outputs[0] |
| |
|
| | # Set device for model parallelism |
| | if self.model_parallel: |
| | torch.cuda.set_device(self.transformer.first_device) |
| | hidden_states = hidden_states.to(self.lm_head.weight.device) |
| |
|
| | lm_logits = self.lm_head(hidden_states) |
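| | # The multiple-choice head pools the hidden state at each `mc_token_ids` position and |
| | # projects it to a single score, so squeeze(-1) yields mc_logits of shape (batch, num_choices) |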
| | mc_logits = self.multiple_choice_head(hidden_states, mc_token_ids).squeeze(-1) |
| |
|
| | mc_loss = None |
| | if mc_labels is not None: |
| | loss_fct = CrossEntropyLoss() |
| | mc_loss = loss_fct( |
| | mc_logits.view(-1, mc_logits.size(-1)), mc_labels.view(-1) |
| | ) |
| | lm_loss = None |
| | if labels is not None: |
| | labels = labels.to(lm_logits.device) |
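| | # Shift so that tokens < n predict n: logits at position t are scored against the label at t + 1 |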
| | shift_logits = lm_logits[..., :-1, :].contiguous() |
| | shift_labels = labels[..., 1:].contiguous() |
| | loss_fct = CrossEntropyLoss() |
| | lm_loss = loss_fct( |
| | shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1) |
| | ) |
| |
|
| | if not return_dict: |
| | output = (lm_logits, mc_logits) + transformer_outputs[1:] |
| | if mc_loss is not None: |
| | output = (mc_loss,) + output |
| | return ((lm_loss,) + output) if lm_loss is not None else output |
| |
|
| | return GPT2DoubleHeadsModelOutput( |
| | loss=lm_loss, |
| | mc_loss=mc_loss, |
| | logits=lm_logits, |
| | mc_logits=mc_logits, |
| | past_key_values=transformer_outputs.past_key_values, |
| | hidden_states=transformer_outputs.hidden_states, |
| | attentions=transformer_outputs.attentions, |
| | ) |
| |
|
| | @staticmethod |
| | def _reorder_cache( |
| | past_key_values: Tuple[Tuple[torch.Tensor]], beam_idx: torch.Tensor |
| | ) -> Tuple[Tuple[torch.Tensor]]: |
| | """ |
| | This function is used to re-order the `past_key_values` cache if [`~PreTrainedModel.beam_search`] or |
| | [`~PreTrainedModel.beam_sample`] is called. This is required to match `past_key_values` with the correct |
| | beam_idx at every generation step. |
| | """ |
| | return tuple( |
| | tuple( |
| | past_state.index_select(0, beam_idx.to(past_state.device)) |
| | for past_state in layer_past |
| | ) |
| | for layer_past in past_key_values |
| | ) |
| |
|
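| | # --- Editor's illustrative sketch (not part of the original model code) --------------- |
| | # In `GPT2DoubleHeadsModel` above, the multiple-choice head scores each choice from the |
| | # hidden state at the position given by `mc_token_ids` (typically the appended [CLS] |
| | # token). This sketch (an assumed, hypothetical helper) shows only that gather-by-index |
| | # pooling step on toy tensors; the real `SequenceSummary` module additionally applies a |
| | # configurable projection, activation and dropout. |
| | def _editor_example_cls_index_pooling(): |
| |     import torch |
| |     batch, num_choices, seq_len, hidden = 1, 2, 7, 16 |
| |     hidden_states = torch.randn(batch, num_choices, seq_len, hidden) |
| |     mc_token_ids = torch.tensor([[6, 6]])  # classification-token position per choice |
| |     # Expand the indices so gather picks one hidden vector per (batch, choice) |
| |     index = mc_token_ids[..., None, None].expand(-1, -1, 1, hidden) |
| |     pooled = hidden_states.gather(-2, index).squeeze(-2)  # (batch, num_choices, hidden) |
| |     score = torch.nn.Linear(hidden, 1) |
| |     mc_logits = score(pooled).squeeze(-1)  # (batch, num_choices) |
| |     assert mc_logits.shape == (batch, num_choices) |
| | # --------------------------------------------------------------------------------------- |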
| |
|
| | @add_start_docstrings( |
| | """ |
| | The GPT2 Model transformer with a sequence classification head on top (linear layer). |
| | |
| | [`GPT2ForSequenceClassification`] uses the last token in order to do the classification, as other causal models |
| | (e.g. GPT-1) do. |
| | |
| | Since it does classification on the last token, it needs to know the position of the last token. If a |
| | `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If |
| | no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the |
| | padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (takes the last value in |
| | each row of the batch). |
| | """, |
| | GPT2_START_DOCSTRING, |
| | ) |
| | class GPT2ForSequenceClassification(GPT2PreTrainedModel): |
| | def __init__(self, config): |
| | super().__init__(config) |
| | self.num_labels = config.num_labels |
| | self.transformer = GPT2Model(config) |
| | self.score = nn.Linear(config.n_embd, self.num_labels, bias=False) |
| |
|
| | # Model parallel |
| | self.model_parallel = False |
| | self.device_map = None |
| |
|
| | # Initialize weights and apply final processing |
| | self.post_init() |
| |
|
| | @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) |
| | @add_code_sample_docstrings( |
| | checkpoint="microsoft/DialogRPT-updown", |
| | output_type=SequenceClassifierOutputWithPast, |
| | config_class=_CONFIG_FOR_DOC, |
| | ) |
| | def forward( |
| | self, |
| | input_ids: Optional[torch.LongTensor] = None, |
| | past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | token_type_ids: Optional[torch.LongTensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | labels: Optional[torch.LongTensor] = None, |
| | use_cache: Optional[bool] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, SequenceClassifierOutputWithPast]: |
| | r""" |
| | labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| | Labels for computing the sequence classification/regression loss. Indices should be in `[0, ..., |
| | config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If |
| | `config.num_labels > 1` a classification loss is computed (Cross-Entropy). |
| | """ |
| | return_dict = ( |
| | return_dict if return_dict is not None else self.config.use_return_dict |
| | ) |
| |
|
| | transformer_outputs = self.transformer( |
| | input_ids, |
| | past_key_values=past_key_values, |
| | attention_mask=attention_mask, |
| | token_type_ids=token_type_ids, |
| | position_ids=position_ids, |
| | head_mask=head_mask, |
| | inputs_embeds=inputs_embeds, |
| | use_cache=use_cache, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| | hidden_states = transformer_outputs[0] |
| | logits = self.score(hidden_states) |
| |
|
| | if input_ids is not None: |
| | batch_size, sequence_length = input_ids.shape[:2] |
| | else: |
| | batch_size, sequence_length = inputs_embeds.shape[:2] |
| |
|
| | assert ( |
| | self.config.pad_token_id is not None or batch_size == 1 |
| | ), "Cannot handle batch sizes > 1 if no padding token is defined." |
| | if self.config.pad_token_id is None: |
| | sequence_lengths = -1 |
| | else: |
| | if input_ids is not None: |
| | # The first pad position minus one is the last real token; the modulo maps rows with |
| | # no padding (argmax == 0, hence -1) back to the last position and keeps the op ONNX-exportable |
| | sequence_lengths = ( |
| | torch.eq(input_ids, self.config.pad_token_id).int().argmax(-1) - 1 |
| | ) |
| | sequence_lengths = sequence_lengths % input_ids.shape[-1] |
| | sequence_lengths = sequence_lengths.to(logits.device) |
| | else: |
| | sequence_lengths = -1 |
| | logger.warning_once( |
| | f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be " |
| | "unexpected if using padding tokens in conjunction with `inputs_embeds.`" |
| | ) |
| |
|
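| | # For each row, take the logits at that row's last non-padding token (or simply the last |
| | # position when no pad token is defined) |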
| | pooled_logits = logits[ |
| | torch.arange(batch_size, device=logits.device), sequence_lengths |
| | ] |
| |
|
| | loss = None |
| | if labels is not None: |
| | if self.config.problem_type is None: |
| | if self.num_labels == 1: |
| | self.config.problem_type = "regression" |
| | elif self.num_labels > 1 and ( |
| | labels.dtype == torch.long or labels.dtype == torch.int |
| | ): |
| | self.config.problem_type = "single_label_classification" |
| | else: |
| | self.config.problem_type = "multi_label_classification" |
| |
|
| | if self.config.problem_type == "regression": |
| | loss_fct = MSELoss() |
| | if self.num_labels == 1: |
| | loss = loss_fct(pooled_logits.squeeze(), labels.squeeze()) |
| | else: |
| | loss = loss_fct(pooled_logits, labels) |
| | elif self.config.problem_type == "single_label_classification": |
| | loss_fct = CrossEntropyLoss() |
| | loss = loss_fct( |
| | pooled_logits.view(-1, self.num_labels), labels.view(-1) |
| | ) |
| | elif self.config.problem_type == "multi_label_classification": |
| | loss_fct = BCEWithLogitsLoss() |
| | loss = loss_fct(pooled_logits, labels) |
| | if not return_dict: |
| | output = (pooled_logits,) + transformer_outputs[1:] |
| | return ((loss,) + output) if loss is not None else output |
| |
|
| | return SequenceClassifierOutputWithPast( |
| | loss=loss, |
| | logits=pooled_logits, |
| | past_key_values=transformer_outputs.past_key_values, |
| | hidden_states=transformer_outputs.hidden_states, |
| | attentions=transformer_outputs.attentions, |
| | ) |
| |
|
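| | # --- Editor's illustrative sketch (not part of the original model code) --------------- |
| | # `GPT2ForSequenceClassification` above pools the logits at the last non-padding token |
| | # of each row. This sketch (an assumed, hypothetical helper) reproduces that index |
| | # computation: argmax over the pad mask finds the first pad position, subtracting 1 gives |
| | # the last real token, and the modulo maps rows with no padding at all back to the final |
| | # position. |
| | def _editor_example_last_token_pooling(): |
| |     import torch |
| |     pad_token_id = 50256 |
| |     input_ids = torch.tensor( |
| |         [ |
| |             [11, 22, 33, pad_token_id, pad_token_id],  # 3 real tokens -> index 2 |
| |             [44, 55, 66, 77, 88],  # no padding -> index 4 |
| |         ] |
| |     ) |
| |     sequence_lengths = torch.eq(input_ids, pad_token_id).int().argmax(-1) - 1 |
| |     sequence_lengths = sequence_lengths % input_ids.shape[-1] |
| |     assert sequence_lengths.tolist() == [2, 4] |
| |     logits = torch.randn(2, 5, 3)  # (batch, seq_len, num_labels) |
| |     pooled_logits = logits[torch.arange(2), sequence_lengths]  # (batch, num_labels) |
| |     assert pooled_logits.shape == (2, 3) |
| | # --------------------------------------------------------------------------------------- |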
| |
|
| | @add_start_docstrings( |
| | """ |
| | GPT2 Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for |
| | Named-Entity-Recognition (NER) tasks. |
| | """, |
| | GPT2_START_DOCSTRING, |
| | ) |
| | class GPT2ForTokenClassification(GPT2PreTrainedModel): |
| | def __init__(self, config): |
| | super().__init__(config) |
| | self.num_labels = config.num_labels |
| |
|
| | self.transformer = GPT2Model(config) |
| | if ( |
| | hasattr(config, "classifier_dropout") |
| | and config.classifier_dropout is not None |
| | ): |
| | classifier_dropout = config.classifier_dropout |
| | elif hasattr(config, "hidden_dropout") and config.hidden_dropout is not None: |
| | classifier_dropout = config.hidden_dropout |
| | else: |
| | classifier_dropout = 0.1 |
| | self.dropout = nn.Dropout(classifier_dropout) |
| | self.classifier = nn.Linear(config.hidden_size, config.num_labels) |
| |
|
| | # Model parallel |
| | self.model_parallel = False |
| | self.device_map = None |
| |
|
| | # Initialize weights and apply final processing |
| | self.post_init() |
| |
|
| | @add_start_docstrings_to_model_forward(GPT2_INPUTS_DOCSTRING) |
| | |
| | @add_code_sample_docstrings( |
| | checkpoint="brad1141/gpt2-finetuned-comp2", |
| | output_type=TokenClassifierOutput, |
| | config_class=_CONFIG_FOR_DOC, |
| | expected_loss=0.25, |
| | expected_output=[ |
| | "Lead", |
| | "Lead", |
| | "Lead", |
| | "Position", |
| | "Lead", |
| | "Lead", |
| | "Lead", |
| | "Lead", |
| | "Lead", |
| | "Lead", |
| | "Lead", |
| | "Lead", |
| | ], |
| | ) |
| | |
| | def forward( |
| | self, |
| | input_ids: Optional[torch.LongTensor] = None, |
| | past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | token_type_ids: Optional[torch.LongTensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | labels: Optional[torch.LongTensor] = None, |
| | use_cache: Optional[bool] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, TokenClassifierOutput]: |
| | r""" |
| | labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): |
| | Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`. |
| | """ |
| | return_dict = ( |
| | return_dict if return_dict is not None else self.config.use_return_dict |
| | ) |
| |
|
| | transformer_outputs = self.transformer( |
| | input_ids, |
| | past_key_values=past_key_values, |
| | attention_mask=attention_mask, |
| | token_type_ids=token_type_ids, |
| | position_ids=position_ids, |
| | head_mask=head_mask, |
| | inputs_embeds=inputs_embeds, |
| | use_cache=use_cache, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | hidden_states = transformer_outputs[0] |
| | hidden_states = self.dropout(hidden_states) |
| | logits = self.classifier(hidden_states) |
| |
|
| | loss = None |
| | if labels is not None: |
| | labels = labels.to(logits.device) |
| | loss_fct = CrossEntropyLoss() |
| | loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1)) |
| |
|
| | if not return_dict: |
| | output = (logits,) + transformer_outputs[2:] |
| | return ((loss,) + output) if loss is not None else output |
| |
|
| | return TokenClassifierOutput( |
| | loss=loss, |
| | logits=logits, |
| | hidden_states=transformer_outputs.hidden_states, |
| | attentions=transformer_outputs.attentions, |
| | ) |
| |
|
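| | # --- Editor's illustrative sketch (not part of the original model code) --------------- |
| | # `GPT2ForTokenClassification` above emits one logit vector per input position and |
| | # flattens both logits and labels before the cross-entropy. This sketch (an assumed, |
| | # hypothetical helper) mirrors that loss step and shows how positions labelled -100 |
| | # (e.g. padding or special tokens) are excluded, since `CrossEntropyLoss` ignores |
| | # index -100 by default. |
| | def _editor_example_token_classification_loss(): |
| |     import torch |
| |     from torch.nn import CrossEntropyLoss |
| |     batch, seq_len, num_labels = 2, 4, 3 |
| |     logits = torch.randn(batch, seq_len, num_labels) |
| |     labels = torch.tensor( |
| |         [ |
| |             [0, 2, 1, -100],  # last position is padding -> ignored |
| |             [1, 1, 0, 2], |
| |         ] |
| |     ) |
| |     loss = CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1)) |
| |     assert loss.ndim == 0  # scalar loss over the seven non-ignored positions |
| | # --------------------------------------------------------------------------------------- |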
| |
|
| | @add_start_docstrings( |
| | """ |
| | The GPT-2 Model transformer with a span classification head on top for extractive question-answering tasks like |
| | SQuAD (a linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`). |
| | """, |
| | GPT2_START_DOCSTRING, |
| | ) |
| | class GPT2ForQuestionAnswering(GPT2PreTrainedModel): |
| | def __init__(self, config): |
| | super().__init__(config) |
| | self.num_labels = config.num_labels |
| | self.transformer = GPT2Model(config) |
| | self.qa_outputs = nn.Linear(config.hidden_size, 2) |
| |
|
| | # Model parallel |
| | self.model_parallel = False |
| | self.device_map = None |
| |
|
| | # Initialize weights and apply final processing |
| | self.post_init() |
| |
|
| | @add_start_docstrings_to_model_forward( |
| | GPT2_INPUTS_DOCSTRING.format("batch_size, sequence_length") |
| | ) |
| | @add_code_sample_docstrings( |
| | checkpoint=_CHECKPOINT_FOR_DOC, |
| | output_type=QuestionAnsweringModelOutput, |
| | config_class=_CONFIG_FOR_DOC, |
| | real_checkpoint=_CHECKPOINT_FOR_DOC, |
| | ) |
| | def forward( |
| | self, |
| | input_ids: Optional[torch.LongTensor] = None, |
| | attention_mask: Optional[torch.FloatTensor] = None, |
| | token_type_ids: Optional[torch.LongTensor] = None, |
| | position_ids: Optional[torch.LongTensor] = None, |
| | head_mask: Optional[torch.FloatTensor] = None, |
| | inputs_embeds: Optional[torch.FloatTensor] = None, |
| | start_positions: Optional[torch.LongTensor] = None, |
| | end_positions: Optional[torch.LongTensor] = None, |
| | output_attentions: Optional[bool] = None, |
| | output_hidden_states: Optional[bool] = None, |
| | return_dict: Optional[bool] = None, |
| | ) -> Union[Tuple, QuestionAnsweringModelOutput]: |
| | r""" |
| | start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| | Labels for position (index) of the start of the labelled span for computing the token classification loss. |
| | Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence |
| | are not taken into account for computing the loss. |
| | end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*): |
| | Labels for position (index) of the end of the labelled span for computing the token classification loss. |
| | Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence |
| | are not taken into account for computing the loss. |
| | """ |
| | return_dict = ( |
| | return_dict if return_dict is not None else self.config.use_return_dict |
| | ) |
| |
|
| | outputs = self.transformer( |
| | input_ids, |
| | attention_mask=attention_mask, |
| | token_type_ids=token_type_ids, |
| | position_ids=position_ids, |
| | head_mask=head_mask, |
| | inputs_embeds=inputs_embeds, |
| | output_attentions=output_attentions, |
| | output_hidden_states=output_hidden_states, |
| | return_dict=return_dict, |
| | ) |
| |
|
| | sequence_output = outputs[0] |
| |
|
| | logits = self.qa_outputs(sequence_output) |
| | start_logits, end_logits = logits.split(1, dim=-1) |
| | start_logits = start_logits.squeeze(-1).contiguous() |
| | end_logits = end_logits.squeeze(-1).contiguous() |
| |
|
| | total_loss = None |
| | if start_positions is not None and end_positions is not None: |
| | # On a multi-GPU setup the start/end positions may carry an extra dimension; squeeze it away |
| | if len(start_positions.size()) > 1: |
| | start_positions = start_positions.squeeze(-1).to(start_logits.device) |
| | if len(end_positions.size()) > 1: |
| | end_positions = end_positions.squeeze(-1).to(end_logits.device) |
| | # Positions outside the model inputs are clamped to `ignored_index` so they do not contribute to the loss |
| | ignored_index = start_logits.size(1) |
| | start_positions = start_positions.clamp(0, ignored_index) |
| | end_positions = end_positions.clamp(0, ignored_index) |
| |
|
| | loss_fct = CrossEntropyLoss(ignore_index=ignored_index) |
| | start_loss = loss_fct(start_logits, start_positions) |
| | end_loss = loss_fct(end_logits, end_positions) |
| | total_loss = (start_loss + end_loss) / 2 |
| |
|
| | if not return_dict: |
| | output = (start_logits, end_logits) + outputs[2:] |
| | return ((total_loss,) + output) if total_loss is not None else output |
| |
|
| | return QuestionAnsweringModelOutput( |
| | loss=total_loss, |
| | start_logits=start_logits, |
| | end_logits=end_logits, |
| | hidden_states=outputs.hidden_states, |
| | attentions=outputs.attentions, |
| | ) |
| |
|
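| | # --- Editor's illustrative sketch (not part of the original model code) --------------- |
| | # `GPT2ForQuestionAnswering` above projects each hidden state to two scores and splits |
| | # them into start/end logits. A common way to turn those logits into an answer span at |
| | # inference time is to take the argmax of each, as sketched here on toy tensors (the |
| | # helper name is assumed, not from the library). |
| | def _editor_example_extract_span(): |
| |     import torch |
| |     batch, seq_len, hidden = 1, 10, 16 |
| |     sequence_output = torch.randn(batch, seq_len, hidden) |
| |     qa_outputs = torch.nn.Linear(hidden, 2) |
| |     logits = qa_outputs(sequence_output)  # (batch, seq_len, 2) |
| |     start_logits, end_logits = logits.split(1, dim=-1) |
| |     start_logits = start_logits.squeeze(-1)  # (batch, seq_len) |
| |     end_logits = end_logits.squeeze(-1) |
| |     start_index = int(start_logits.argmax(dim=-1)) |
| |     end_index = int(end_logits.argmax(dim=-1)) |
| |     # The predicted answer would be the tokens in [start_index, end_index] when start <= end |
| |     assert 0 <= start_index < seq_len and 0 <= end_index < seq_len |
| | # --------------------------------------------------------------------------------------- |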