jeiku committed
Commit 0e00cd3
1 Parent(s): a3337e1

modeling_stablelm_epoch.py

Files changed (1)
  1. modeling_stablelm_epoch.py +0 -915
modeling_stablelm_epoch.py DELETED
@@ -1,915 +0,0 @@
1
- # coding=utf-8
2
- # Copyright 2023 Stability AI, EleutherAI, and The HuggingFace Inc. team. All rights reserved.
3
- #
4
- # Licensed under the Apache License, Version 2.0 (the "License");
5
- # you may not use this file except in compliance with the License.
6
- # You may obtain a copy of the License at
7
- #
8
- # http://www.apache.org/licenses/LICENSE-2.0
9
- #
10
- # Unless required by applicable law or agreed to in writing, software
11
- # distributed under the License is distributed on an "AS IS" BASIS,
12
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- # See the License for the specific language governing permissions and
14
- # limitations under the License.
15
- #
16
- # This code is based off the following work:
17
- # https://github.com/huggingface/transformers/blob/main/src/transformers/models/llama/modeling_llama.py
18
- # https://github.com/huggingface/transformers/blob/main/src/transformers/models/gpt_neox/modeling_gpt_neox.py
19
- """ PyTorch StableLM Epoch model. """
20
- from typing import Optional, Tuple, Union
21
- import math
22
- import warnings
23
-
24
- import torch
25
- import torch.nn.functional as F
26
- import torch.utils.checkpoint
27
- from torch import nn
28
- from torch.nn import CrossEntropyLoss
29
-
30
- from transformers.modeling_outputs import (
31
- BaseModelOutputWithPast,
32
- CausalLMOutputWithPast,
33
- )
34
- from transformers.modeling_utils import PreTrainedModel
35
- from transformers.utils import logging, is_flash_attn_greater_or_equal_2_10
36
-
37
- from .configuration_stablelm_epoch import StableLMEpochConfig
38
-
39
- try:
40
- from flash_attn import flash_attn_func, flash_attn_varlen_func
41
- from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
42
- except ImportError:
43
- flash_attn_func, flash_attn_varlen_func = None, None
44
- index_first_axis, pad_input, unpad_input = None, None, None
45
-
46
-
47
- logger = logging.get_logger(__name__)
48
-
49
-
50
- # Copied from transformers.models.llama.modeling_llama._get_unpad_data
51
- def _get_unpad_data(attention_mask):
52
- seqlens_in_batch = attention_mask.sum(dim=-1, dtype=torch.int32)
53
- indices = torch.nonzero(attention_mask.flatten(), as_tuple=False).flatten()
54
- max_seqlen_in_batch = seqlens_in_batch.max().item()
55
- cu_seqlens = F.pad(torch.cumsum(seqlens_in_batch, dim=0, dtype=torch.int32), (1, 0))
56
- return (
57
- indices,
58
- cu_seqlens,
59
- max_seqlen_in_batch,
60
- )
61
-
62
-
63
- # Copied from transformers.models.bart.modeling_bart._make_causal_mask
64
- def _make_causal_mask(
65
- input_ids_shape: torch.Size,
66
- dtype: torch.dtype,
67
- device: torch.device,
68
- past_key_values_length: int = 0,
69
- ):
70
- """Make causal mask used for bi-directional self-attention."""
71
- batch_size, tgt_len = input_ids_shape
72
- mask = torch.full((tgt_len, tgt_len), torch.finfo(torch.float16).min, device=device)
73
- mask_cond = torch.arange(mask.size(-1), device=device)
74
- mask.masked_fill_(mask_cond < (mask_cond + 1).view(mask.size(-1), 1), 0)
75
- mask = mask.to(dtype)
76
- if past_key_values_length > 0:
77
- mask = torch.cat([torch.zeros(tgt_len, past_key_values_length, dtype=dtype, device=device), mask], dim=-1)
78
- return mask[None, None, :, :].expand(batch_size, 1, tgt_len, tgt_len + past_key_values_length)
79
-
80
-
81
- # Copied from transformers.models.bart.modeling_bart._expand_mask
82
- def _expand_mask(mask: torch.Tensor, dtype: torch.dtype, tgt_len: Optional[int] = None):
83
- """Expands attention_mask from `[batch_size, seq_len]` to `[batch_size, 1, tgt_seq_len, src_seq_len]`."""
84
- batch_size, src_len = mask.size()
85
- tgt_len = tgt_len if tgt_len is not None else src_len
86
-
87
- expanded_mask = mask[:, None, None, :].expand(batch_size, 1, tgt_len, src_len).to(dtype)
88
- inverted_mask = 1.0 - expanded_mask
89
-
90
- return inverted_mask.masked_fill(
91
- inverted_mask.to(torch.bool), torch.finfo(dtype).min
92
- )
93
-
94
-
95
- class RotaryEmbedding(nn.Module):
96
- def __init__(
97
- self,
98
- dim: int,
99
- max_position_embeddings: int,
100
- base: int = 10_000,
101
- device: Optional[torch.device] = None,
102
- ):
103
- super().__init__()
104
-
105
- self.dim = dim
106
- self.max_position_embeddings = max_position_embeddings
107
- self.base = base
108
- inv_freq = 1.0 / (self.base ** (torch.arange(0, self.dim, 2, device=device, dtype=torch.float32) / self.dim))
109
- self.register_buffer("inv_freq", inv_freq, persistent=False)
110
-
111
- # Build here to make `torch.jit.trace` work.
112
- self._set_cos_sin_cache(
113
- seq_len=max_position_embeddings, device=self.inv_freq.device, dtype=torch.get_default_dtype(),
114
- )
115
-
116
- def _set_cos_sin_cache(self, seq_len: int, device: torch.device, dtype: torch.dtype):
117
- self.max_seq_len_cached = seq_len
118
- t = torch.arange(self.max_seq_len_cached, device=device, dtype=torch.float32)
119
-
120
- # Don't do einsum, it converts fp32 to fp16 under AMP
121
- # freqs = torch.einsum("i,j->ij", t, self.inv_freq)
122
- freqs = torch.outer(t, self.inv_freq)
123
- # Different from paper, but it uses a different permutation in order to obtain the same calculation
124
- emb = torch.cat((freqs, freqs), dim=-1)
125
- self.register_buffer("cos_cached", emb.cos()[None, None, :, :].to(dtype), persistent=False)
126
- self.register_buffer("sin_cached", emb.sin()[None, None, :, :].to(dtype), persistent=False)
127
-
128
- def forward(self, x: torch.Tensor, seq_len: Optional[int] = None):
129
- # x: [batch_size, num_heads, seq_len, head_size]
130
- if seq_len > self.max_seq_len_cached:
131
- self._set_cos_sin_cache(seq_len=seq_len, device=x.device, dtype=torch.get_default_dtype())
132
- return (
133
- self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
134
- self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype),
135
- )
136
-
137
-
138
- def rotate_half(x: torch.Tensor):
139
- """Rotates half the hidden dims of the input."""
140
- x1, x2 = torch.chunk(x, 2, dim=-1)
141
- return torch.cat((-x2, x1), dim=-1)
142
-
143
-
144
- def apply_rotary_pos_emb(q, k, cos, sin, position_ids):
145
- # The first two dimensions of cos and sin are always 1, so we can `squeeze` them.
146
- cos = cos.squeeze(1).squeeze(0) # [seq_len, dim]
147
- sin = sin.squeeze(1).squeeze(0) # [seq_len, dim]
148
- cos = cos[position_ids].unsqueeze(1) # [batch_size, 1, seq_len, dim]
149
- sin = sin[position_ids].unsqueeze(1) # [batch_size, 1, seq_len, dim]
150
- q_embed = (q * cos) + (rotate_half(q) * sin)
151
- k_embed = (k * cos) + (rotate_half(k) * sin)
152
- return q_embed, k_embed
153
-
154
-
155
- class MLP(nn.Module):
156
- def __init__(self, config: StableLMEpochConfig):
157
- super().__init__()
158
- self.config = config
159
- self.hidden_size = config.hidden_size
160
- self.intermediate_size = config.intermediate_size
161
- self.gate_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
162
- self.up_proj = nn.Linear(config.hidden_size, config.intermediate_size, bias=False)
163
- self.down_proj = nn.Linear(config.intermediate_size, config.hidden_size, bias=False)
164
- self.act_fn = nn.SiLU()
165
-
166
- def forward(self, x: torch.Tensor) -> torch.Tensor:
167
- return self.down_proj(self.act_fn(self.gate_proj(x)) * self.up_proj(x))
168
-
169
-
170
- def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
171
- """
172
- This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
173
- num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
174
- """
175
- batch, num_key_value_heads, slen, head_dim = hidden_states.shape
176
- if n_rep == 1:
177
- return hidden_states
178
- hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
179
- return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
180
-
181
-
182
- class Attention(nn.Module):
183
- def __init__(self, config: StableLMEpochConfig):
184
- super().__init__()
185
- self.config = config
186
- self.hidden_size = config.hidden_size
187
- self.num_heads = config.num_attention_heads
188
- self.head_dim = self.hidden_size // self.num_heads
189
- self.num_key_value_heads = config.num_key_value_heads
190
- self.num_key_value_groups = self.num_heads // self.num_key_value_heads
191
- self.max_position_embeddings = config.max_position_embeddings
192
- self.is_causal = True
193
-
194
- if (self.head_dim * self.num_heads) != self.hidden_size:
195
- raise ValueError(
196
- f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
197
- f" and `num_heads`: {self.num_heads})."
198
- )
199
- self.q_proj = nn.Linear(self.hidden_size, self.num_heads * self.head_dim, bias=False)
200
- self.k_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
201
- self.v_proj = nn.Linear(self.hidden_size, self.num_key_value_heads * self.head_dim, bias=False)
202
- self.o_proj = nn.Linear(self.hidden_size, self.hidden_size, bias=False)
203
-
204
- self._init_rope()
205
-
206
- def _init_rope(self):
207
- self.rotary_ndims = int(self.head_dim * self.config.rope_pct)
208
- self.rotary_emb = RotaryEmbedding(
209
- self.rotary_ndims,
210
- max_position_embeddings=self.config.max_position_embeddings,
211
- base=self.config.rope_theta,
212
- )
213
-
214
- def forward(
215
- self,
216
- hidden_states: torch.FloatTensor,
217
- attention_mask: torch.FloatTensor,
218
- position_ids: torch.LongTensor,
219
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
220
- output_attentions: Optional[bool] = False,
221
- use_cache: Optional[bool] = False,
222
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
223
- bsz, q_len, _ = hidden_states.size()
224
-
225
- query_states = self.q_proj(hidden_states)
226
- key_states = self.k_proj(hidden_states)
227
- value_states = self.v_proj(hidden_states)
228
-
229
- query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
230
- key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
231
- value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
232
-
233
- query_rot = query_states[..., : self.rotary_ndims]
234
- query_pass = query_states[..., self.rotary_ndims :]
235
- key_rot = key_states[..., : self.rotary_ndims]
236
- key_pass = key_states[..., self.rotary_ndims :]
237
-
238
- kv_seq_len = key_states.shape[-2]
239
- if past_key_value is not None:
240
- kv_seq_len += past_key_value[0].shape[-2]
241
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
242
- query_states, key_states = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
243
-
244
- # [batch_size, num_heads, seq_len, head_dim]
245
- query_states = torch.cat((query_states, query_pass), dim=-1)
246
- key_states = torch.cat((key_states, key_pass), dim=-1)
247
-
248
- if past_key_value is not None:
249
- # Reuse k, v, self_attention
250
- key_states = torch.cat((past_key_value[0], key_states), dim=2)
251
- value_states = torch.cat((past_key_value[1], value_states), dim=2)
252
-
253
- past_key_value = (key_states, value_states) if use_cache else None
254
-
255
- # Repeat k/v heads if n_kv_heads < n_heads
256
- key_states = repeat_kv(key_states, self.num_key_value_groups)
257
- value_states = repeat_kv(value_states, self.num_key_value_groups)
258
-
259
- attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) / math.sqrt(self.head_dim)
260
-
261
- if attn_weights.size() != (bsz, self.num_heads, q_len, kv_seq_len):
262
- raise ValueError(
263
- f"Attention weights should be of size {(bsz, self.num_heads, q_len, kv_seq_len)}, but is"
264
- f" {attn_weights.size()}"
265
- )
266
-
267
- if attention_mask is not None:
268
- if attention_mask.size() != (bsz, 1, q_len, kv_seq_len):
269
- raise ValueError(
270
- f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)}, but is {attention_mask.size()}"
271
- )
272
- attn_weights = attn_weights + attention_mask
273
-
274
- # Upcast attention to fp32
275
- attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
276
- attn_output = torch.matmul(attn_weights, value_states)
277
-
278
- if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
279
- raise ValueError(
280
- f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
281
- f" {attn_output.size()}"
282
- )
283
-
284
- # Merge heads
285
- attn_output = attn_output.transpose(1, 2).contiguous()
286
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size)
287
-
288
- # Final linear projection
289
- attn_output = self.o_proj(attn_output)
290
-
291
- if not output_attentions:
292
- attn_weights = None
293
-
294
- return attn_output, attn_weights, past_key_value
295
-
296
-
297
- class FlashAttention2(Attention):
298
- """
299
- Reference: https://github.com/huggingface/transformers/blob/5d36025ca13d05151b7a0c761e90d429c4644a30/src/transformers/models/llama/modeling_llama.py#L456
300
- """
301
-
302
- def __init__(self, *args, **kwargs):
303
- super().__init__(*args, **kwargs)
304
-
305
- # TODO: Should be removed once Flash Attention for RoCm is bumped to 2.1.
306
- # flash_attn<2.1 generates a top-left aligned causal mask, while what is needed here is bottom-right alignment, which was made the default for flash_attn>=2.1. This attribute is used to handle this difference. Reference: https://github.com/Dao-AILab/flash-attention/releases/tag/v2.1.0.
307
- # Beware that with flash_attn<2.1, using q_seqlen != k_seqlen (except for the case q_seqlen == 1) produces a wrong mask (top-left).
308
- self._flash_attn_uses_top_left_mask = not is_flash_attn_greater_or_equal_2_10()
309
-
310
- def forward(
311
- self,
312
- hidden_states: torch.Tensor,
313
- attention_mask: Optional[torch.LongTensor] = None,
314
- position_ids: Optional[torch.LongTensor] = None,
315
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
316
- output_attentions: bool = False,
317
- use_cache: bool = False,
318
- **kwargs,
319
- ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
320
- # FlashAttention2 attention does not support output_attentions
321
- if "padding_mask" in kwargs:
322
- warnings.warn(
323
- "Passing `padding_mask` is deprecated and will be removed in v4.37. Please make sure use `attention_mask` instead.`"
324
- )
325
-
326
- # overwrite attention_mask with padding_mask
327
- attention_mask = kwargs.pop("padding_mask")
328
-
329
- output_attentions = False
330
-
331
- bsz, q_len, _ = hidden_states.size()
332
-
333
- query_states = self.q_proj(hidden_states)
334
- key_states = self.k_proj(hidden_states)
335
- value_states = self.v_proj(hidden_states)
336
-
337
- # Flash attention requires the input to have the shape
338
- # batch_size x seq_length x head_dim x hidden_dim
339
- # therefore we just need to keep the original shape
340
- query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
341
- key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
342
- value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
343
-
344
- query_rot = query_states[..., : self.rotary_ndims]
345
- query_pass = query_states[..., self.rotary_ndims :]
346
- key_rot = key_states[..., : self.rotary_ndims]
347
- key_pass = key_states[..., self.rotary_ndims :]
348
-
349
- kv_seq_len = key_states.shape[-2]
350
- if past_key_value is not None:
351
- kv_seq_len += past_key_value[0].shape[-2]
352
- cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
353
- query_states, key_states = apply_rotary_pos_emb(query_rot, key_rot, cos, sin, position_ids)
354
-
355
- # [batch_size, num_heads, seq_len, head_dim]
356
- query_states = torch.cat((query_states, query_pass), dim=-1)
357
- key_states = torch.cat((key_states, key_pass), dim=-1)
358
-
359
- if past_key_value is not None:
360
- # Reuse k, v, self_attention
361
- key_states = torch.cat((past_key_value[0], key_states), dim=2)
362
- value_states = torch.cat((past_key_value[1], value_states), dim=2)
363
-
364
- past_key_value = (key_states, value_states) if use_cache else None
365
-
366
- # TODO: These transpose are quite inefficient but Flash Attention requires the layout [batch_size, sequence_length, num_heads, head_dim]. We would need to refactor the KV cache
367
- # to be able to avoid many of these transpose/reshape/view.
368
- query_states = query_states.transpose(1, 2)
369
- key_states = key_states.transpose(1, 2)
370
- value_states = value_states.transpose(1, 2)
371
-
372
- dropout_rate = getattr(self, "attention_dropout", 0.0) if self.training else 0.0
373
-
374
- attn_output = self._flash_attention_forward(
375
- query_states, key_states, value_states, attention_mask, q_len, dropout=dropout_rate
376
- )
377
- attn_output = attn_output.reshape(bsz, q_len, self.hidden_size).contiguous()
378
- attn_output = self.o_proj(attn_output)
379
-
380
- if not output_attentions:
381
- attn_weights = None
382
-
383
- return attn_output, attn_weights, past_key_value
384
-
385
- def _flash_attention_forward(
386
- self, query_states, key_states, value_states, attention_mask, query_length, dropout=0.0, softmax_scale=None
387
- ):
388
- """
389
- Calls the forward method of Flash Attention - if the input hidden states contain at least one padding token
390
- first unpad the input, then computes the attention scores and pad the final attention scores.
391
-
392
- Args:
393
- query_states (`torch.Tensor`):
394
- Input query states to be passed to Flash Attention API
395
- key_states (`torch.Tensor`):
396
- Input key states to be passed to Flash Attention API
397
- value_states (`torch.Tensor`):
398
- Input value states to be passed to Flash Attention API
399
- attention_mask (`torch.Tensor`):
400
- The padding mask - corresponds to a tensor of size `(batch_size, seq_len)` where 0 stands for the
401
- position of padding tokens and 1 for the position of non-padding tokens.
402
- dropout (`int`, *optional*):
403
- Attention dropout
404
- softmax_scale (`float`, *optional*):
405
- The scaling of QK^T before applying softmax. Default to 1 / sqrt(head_dim)
406
- """
407
- if not self._flash_attn_uses_top_left_mask:
408
- causal = self.is_causal
409
- else:
410
- # TODO: Remove the `query_length != 1` check once Flash Attention for RoCm is bumped to 2.1. For details, please see the comment in FlashAttention2 __init__.
411
- causal = self.is_causal and query_length != 1
412
-
413
- # Contains at least one padding token in the sequence
414
- if attention_mask is not None:
415
- batch_size = query_states.shape[0]
416
- query_states, key_states, value_states, indices_q, cu_seq_lens, max_seq_lens = self._upad_input(
417
- query_states, key_states, value_states, attention_mask, query_length
418
- )
419
-
420
- cu_seqlens_q, cu_seqlens_k = cu_seq_lens
421
- max_seqlen_in_batch_q, max_seqlen_in_batch_k = max_seq_lens
422
-
423
- attn_output_unpad = flash_attn_varlen_func(
424
- query_states,
425
- key_states,
426
- value_states,
427
- cu_seqlens_q=cu_seqlens_q,
428
- cu_seqlens_k=cu_seqlens_k,
429
- max_seqlen_q=max_seqlen_in_batch_q,
430
- max_seqlen_k=max_seqlen_in_batch_k,
431
- dropout_p=dropout,
432
- softmax_scale=softmax_scale,
433
- causal=causal,
434
- )
435
-
436
- attn_output = pad_input(attn_output_unpad, indices_q, batch_size, query_length)
437
- else:
438
- attn_output = flash_attn_func(
439
- query_states, key_states, value_states, dropout, softmax_scale=softmax_scale, causal=causal
440
- )
441
-
442
- return attn_output
443
-
444
- def _upad_input(self, query_layer, key_layer, value_layer, attention_mask, query_length):
445
- indices_k, cu_seqlens_k, max_seqlen_in_batch_k = _get_unpad_data(attention_mask)
446
- batch_size, kv_seq_len, num_key_value_heads, head_dim = key_layer.shape
447
-
448
- key_layer = index_first_axis(
449
- key_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
450
- )
451
- value_layer = index_first_axis(
452
- value_layer.reshape(batch_size * kv_seq_len, num_key_value_heads, head_dim), indices_k
453
- )
454
- if query_length == kv_seq_len:
455
- query_layer = index_first_axis(
456
- query_layer.reshape(batch_size * kv_seq_len, self.num_heads, head_dim), indices_k
457
- )
458
- cu_seqlens_q = cu_seqlens_k
459
- max_seqlen_in_batch_q = max_seqlen_in_batch_k
460
- indices_q = indices_k
461
- elif query_length == 1:
462
- max_seqlen_in_batch_q = 1
463
- cu_seqlens_q = torch.arange(
464
- batch_size + 1, dtype=torch.int32, device=query_layer.device
465
- ) # There is a memcpy here, that is very bad.
466
- indices_q = cu_seqlens_q[:-1]
467
- query_layer = query_layer.squeeze(1)
468
- else:
469
- # The -q_len: slice assumes left padding.
470
- attention_mask = attention_mask[:, -query_length:]
471
- query_layer, indices_q, cu_seqlens_q, max_seqlen_in_batch_q = unpad_input(query_layer, attention_mask)
472
-
473
- return (
474
- query_layer,
475
- key_layer,
476
- value_layer,
477
- indices_q,
478
- (cu_seqlens_q, cu_seqlens_k),
479
- (max_seqlen_in_batch_q, max_seqlen_in_batch_k),
480
- )
481
-
482
-
483
- ATTENTION_CLASSES = {
484
- "eager": Attention,
485
- "flash_attention_2": FlashAttention2,
486
- }
487
-
488
-
489
- class DecoderLayer(nn.Module):
490
- def __init__(self, config: StableLMEpochConfig):
491
- super().__init__()
492
- self.self_attn = ATTENTION_CLASSES[config._attn_implementation](config=config)
493
- self.mlp = MLP(config)
494
- self.input_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
495
- self.post_attention_layernorm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
496
-
497
- def forward(
498
- self,
499
- hidden_states: Optional[torch.FloatTensor],
500
- attention_mask: Optional[torch.FloatTensor] = None,
501
- position_ids: Optional[torch.LongTensor] = None,
502
- past_key_value: Optional[Tuple[torch.Tensor]] = None,
503
- output_attentions: Optional[bool] = False,
504
- use_cache: Optional[bool] = False,
505
- ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[torch.FloatTensor, ...]]]]:
506
- residual = hidden_states
507
-
508
- hidden_states = self.input_layernorm(hidden_states)
509
-
510
- # Self Attention
511
- hidden_states, self_attn_weights, present_key_value = self.self_attn(
512
- hidden_states=hidden_states,
513
- attention_mask=attention_mask,
514
- position_ids=position_ids,
515
- past_key_value=past_key_value,
516
- output_attentions=output_attentions,
517
- use_cache=use_cache,
518
- )
519
- hidden_states = residual + hidden_states
520
-
521
- # Fully Connected
522
- residual = hidden_states
523
- hidden_states = self.post_attention_layernorm(hidden_states)
524
- hidden_states = self.mlp(hidden_states)
525
- hidden_states = residual + hidden_states
526
-
527
- outputs = (hidden_states,)
528
-
529
- if output_attentions:
530
- outputs += (self_attn_weights,)
531
-
532
- if use_cache:
533
- outputs += (present_key_value,)
534
-
535
- return outputs
536
-
537
-
538
- class StableLMEpochPreTrainedModel(PreTrainedModel):
539
- """An abstract class to handle weights initialization and a simple interface
540
- for downloading and loading pretrained models.
541
- """
542
-
543
- config_class = StableLMEpochConfig
544
- base_model_prefix = "transformer"
545
- supports_gradient_checkpointing = True
546
- _no_split_modules = ["DecoderLayer"]
547
- _skip_keys_device_placement = "past_key_values"
548
- _supports_flash_attn_2 = True
549
-
550
- def _init_weights(self, module: nn.Module):
551
- """Initialize the weights"""
552
- if isinstance(module, nn.Linear):
553
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
554
- if module.bias is not None:
555
- module.bias.data.zero_()
556
- elif isinstance(module, nn.Embedding):
557
- module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
558
- if module.padding_idx is not None:
559
- module.weight.data[module.padding_idx].zero_()
560
- elif isinstance(module, nn.LayerNorm):
561
- module.bias.data.zero_()
562
- module.weight.data.fill_(1.0)
563
-
564
- def _set_gradient_checkpointing(self, module: nn.Module, value=False):
565
- if isinstance(module, StableLMEpochModel):
566
- module.gradient_checkpointing = value
567
-
568
-
569
- class StableLMEpochModel(StableLMEpochPreTrainedModel):
570
- def __init__(self, config: StableLMEpochConfig):
571
- super().__init__(config)
572
- self.embed_tokens = nn.Embedding(config.vocab_size, config.hidden_size, config.pad_token_id)
573
- self.layers = nn.ModuleList([DecoderLayer(config) for _ in range(config.num_hidden_layers)])
574
- self.norm = nn.LayerNorm(config.hidden_size, eps=config.norm_eps)
575
-
576
- self._use_flash_attention_2 = config._attn_implementation == "flash_attention_2"
577
- self.gradient_checkpointing = False
578
- # Initialize weights and apply final processing
579
- self.post_init()
580
-
581
- def get_input_embeddings(self):
582
- return self.embed_tokens
583
-
584
- def set_input_embeddings(self, value: nn.Module):
585
- self.embed_tokens = value
586
-
587
- # Copied from transformers.models.bart.modeling_bart.BartDecoder._prepare_decoder_attention_mask
588
- def _prepare_decoder_attention_mask(
589
- self,
590
- attention_mask: torch.Tensor,
591
- input_shape: torch.Size,
592
- inputs_embeds: torch.Tensor,
593
- past_key_values_length: int,
594
- ):
595
- # Create causal mask
596
- # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
597
- combined_attention_mask = None
598
- if input_shape[-1] > 1:
599
- combined_attention_mask = _make_causal_mask(
600
- input_shape,
601
- inputs_embeds.dtype,
602
- device=inputs_embeds.device,
603
- past_key_values_length=past_key_values_length,
604
- )
605
-
606
- if attention_mask is not None:
607
- # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
608
- expanded_attn_mask = _expand_mask(
609
- attention_mask, inputs_embeds.dtype, tgt_len=input_shape[-1]
610
- ).to(inputs_embeds.device)
611
- combined_attention_mask = expanded_attn_mask if combined_attention_mask is None else expanded_attn_mask + combined_attention_mask
612
-
613
- return combined_attention_mask
614
-
615
- def forward(
616
- self,
617
- input_ids: Optional[torch.LongTensor] = None,
618
- attention_mask: Optional[torch.FloatTensor] = None,
619
- position_ids: Optional[torch.LongTensor] = None,
620
- past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
621
- inputs_embeds: Optional[torch.FloatTensor] = None,
622
- use_cache: Optional[bool] = None,
623
- output_attentions: Optional[bool] = None,
624
- output_hidden_states: Optional[bool] = None,
625
- return_dict: Optional[bool] = None,
626
- ) -> Union[Tuple, BaseModelOutputWithPast]:
627
- output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
628
- output_hidden_states = output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
629
- use_cache = use_cache if use_cache is not None else self.config.use_cache
630
-
631
- return_dict = return_dict if return_dict is not None else self.config.use_return_dict
632
-
633
- # Retrieve input_ids and inputs_embeds
634
- if input_ids is not None and inputs_embeds is not None:
635
- raise ValueError(
636
- "You cannot specify both decoder_input_ids and decoder_inputs_embeds at the same time"
637
- )
638
- elif input_ids is not None:
639
- batch_size, seq_length = input_ids.shape
640
- elif inputs_embeds is not None:
641
- batch_size, seq_length, _ = inputs_embeds.shape
642
- else:
643
- raise ValueError(
644
- "You have to specify either decoder_input_ids or decoder_inputs_embeds"
645
- )
646
-
647
- seq_length_with_past = seq_length
648
- past_key_values_length = 0
649
-
650
- if position_ids is None:
651
- device = input_ids.device if input_ids is not None else inputs_embeds.device
652
- position_ids = torch.arange(
653
- past_key_values_length,
654
- seq_length + past_key_values_length,
655
- dtype=torch.long,
656
- device=device,
657
- )
658
- position_ids = position_ids.unsqueeze(0).view(-1, seq_length)
659
- else:
660
- position_ids = position_ids.view(-1, seq_length).long()
661
-
662
- if inputs_embeds is None:
663
- inputs_embeds = self.embed_tokens(input_ids)
664
- # Embed positions
665
- if self._use_flash_attention_2:
666
- # 2d mask is passed through the layers
667
- attention_mask = attention_mask if (attention_mask is not None and 0 in attention_mask) else None
668
- else:
669
- if attention_mask is None:
670
- attention_mask = torch.ones(
671
- (batch_size, seq_length_with_past),
672
- dtype=torch.bool,
673
- device=inputs_embeds.device,
674
- )
675
- attention_mask = self._prepare_decoder_attention_mask(
676
- attention_mask,
677
- (batch_size, seq_length),
678
- inputs_embeds,
679
- past_key_values_length,
680
- )
681
-
682
- hidden_states = inputs_embeds
683
-
684
- if self.gradient_checkpointing and self.training:
685
- if use_cache:
686
- logger.warning(
687
- "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
688
- )
689
- use_cache = False
690
-
691
- # Decoder layers
692
- all_hidden_states = () if output_hidden_states else None
693
- all_self_attns = () if output_attentions else None
694
- next_decoder_cache = () if use_cache else None
695
-
696
- for idx, decoder_layer in enumerate(self.layers):
697
- if output_hidden_states:
698
- all_hidden_states += (hidden_states,)
699
-
700
- past_key_value = (
701
- past_key_values[idx] if past_key_values is not None else None
702
- )
703
-
704
- if self.gradient_checkpointing and self.training:
705
-
706
- def create_custom_forward(module):
707
- def custom_forward(*inputs):
708
- # None for past_key_value
709
- return module(*inputs, past_key_value, output_attentions)
710
-
711
- return custom_forward
712
-
713
- layer_outputs = torch.utils.checkpoint.checkpoint(
714
- create_custom_forward(decoder_layer),
715
- hidden_states,
716
- attention_mask,
717
- position_ids,
718
- )
719
- else:
720
- layer_outputs = decoder_layer(
721
- hidden_states,
722
- attention_mask=attention_mask,
723
- position_ids=position_ids,
724
- past_key_value=past_key_value,
725
- output_attentions=output_attentions,
726
- use_cache=use_cache,
727
- )
728
-
729
- hidden_states = layer_outputs[0]
730
-
731
- if use_cache:
732
- next_decoder_cache += (layer_outputs[2 if output_attentions else 1],)
733
-
734
- if output_attentions:
735
- all_self_attns += (layer_outputs[1],)
736
-
737
- hidden_states = self.norm(hidden_states)
738
-
739
- # Add hidden states from the last decoder layer
740
- if output_hidden_states:
741
- all_hidden_states += (hidden_states,)
742
-
743
- next_cache = next_decoder_cache if use_cache else None
744
- if not return_dict:
745
- return tuple(
746
- v
747
- for v in [hidden_states, next_cache, all_hidden_states, all_self_attns]
748
- if v is not None
749
- )
750
- return BaseModelOutputWithPast(
751
- last_hidden_state=hidden_states,
752
- past_key_values=next_cache,
753
- hidden_states=all_hidden_states,
754
- attentions=all_self_attns,
755
- )
756
-
757
-
758
- class StableLMEpochForCausalLM(StableLMEpochPreTrainedModel):
759
- _tied_weights_keys = ["lm_head.weight"]
760
-
761
- def __init__(self, config: StableLMEpochConfig):
762
- super().__init__(config)
763
-
764
- self.model = StableLMEpochModel(config)
765
- self.lm_head = nn.Linear(config.hidden_size, config.vocab_size, bias=False)
766
-
767
- # Initialize weights and apply final processing
768
- self.post_init()
769
-
770
- def get_input_embeddings(self):
771
- return self.model.embed_tokens
772
-
773
- def set_input_embeddings(self, value):
774
- self.model.embed_tokens = value
775
-
776
- def get_output_embeddings(self):
777
- return self.lm_head
778
-
779
- def set_output_embeddings(self, new_embeddings: nn.Module):
780
- self.lm_head = new_embeddings
781
-
782
- def get_decoder(self):
783
- return self.model
784
-
785
- def set_decoder(self, decoder):
786
- self.model = decoder
787
-
788
- def forward(
789
- self,
790
- input_ids: Optional[torch.LongTensor] = None,
791
- attention_mask: Optional[torch.FloatTensor] = None,
792
- position_ids: Optional[torch.LongTensor] = None,
793
- past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
794
- inputs_embeds: Optional[torch.FloatTensor] = None,
795
- labels: Optional[torch.LongTensor] = None,
796
- use_cache: Optional[bool] = None,
797
- output_attentions: Optional[bool] = None,
798
- output_hidden_states: Optional[bool] = None,
799
- return_dict: Optional[bool] = None,
800
- ) -> Union[Tuple, CausalLMOutputWithPast]:
801
- output_attentions = (
802
- output_attentions
803
- if output_attentions is not None
804
- else self.config.output_attentions
805
- )
806
- output_hidden_states = (
807
- output_hidden_states
808
- if output_hidden_states is not None
809
- else self.config.output_hidden_states
810
- )
811
- return_dict = (
812
- return_dict if return_dict is not None else self.config.use_return_dict
813
- )
814
-
815
- # decoder outputs consists of (dec_features, layer_state, dec_hidden, dec_attn)
816
- outputs = self.model(
817
- input_ids,
818
- attention_mask=attention_mask,
819
- position_ids=position_ids,
820
- past_key_values=past_key_values,
821
- inputs_embeds=inputs_embeds,
822
- use_cache=use_cache,
823
- output_attentions=output_attentions,
824
- output_hidden_states=output_hidden_states,
825
- return_dict=return_dict,
826
- )
827
-
828
- hidden_states = outputs[0]
829
- logits = self.lm_head(hidden_states).float()
830
-
831
- loss = None
832
- if labels is not None:
833
- # Shift so that tokens < n predict n
834
- shift_logits = logits[..., :-1, :].contiguous()
835
- shift_labels = labels[..., 1:].contiguous()
836
- # Flatten the tokens
837
- loss_fct = CrossEntropyLoss()
838
- shift_logits = shift_logits.view(-1, self.config.vocab_size)
839
- shift_labels = shift_labels.view(-1)
840
- # Enable model parallelism
841
- shift_labels = shift_labels.to(shift_logits.device)
842
- loss = loss_fct(shift_logits, shift_labels)
843
-
844
- if not return_dict:
845
- output = (logits,) + outputs[1:]
846
- return (loss,) + output if loss is not None else output
847
-
848
- return CausalLMOutputWithPast(
849
- loss=loss,
850
- logits=logits,
851
- past_key_values=outputs.past_key_values,
852
- hidden_states=outputs.hidden_states,
853
- attentions=outputs.attentions,
854
- )
855
-
856
- def prepare_inputs_for_generation(
857
- self,
858
- input_ids,
859
- past_key_values: Optional[torch.Tensor] = None,
860
- attention_mask: Optional[torch.Tensor] = None,
861
- inputs_embeds: Optional[torch.Tensor] = None,
862
- **kwargs,
863
- ):
864
- # Trim decoder_input_ids if past is used
865
- if past_key_values is not None:
866
- past_length = past_key_values[0][0].shape[2]
867
-
868
- # Some generation methods already pass only the last input ID
869
- if input_ids.shape[1] > past_length:
870
- remove_prefix_length = past_length
871
- else:
872
- # Default to old behavior: keep only final ID
873
- remove_prefix_length = input_ids.shape[1] - 1
874
-
875
- input_ids = input_ids[:, remove_prefix_length:]
876
-
877
- position_ids = kwargs.get("position_ids", None)
878
- if attention_mask is not None and position_ids is None:
879
- # Create position_ids on the fly for batch generation
880
- position_ids = attention_mask.long().cumsum(-1) - 1
881
- position_ids.masked_fill_(attention_mask == 0, 1)
882
- if past_key_values:
883
- position_ids = position_ids[:, -1].unsqueeze(-1)
884
-
885
- # If `inputs_embeds` are passed, we only want to use them in the 1st generation step
886
- if inputs_embeds is not None and past_key_values is None:
887
- model_inputs = {"inputs_embeds": inputs_embeds}
888
- else:
889
- model_inputs = {"input_ids": input_ids}
890
-
891
- model_inputs.update(
892
- {
893
- "attention_mask": attention_mask,
894
- "past_key_values": past_key_values,
895
- "use_cache": kwargs.get("use_cache"),
896
- "position_ids": position_ids,
897
- }
898
- )
899
- return model_inputs
900
-
901
- @staticmethod
902
- def _reorder_cache(past_key_values, beam_idx):
903
- reordered_past = ()
904
- for layer_past in past_key_values:
905
- reordered_past += (
906
- tuple(
907
- past_state.index_select(0, beam_idx.to(past_state.device))
908
- for past_state in layer_past
909
- ),
910
- )
911
- return reordered_past
912
-
913
-
914
- StableLMEpochConfig.register_for_auto_class()
915
- StableLMEpochForCausalLM.register_for_auto_class("AutoModelForCausalLM")