ACCC1380 committed on
Commit
bec213c
1 Parent(s): 89d0178

Upload lora-scripts/sd-scripts/library/ipex/attention.py with huggingface_hub

lora-scripts/sd-scripts/library/ipex/attention.py ADDED
@@ -0,0 +1,177 @@
import os
import torch
import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
from functools import cache

# pylint: disable=protected-access, missing-function-docstring, line-too-long

# ARC GPUs can't allocate more than 4GB to a single block so we slice the attention layers

sdpa_slice_trigger_rate = float(os.environ.get('IPEX_SDPA_SLICE_TRIGGER_RATE', 4))
attention_slice_rate = float(os.environ.get('IPEX_ATTENTION_SLICE_RATE', 4))

# Find something divisible with the input_tokens (halve until one slice fits under attention_slice_rate)
@cache
def find_slice_size(slice_size, slice_block_size):
    while (slice_size * slice_block_size) > attention_slice_rate:
        slice_size = slice_size // 2
        if slice_size <= 1:
            slice_size = 1
            break
    return slice_size
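
For intuition: find_slice_size simply keeps halving the slice count until the estimated footprint of one slice (slice_size × slice_block_size, in MB) is no larger than attention_slice_rate, bottoming out at 1. A standalone sketch of that loop with assumed numbers (it mirrors the cached function above rather than importing it):

# Illustrative re-implementation of the halving loop; all values are assumptions.
rate = 4.0  # default IPEX_ATTENTION_SLICE_RATE

def halve_until_fits(slice_size, slice_block_size):
    while (slice_size * slice_block_size) > rate:
        slice_size //= 2
        if slice_size <= 1:
            return 1
    return slice_size

print(halve_until_fits(16, 2.5))  # 40 -> 20 -> 10 -> 5 MB: halves 16 -> 8 -> 4 -> 2 -> 1
print(halve_until_fits(8, 0.4))   # 8 * 0.4 = 3.2 MB already fits: returns 8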

# Find slice sizes for SDPA
@cache
def find_sdpa_slice_sizes(query_shape, query_element_size):
    if len(query_shape) == 3:
        batch_size_attention, query_tokens, shape_three = query_shape
        shape_four = 1
    else:
        batch_size_attention, query_tokens, shape_three, shape_four = query_shape

    slice_block_size = query_tokens * shape_three * shape_four / 1024 / 1024 * query_element_size
    block_size = batch_size_attention * slice_block_size

    split_slice_size = batch_size_attention
    split_2_slice_size = query_tokens
    split_3_slice_size = shape_three

    do_split = False
    do_split_2 = False
    do_split_3 = False

    if block_size > sdpa_slice_trigger_rate:
        do_split = True
        split_slice_size = find_slice_size(split_slice_size, slice_block_size)
        if split_slice_size * slice_block_size > attention_slice_rate:
            slice_2_block_size = split_slice_size * shape_three * shape_four / 1024 / 1024 * query_element_size
            do_split_2 = True
            split_2_slice_size = find_slice_size(split_2_slice_size, slice_2_block_size)
            if split_2_slice_size * slice_2_block_size > attention_slice_rate:
                slice_3_block_size = split_slice_size * split_2_slice_size * shape_four / 1024 / 1024 * query_element_size
                do_split_3 = True
                split_3_slice_size = find_slice_size(split_3_slice_size, slice_3_block_size)

    return do_split, do_split_2, do_split_3, split_slice_size, split_2_slice_size, split_3_slice_size
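
A worked example of the SDPA sizing (assumed shapes, fp16 so element_size is 2): a query of shape (2, 10, 4096, 64) gives a per-batch-element footprint of 5.0 MB and a block_size of 10.0 MB, above the default 4 MB trigger, so the batch dim splits to 1 and the second dim to 5:

# Reproducing the size math for query.shape == (2, 10, 4096, 64) in fp16; values are assumptions.
batch, heads, tokens, head_dim = 2, 10, 4096, 64
element_size = 2  # bytes per fp16 element

slice_block_size = heads * tokens * head_dim / 1024 / 1024 * element_size  # 5.0 MB
block_size = batch * slice_block_size                                      # 10.0 MB > 4 MB trigger

# find_sdpa_slice_sizes((2, 10, 4096, 64), 2) then returns
# (True, True, False, 1, 5, 4096): one batch element at a time,
# five heads per SDPA call, no third-level split.
print(slice_block_size, block_size)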

# Find slice sizes for BMM
@cache
def find_bmm_slice_sizes(input_shape, input_element_size, mat2_shape):
    batch_size_attention, input_tokens, mat2_atten_shape = input_shape[0], input_shape[1], mat2_shape[2]
    slice_block_size = input_tokens * mat2_atten_shape / 1024 / 1024 * input_element_size
    block_size = batch_size_attention * slice_block_size

    split_slice_size = batch_size_attention
    split_2_slice_size = input_tokens
    split_3_slice_size = mat2_atten_shape

    do_split = False
    do_split_2 = False
    do_split_3 = False

    if block_size > attention_slice_rate:
        do_split = True
        split_slice_size = find_slice_size(split_slice_size, slice_block_size)
        if split_slice_size * slice_block_size > attention_slice_rate:
            slice_2_block_size = split_slice_size * mat2_atten_shape / 1024 / 1024 * input_element_size
            do_split_2 = True
            split_2_slice_size = find_slice_size(split_2_slice_size, slice_2_block_size)
            if split_2_slice_size * slice_2_block_size > attention_slice_rate:
                slice_3_block_size = split_slice_size * split_2_slice_size / 1024 / 1024 * input_element_size
                do_split_3 = True
                split_3_slice_size = find_slice_size(split_3_slice_size, slice_3_block_size)

    return do_split, do_split_2, do_split_3, split_slice_size, split_2_slice_size, split_3_slice_size
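
The BMM variant runs the same cascade but sizes blocks from input's token dim and mat2's last dim, and uses attention_slice_rate as both the trigger and the target (there is no separate trigger rate like sdpa_slice_trigger_rate). Another arithmetic sketch, with assumed shapes typical of an attention-score matmul:

# Reproducing the BMM size math for input.shape == (20, 4096, 4096) in fp16; values are assumptions.
batch, tokens, mat2_cols = 20, 4096, 4096
slice_block_size = tokens * mat2_cols / 1024 / 1024 * 2  # 32.0 MB per batch element
block_size = batch * slice_block_size                    # 640.0 MB >> 4 MB rate

# find_bmm_slice_sizes((20, 4096, 4096), 2, (20, 4096, 4096)) then returns
# (True, True, False, 1, 512, 4096): one batch element per call,
# 512 rows of the score matrix at a time.
print(slice_block_size, block_size)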


original_torch_bmm = torch.bmm
def torch_bmm_32_bit(input, mat2, *, out=None):
    if input.device.type != "xpu":
        return original_torch_bmm(input, mat2, out=out)
    do_split, do_split_2, do_split_3, split_slice_size, split_2_slice_size, split_3_slice_size = find_bmm_slice_sizes(input.shape, input.element_size(), mat2.shape)

    # Slice BMM
    if do_split:
        batch_size_attention, input_tokens, mat2_atten_shape = input.shape[0], input.shape[1], mat2.shape[2]
        hidden_states = torch.zeros(input.shape[0], input.shape[1], mat2.shape[2], device=input.device, dtype=input.dtype)
        for i in range(batch_size_attention // split_slice_size):
            start_idx = i * split_slice_size
            end_idx = (i + 1) * split_slice_size
            if do_split_2:
                for i2 in range(input_tokens // split_2_slice_size): # pylint: disable=invalid-name
                    start_idx_2 = i2 * split_2_slice_size
                    end_idx_2 = (i2 + 1) * split_2_slice_size
                    if do_split_3:
                        for i3 in range(mat2_atten_shape // split_3_slice_size): # pylint: disable=invalid-name
                            start_idx_3 = i3 * split_3_slice_size
                            end_idx_3 = (i3 + 1) * split_3_slice_size
                            hidden_states[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3] = original_torch_bmm(
                                input[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3],
                                mat2[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3],
                                out=out
                            )
                    else:
                        hidden_states[start_idx:end_idx, start_idx_2:end_idx_2] = original_torch_bmm(
                            input[start_idx:end_idx, start_idx_2:end_idx_2],
                            mat2[start_idx:end_idx, start_idx_2:end_idx_2],
                            out=out
                        )
            else:
                hidden_states[start_idx:end_idx] = original_torch_bmm(
                    input[start_idx:end_idx],
                    mat2[start_idx:end_idx],
                    out=out
                )
        torch.xpu.synchronize(input.device)
    else:
        return original_torch_bmm(input, mat2, out=out)
    return hidden_states
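
Nothing in this file installs the wrapper on its own; presumably the surrounding ipex hijack assigns it over torch.bmm at startup. A minimal patching sketch, assuming this repository's module path (the import line is an assumption, not something this commit shows):

# Hypothetical wiring; the import path is an assumption.
import torch
from library.ipex.attention import torch_bmm_32_bit

torch.bmm = torch_bmm_32_bit  # every bmm now routes through the slicer

a = torch.randn(4, 64, 32)
b = torch.randn(4, 32, 16)
print(torch.bmm(a, b).shape)  # torch.Size([4, 64, 16]); on CPU the wrapper defers to the original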

original_scaled_dot_product_attention = torch.nn.functional.scaled_dot_product_attention
def scaled_dot_product_attention_32_bit(query, key, value, attn_mask=None, dropout_p=0.0, is_causal=False, **kwargs):
    if query.device.type != "xpu":
        return original_scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, **kwargs)
    do_split, do_split_2, do_split_3, split_slice_size, split_2_slice_size, split_3_slice_size = find_sdpa_slice_sizes(query.shape, query.element_size())

    # Slice SDPA
    if do_split:
        batch_size_attention, query_tokens, shape_three = query.shape[0], query.shape[1], query.shape[2]
        hidden_states = torch.zeros(query.shape, device=query.device, dtype=query.dtype)
        for i in range(batch_size_attention // split_slice_size):
            start_idx = i * split_slice_size
            end_idx = (i + 1) * split_slice_size
            if do_split_2:
                for i2 in range(query_tokens // split_2_slice_size): # pylint: disable=invalid-name
                    start_idx_2 = i2 * split_2_slice_size
                    end_idx_2 = (i2 + 1) * split_2_slice_size
                    if do_split_3:
                        for i3 in range(shape_three // split_3_slice_size): # pylint: disable=invalid-name
                            start_idx_3 = i3 * split_3_slice_size
                            end_idx_3 = (i3 + 1) * split_3_slice_size
                            hidden_states[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3] = original_scaled_dot_product_attention(
                                query[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3],
                                key[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3],
                                value[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3],
                                attn_mask=attn_mask[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3] if attn_mask is not None else attn_mask,
                                dropout_p=dropout_p, is_causal=is_causal, **kwargs
                            )
                    else:
                        hidden_states[start_idx:end_idx, start_idx_2:end_idx_2] = original_scaled_dot_product_attention(
                            query[start_idx:end_idx, start_idx_2:end_idx_2],
                            key[start_idx:end_idx, start_idx_2:end_idx_2],
                            value[start_idx:end_idx, start_idx_2:end_idx_2],
                            attn_mask=attn_mask[start_idx:end_idx, start_idx_2:end_idx_2] if attn_mask is not None else attn_mask,
                            dropout_p=dropout_p, is_causal=is_causal, **kwargs
                        )
            else:
                hidden_states[start_idx:end_idx] = original_scaled_dot_product_attention(
                    query[start_idx:end_idx],
                    key[start_idx:end_idx],
                    value[start_idx:end_idx],
                    attn_mask=attn_mask[start_idx:end_idx] if attn_mask is not None else attn_mask,
                    dropout_p=dropout_p, is_causal=is_causal, **kwargs
                )
        torch.xpu.synchronize(query.device)
    else:
        return original_scaled_dot_product_attention(query, key, value, attn_mask=attn_mask, dropout_p=dropout_p, is_causal=is_causal, **kwargs)
    return hidden_states
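
The SDPA wrapper would be installed the same way, by reassigning the function in the torch.nn.functional namespace. A companion sketch under the same assumptions; on non-XPU devices both wrappers fall straight through to the originals, so this runs unchanged on CPU:

# Hypothetical wiring; the import path is an assumption.
import torch
import torch.nn.functional as F
from library.ipex.attention import scaled_dot_product_attention_32_bit

F.scaled_dot_product_attention = scaled_dot_product_attention_32_bit

q = k = v = torch.randn(2, 10, 1024, 64)  # small CPU-friendly stand-in for the shapes above
print(F.scaled_dot_product_attention(q, k, v).shape)  # torch.Size([2, 10, 1024, 64])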