Update sd_hijack_optimizations.py
parent beb7dda5d6
commit 280ed8f00f
1 changed file with 1 addition and 1 deletion
@@ -372,7 +372,7 @@ def scaled_dot_product_attention_forward(self, x, context=None, mask=None):
     dtype = q.dtype
     if shared.opts.upcast_attn:
-        q, k = q.float(), k.float()
+        q, k, v = q.float(), k.float(), v.float()

     # the output of sdp = (batch, num_heads, seq_len, head_dim)
     hidden_states = torch.nn.functional.scaled_dot_product_attention(
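The one-line change upcasts `v` alongside `q` and `k` when `shared.opts.upcast_attn` is enabled. PyTorch's `torch.nn.functional.scaled_dot_product_attention` requires the query, key, and value tensors to share a dtype, so upcasting only `q` and `k` left `v` in half precision and made the call fail. Below is a minimal standalone sketch of the failure mode and the fix; the tensor shapes are illustrative (following the `(batch, num_heads, seq_len, head_dim)` comment in the function) and are not taken from the commit:

import torch
import torch.nn.functional as F

# Illustrative shapes: (batch, num_heads, seq_len, head_dim).
q = torch.randn(1, 8, 16, 64, dtype=torch.float16)
k = torch.randn(1, 8, 16, 64, dtype=torch.float16)
v = torch.randn(1, 8, 16, 64, dtype=torch.float16)

# Old behaviour: only q and k were upcast, leaving v in float16.
# scaled_dot_product_attention rejects mixed input dtypes, so the
# call raises a RuntimeError.
try:
    F.scaled_dot_product_attention(q.float(), k.float(), v)
except RuntimeError as err:
    print(err)  # expected query, key, and value to have the same dtype

# Patched behaviour: all three tensors are upcast, so the call succeeds.
out = F.scaled_dot_product_attention(q.float(), k.float(), v.float())
print(out.dtype)  # torch.float32; the saved `dtype = q.dtype` in the
                  # patched function suggests it casts back afterwards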