Skip to content

Commit

Permalink
Fix RLHF llama reward modeling backward issue (huggingface#612)
Browse files Browse the repository at this point in the history
  • Loading branch information
sywangyi authored and Jinyan chen committed Feb 27, 2024
1 parent 7013e68 commit 78fc45f
Showing 1 changed file with 1 addition and 1 deletion.
2 changes: 1 addition & 1 deletion optimum/habana/transformers/models/llama/modeling_llama.py
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ def __init__(self, config: LlamaConfig):
self.past_key = None
self.past_value = None
self.inp_seq_len = -1
self.register_buffer("norm_factor", torch.tensor(1.0 / math.sqrt(self.head_dim)), persistent=False)
self.norm_factor = 1.0 / math.sqrt(self.head_dim)

def allocate_kv_cache(self, batch_size, max_seq_len, inp_seq_len, kv_cache_fp8):
key_shape = (batch_size, self.num_key_value_heads, max_seq_len, self.head_dim)
Expand Down

0 comments on commit 78fc45f

Please sign in to comment.