Error in KoboldAI

#2
opened by SLywnow

The model loads normally, but when I try to generate something it returns KeyError: 'model.layers.0.self_attn.rotary_emb.cos_cached'.
Running with 13 layers on the GPU and 19 on the CPU.
This happens only with this model.
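For context, a 13/19 layer split like this corresponds roughly to an accelerate device_map with the remaining layers offloaded to CPU, which is the code path that fails in the traceback below. A minimal sketch, assuming a 32-layer LLaMA-7B-class checkpoint; the model id is illustrative and this is not KoboldAI's actual internal code:

```python
# Sketch of a 13-GPU / 19-CPU layer split via accelerate's device_map.
# Assumes a 32-layer LLaMA-7B-class model; the model id is an assumption.
from transformers import AutoModelForCausalLM

GPU_LAYERS = 13
TOTAL_LAYERS = 32  # LLaMA-7B has 32 decoder layers (13 + 19)

device_map = {
    "model.embed_tokens": 0,   # keep embeddings on GPU 0
    "model.norm": "cpu",
    "lm_head": "cpu",
}
for i in range(TOTAL_LAYERS):
    device_map[f"model.layers.{i}"] = 0 if i < GPU_LAYERS else "cpu"

# CPU-mapped modules get accelerate offload hooks attached; those hooks
# are what raise the KeyError seen in the traceback below.
model = AutoModelForCausalLM.from_pretrained(
    "PygmalionAI/pygmalion-7b",
    device_map=device_map,
    torch_dtype="auto",
)
```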

Full error:
```
ERROR | __main__:generate:3895 - Traceback (most recent call last):
  File "aiserver.py", line 3882, in generate
    genout, already_generated = tpool.execute(model.core_generate, txt, found_entries)
  File "B:\python\lib\site-packages\eventlet\tpool.py", line 132, in execute
    six.reraise(c, e, tb)
  File "B:\python\lib\site-packages\six.py", line 719, in reraise
    raise value
  File "B:\python\lib\site-packages\eventlet\tpool.py", line 86, in tworker
    rv = meth(*args, **kwargs)
  File "D:\AI\KoboldAI\modeling\inference_model.py", line 332, in core_generate
    result = self.raw_generate(
  File "D:\AI\KoboldAI\modeling\inference_model.py", line 579, in raw_generate
    result = self._raw_generate(
  File "D:\AI\KoboldAI\modeling\inference_models\hf_torch.py", line 250, in _raw_generate
    genout = self.model.generate(
  File "B:\python\lib\site-packages\torch\utils\_contextlib.py", line 115, in decorate_context
    return func(*args, **kwargs)
  File "B:\python\lib\site-packages\transformers\generation\utils.py", line 1485, in generate
    return self.sample(
  File "D:\AI\KoboldAI\modeling\inference_models\hf_torch.py", line 218, in new_sample
    return new_sample.old_sample(self, *args, **kwargs)
  File "B:\python\lib\site-packages\transformers\generation\utils.py", line 2524, in sample
    outputs = self(
  File "B:\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "B:\python\lib\site-packages\accelerate\hooks.py", line 165, in new_forward
    output = old_forward(*args, **kwargs)
  File "B:\python\lib\site-packages\transformers\models\llama\modeling_llama.py", line 687, in forward
    outputs = self.model(
  File "B:\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "B:\python\lib\site-packages\transformers\models\llama\modeling_llama.py", line 577, in forward
    layer_outputs = decoder_layer(
  File "B:\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "B:\python\lib\site-packages\accelerate\hooks.py", line 165, in new_forward
    output = old_forward(*args, **kwargs)
  File "B:\python\lib\site-packages\transformers\models\llama\modeling_llama.py", line 292, in forward
    hidden_states, self_attn_weights, present_key_value = self.self_attn(
  File "B:\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "B:\python\lib\site-packages\accelerate\hooks.py", line 165, in new_forward
    output = old_forward(*args, **kwargs)
  File "B:\python\lib\site-packages\transformers\models\llama\modeling_llama.py", line 203, in forward
    cos, sin = self.rotary_emb(value_states, seq_len=kv_seq_len)
  File "B:\python\lib\site-packages\torch\nn\modules\module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "B:\python\lib\site-packages\accelerate\hooks.py", line 160, in new_forward
    args, kwargs = module._hf_hook.pre_forward(module, *args, **kwargs)
  File "B:\python\lib\site-packages\accelerate\hooks.py", line 280, in pre_forward
    set_module_tensor_to_device(module, name, self.execution_device, value=self.weights_map[name])
  File "B:\python\lib\site-packages\accelerate\utils\offload.py", line 123, in __getitem__
    return self.dataset[f"{self.prefix}{key}"]
  File "B:\python\lib\site-packages\accelerate\utils\offload.py", line 170, in __getitem__
    weight_info = self.index[key]
KeyError: 'model.layers.0.self_attn.rotary_emb.cos_cached'
```
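One plausible reading of the traceback: the missing key is not a weight but a cached rotary-embedding buffer. In the transformers LLaMA code of that era (~v4.28), the rotary cache is registered as a non-persistent buffer, so it never appears in the state dict from which accelerate builds its offload index, and the __getitem__ lookup in pre_forward fails. A condensed sketch of that pattern, with shapes and details simplified:

```python
import torch

class LlamaRotaryEmbedding(torch.nn.Module):
    # Condensed from transformers' LLaMA rotary embedding (~v4.28);
    # shapes simplified for illustration.
    def __init__(self, dim, max_position_embeddings=2048, base=10000):
        super().__init__()
        inv_freq = 1.0 / (base ** (torch.arange(0, dim, 2).float() / dim))
        t = torch.arange(max_position_embeddings).float()
        freqs = torch.outer(t, inv_freq)
        emb = torch.cat((freqs, freqs), dim=-1)
        # persistent=False keeps these caches out of the state dict, so an
        # offload index built from saved weights has no entry for
        # 'cos_cached' / 'sin_cached' -- hence the KeyError when the
        # accelerate hook tries to fetch them for an offloaded layer.
        self.register_buffer("cos_cached", emb.cos(), persistent=False)
        self.register_buffer("sin_cached", emb.sin(), persistent=False)
```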

I believe KoboldAI has problems running Pygmalion 7B+ because it is a LLaMA-based model. Try using the oobabooga web UI instead.
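As a sanity check outside KoboldAI, you could also try generating with plain transformers on a single device, which avoids the accelerate offload hooks entirely. A minimal sketch; the model id is an assumption, and this needs enough RAM to hold the full model:

```python
# Generate with everything on one device (no offload hooks involved).
# The model id is an assumption for illustration.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "PygmalionAI/pygmalion-7b"
tok = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)  # all on CPU

inputs = tok("Hello, my name is", return_tensors="pt")
out = model.generate(**inputs, max_new_tokens=20)
print(tok.decode(out[0], skip_special_tokens=True))
```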
