xdm222 committed on
Commit
6145379
1 Parent(s): f816efa

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -2982,7 +2982,7 @@ import torch
2982
  from transformers import AutoModel, AutoTokenizer
2983
 
2984
  tokenizer = AutoTokenizer.from_pretrained('Snowflake/snowflake-arctic-embed-m-long')
2985
- model = AutoModel.from_pretrained('Snowflake/snowflake-arctic-embed-m-long', trust_remote_code=True, add_pooling_layer=False)
2986
  model.eval()
2987
 
2988
  query_prefix = 'Represent this sentence for searching relevant passages: '
@@ -3018,7 +3018,7 @@ If you use the long context model with more than 2048 tokens, ensure that you in
3018
 
3019
 
3020
  ``` py
3021
- model = AutoModel.from_pretrained('Snowflake/snowflake-arctic-embed-m-long', trust_remote_code=True, rotary_scaling_factor=2)
3022
  ```
3023
 
3024
  ### Using Transformers.js
 
2982
  from transformers import AutoModel, AutoTokenizer
2983
 
2984
  tokenizer = AutoTokenizer.from_pretrained('Snowflake/snowflake-arctic-embed-m-long')
2985
+ model = AutoModel.from_pretrained('Snowflake/snowflake-arctic-embed-m-long', trust_remote_code=True, add_pooling_layer=False, safe_serialization=True)
2986
  model.eval()
2987
 
2988
  query_prefix = 'Represent this sentence for searching relevant passages: '
 
3018
 
3019
 
3020
  ``` py
3021
+ model = AutoModel.from_pretrained('Snowflake/snowflake-arctic-embed-m-long', trust_remote_code=True, safe_serialization=True, rotary_scaling_factor=2)
3022
  ```
3023
 
3024
  ### Using Transformers.js