conan1024hao committed
Commit
b243c3f
1 Parent(s): 0d2c96f

update readme

Files changed (1)
  1. README.md +1 -1
README.md CHANGED
@@ -23,7 +23,7 @@ from transformers import AutoTokenizer, AutoModelForMaskedLM
  tokenizer = AutoTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
  model = AutoModelForMaskedLM.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
 
- sentence = '早稲田 大学 で 自然 言語 処理 を [MASK] する 。' # input should be segmented into words by Juman++ in advance
+ sentence = '早稲田 大学 で 自然 言語 処理 を [MASK] する 。'
  encoding = tokenizer(sentence, return_tensors='pt')
  ...
  ```
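
The change removes the note about segmenting the input with Juman++ in advance, presumably because the with-auto-jumanpp tokenizer runs Juman++ itself. Below is a minimal sketch of how the README's fill-mask snippet could be completed; everything after `encoding` is an assumption for illustration, not part of the model card, and loading this tokenizer requires Juman++ to be installed locally.

```python
import torch
from transformers import AutoTokenizer, AutoModelForMaskedLM

# Assumes Juman++ (and its Python binding) is available for the auto-segmenting tokenizer.
tokenizer = AutoTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")
model = AutoModelForMaskedLM.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp")

sentence = '早稲田 大学 で 自然 言語 処理 を [MASK] する 。'
encoding = tokenizer(sentence, return_tensors='pt')

with torch.no_grad():
    logits = model(**encoding).logits

# Locate the [MASK] position and take the highest-scoring vocabulary entry (illustrative, not from the README).
mask_positions = (encoding['input_ids'][0] == tokenizer.mask_token_id).nonzero(as_tuple=True)[0]
predicted_ids = logits[0, mask_positions].argmax(dim=-1)
print(tokenizer.decode(predicted_ids))
```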