Xenova (HF staff) committed
Commit 2479537 (1 parent: 157da2e)

Update app.py

Files changed (1):
  app.py (+1, -1)
app.py CHANGED
@@ -23,7 +23,7 @@ MAX_INPUT_TOKEN_LENGTH = int(os.getenv("MAX_INPUT_TOKEN_LENGTH", "4096"))
 
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 
-model_id = "gg-hf/gemma-2-2b-it"
+model_id = "google/gemma-2-2b-it"
 tokenizer = GemmaTokenizerFast.from_pretrained(model_id)
 model = AutoModelForCausalLM.from_pretrained(
     model_id,
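
For context, a minimal self-contained sketch of how the updated repository id is loaded in app.py. The device selection, tokenizer, and model calls come from the hunk above; the truncated from_pretrained arguments (torch_dtype, the .to(device) / eval() calls) are assumptions, since they are not shown in this diff.

import torch
from transformers import AutoModelForCausalLM, GemmaTokenizerFast

# Updated repository id from this commit (previously "gg-hf/gemma-2-2b-it").
model_id = "google/gemma-2-2b-it"

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

tokenizer = GemmaTokenizerFast.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,  # assumption: dtype argument is cut off in this hunk
).to(device)
model.eval()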