Update handler for Colab-generated model
Browse files- handler.py +2 -1
handler.py
CHANGED
@@ -14,7 +14,8 @@ class EndpointHandler():
|
|
14 |
def __init__(self, path=""):
|
15 |
# self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
16 |
# self.tokenizer = AutoTokenizer.from_pretrained("optimum/sbert-all-MiniLM-L6-with-pooler")
|
17 |
-
|
|
|
18 |
# self.model.to(self.device)
|
19 |
# print("model will run on ", self.device)
|
20 |
|
|
|
def __init__(self, path=""):
    """Load the Colab-exported ONNX model and build the inference pipeline.

    Args:
        path: Directory containing the exported artifacts (``model.onnx``
              plus tokenizer files). Supplied by the Inference Endpoints
              runtime; defaults to the current directory.
    """
    # Tokenizer must be loaded explicitly for an ORT model — the commented
    # reference above ("optimum/sbert-all-MiniLM-L6-with-pooler") shows the
    # intended AutoTokenizer usage; previously `tokenizer` was an undefined
    # name and construction raised NameError.
    self.tokenizer = AutoTokenizer.from_pretrained(path)
    # Use `path` (was hard-coded to "") so the handler loads the model it is
    # deployed with. from_transformers=False: the file is already ONNX, not
    # a transformers checkpoint needing export.
    model_regular = ORTModelForFeatureExtraction.from_pretrained(
        path, file_name="model.onnx", from_transformers=False
    )
    # "feature-extraction" is the pipeline task matching
    # ORTModelForFeatureExtraction; previously `task` was an undefined name.
    self.onnx_extractor = pipeline(
        "feature-extraction", model=model_regular, tokenizer=self.tokenizer
    )
|
21 |
|