# (extraction artifacts removed: file-size line, commit hash, line-number gutter)
# EXAMPLE USAGE
## Load libraries
import torch
from transformers import XLMRobertaForSequenceClassification, XLMRobertaTokenizer, AutoTokenizer

## Load the model and the tokenizer
model_path = "nmarinnn/bert-bregman"
model = XLMRobertaForSequenceClassification.from_pretrained(model_path)
# Switch to inference mode (disables dropout) since this script only predicts.
model.eval()
# Load the tokenizer once. The original loaded it twice (XLMRobertaTokenizer and
# AutoTokenizer), doubling the hub download; AutoTokenizer resolves to the right
# class for this checkpoint.
loaded_tokenizer = AutoTokenizer.from_pretrained(model_path)
# Backward-compatible alias: earlier revisions exposed the tokenizer under this name.
tokenizer = loaded_tokenizer
## Label-prediction helper
def predict(text):
    """Classify *text* with the loaded model.

    Returns a tuple ``(label, probability, all_probabilities)`` where *label*
    is one of ``"negativo"``/``"neutro"``/``"positivo"``, *probability* is the
    softmax score of the winning class, and *all_probabilities* is the full
    per-class distribution as a plain list.
    """
    encoded = loaded_tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        output = model(**encoded)
    # Convert raw logits into a probability distribution over the 3 classes.
    distribution = torch.nn.functional.softmax(output.logits, dim=-1)
    winner = torch.argmax(distribution, dim=-1).item()
    id_to_label = {0: "negativo", 1: "neutro", 2: "positivo"}
    return id_to_label[winner], distribution[0][winner].item(), distribution[0].tolist()
# Example usage
text_to_classify = "vamos rusa"
predicted_label, predicted_prob, class_probabilities = predict(text_to_classify)
# NOTE(fix): the original last line carried a trailing " |" extraction artifact,
# which made it a syntax error; it has been removed. Output strings are unchanged.
print(f"Clase predicha: {predicted_label} (probabilidad = {predicted_prob:.2f})")
print(f"Probabilidades de todas las clases: Negativo: {class_probabilities[0]:.2f}, Neutro: {class_probabilities[1]:.2f}, Positivo: {class_probabilities[2]:.2f}")