fschwartzer committed on
Commit
34e67a1
1 Parent(s): bd30d09

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -45
app.py CHANGED
@@ -1,56 +1,14 @@
1
  import streamlit as st
2
  import pandas as pd
3
- import torch
4
- from transformers import AutoTokenizer, AutoModelForCausalLM
5
- import os
6
-
7
- os.system("!GIT_LFS_SKIP_SMUDGE=1 git clone https://huggingface.co/meta-llama/Llama-2-7b")
8
-
9
- hf_token = os.getenv("HF_TOKEN")
10
- token = os.getenv("HF_TOKEN")
11
-
12
- # Load the tokenizer
13
- model_name = "meta-llama/Llama-2-7b"
14
- tokenizer = AutoTokenizer.from_pretrained(model_name)
15
-
16
- # Load the model
17
- model = AutoModelForCausalLM.from_pretrained(model_name)
18
-
19
- # Apply dynamic quantization for CPU
20
- model = torch.quantization.quantize_dynamic(
21
- model, {torch.nn.Linear}, dtype=torch.qint8
22
- )
23
-
24
- # Move model to CPU
25
- device = torch.device("cpu")
26
- model = model.to(device)
27
-
28
- # Set the padding token to the end-of-sequence token
29
- if tokenizer.pad_token is None:
30
- tokenizer.pad_token = tokenizer.eos_token
31
 
32
  # Load the anomalies data
33
  df = pd.read_csv('anomalies.csv', sep=',', decimal='.')
34
 
35
  # Function to generate a response
36
  def response(question):
37
- prompt = f"Considerando os dados: {df.to_string(index=False)}, onde a coluna 'ds' está em formato DateTime, a coluna 'real' é o valor da despesa e a coluna 'group' é o grupo da despesa. Pergunta: {question}"
38
-
39
- inputs = tokenizer(prompt, return_tensors='pt', padding='max_length', truncation=True, max_length=256).to(device)
40
-
41
- generated_ids = model.generate(
42
- inputs['input_ids'],
43
- attention_mask=inputs['attention_mask'],
44
- max_length=inputs['input_ids'].shape[1] + 50,
45
- temperature=0.7,
46
- top_p=0.9,
47
- no_repeat_ngram_size=2,
48
- num_beams=3,
49
- )
50
-
51
- generated_text = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
52
- final_response = generated_text.split("Resposta:")[-1].split(".")[0] + "."
53
-
54
  return final_response
55
 
56
  # Streamlit interface
 
1
  import streamlit as st
2
  import pandas as pd
3
+ from transformers import pipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Read the anomaly records that the question-answering model is queried against.
# Columns (per the old prompt text): 'ds' (datetime), 'real' (expense value),
# 'group' (expense group) — TODO confirm against the CSV.
df = pd.read_csv('anomalies.csv', sep=',', decimal='.')
  # Function to generate a response
# Function to generate a response
def response(question):
    """Answer a natural-language question about the anomalies table.

    Uses a TAPAS table-question-answering pipeline over the module-level
    ``df`` DataFrame loaded from ``anomalies.csv``.

    Parameters
    ----------
    question : str
        The user's question about the tabular data.

    Returns
    -------
    str
        The content of the first table cell selected by the model.
    """
    # NOTE(review): building the pipeline on every call re-downloads/re-loads
    # the model; acceptable for a demo, but hoist to module level for speed.
    tqa = pipeline(task="table-question-answering",
                   model="google/tapas-large-finetuned-wtq")

    # TAPAS requires every table cell to be a string; convert the loaded
    # DataFrame accordingly. (The original referenced an undefined name
    # `table`, which raised NameError on every call.)
    table = df.astype(str)

    # Bug fix: the original assigned the answer to the misspelled
    # `final_rresposta` but returned `final_response`, which was never
    # defined. Assign and return the same name.
    final_response = tqa(table=table, query=question)['cells'][0]
    return final_response
13
 
14
  # Streamlit interface