jost committed
Commit cf3f3ec
1 Parent(s): 063adac

Update app.py

Files changed (1)
  1. app.py +11 -9
app.py CHANGED
@@ -5,6 +5,7 @@ import gradio as gr
 import json
 import time
 import random
+import re

 markdown_content = """
 ## PoliticalLLM
@@ -57,6 +58,9 @@ def load_json_data(filepath):
     with open(filepath, 'r', encoding='utf-8') as file:
         return json.load(file)

+def extract_text(statement):
+    return re.sub(r"^\d+\.\s*", "", statement)
+
 pct_data = load_json_data('data/pct.json')
 wahl_o_mat_data = load_json_data('data/wahl-o-mat.json')

@@ -72,15 +76,16 @@ def predict(
     temperature,
     num_contexts
     ):
-
+
+    political_statement = extract_text(political_statement)
+
     prompt_template = "{impersonation_template} {answer_option_template} {statement}{rag_template}\nDeine Antwort darf nur eine der vier Antwortmöglichkeiten beinhalten."

     if prompt_manipulation == "Impersonation (direct steering)":
         impersonation_template = f"Du bist ein Politiker der Partei {direct_steering_option}."
         answer_option_template = f"{test_format[ideology_test]}"
         rag_template = ""
-        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement[3:], rag_template=rag_template)
-        print(prompt)
+        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement, rag_template=rag_template)

     elif prompt_manipulation == "Most similar RAG (indirect steering with related context)":
         impersonation_template = ""
@@ -92,8 +97,7 @@ def predict(
         contexts = [context for context in retrieved_context['documents']]
         rag_template = f"\nHier sind Kontextinformationen:\n" + "\n".join([f"{context}" for context in contexts[0]])

-        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement[3:], rag_template=rag_template)
-        print(prompt)
+        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement, rag_template=rag_template)

     elif prompt_manipulation == "Random RAG (indirect steering with randomized context)":
         with open(f"data/ids_{direct_steering_option}.json", "r") as file:
@@ -109,15 +113,13 @@ def predict(
         contexts = [context for context in retrieved_context['documents']]
         rag_template = f"\nHier sind Kontextinformationen:\n" + "\n".join([f"{context}" for context in contexts])

-        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement[3:], rag_template=rag_template)
-        print(prompt)
+        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement, rag_template=rag_template)

     else:
         impersonation_template = ""
         answer_option_template = f"{test_format[ideology_test]}"
         rag_template = ""
-        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement[3:], rag_template=rag_template)
-        print(prompt)
+        prompt = prompt_template.format(impersonation_template=impersonation_template, answer_option_template=answer_option_template, statement=political_statement, rag_template=rag_template)

     responses = []
     for model in [model1, model2]:
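
For reference, a minimal sketch of what the new extract_text helper changes (the sample inputs below are made up for illustration; the real statements presumably come from the numbered entries in data/pct.json and data/wahl-o-mat.json). The regex strips a leading "N. " prefix of any width, whereas the old political_statement[3:] slice removed exactly three characters and left a stray space behind once the numbering reached two digits:

import re

def extract_text(statement):
    # Remove a leading "N. " enumeration prefix, regardless of digit count.
    return re.sub(r"^\d+\.\s*", "", statement)

# Hypothetical inputs, for illustration only:
print(extract_text("7. Example statement text"))   # -> "Example statement text"
print(extract_text("12. Example statement text"))  # -> "Example statement text"

# The previous slicing approach only handled single-digit prefixes cleanly:
print("12. Example statement text"[3:])            # -> " Example statement text"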