cllatMTK committed on
Commit
b62d3e2
1 Parent(s): ea5ec2c

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +2 -49
app.py CHANGED
@@ -16,60 +16,13 @@ db = mongo_client[DB_NAME]
16
  conversations_collection = db['conversations']
17
 
18
  DESCRIPTION = """
19
- # Language Models for Taiwanese Culture
20
-
21
- <p align="center">
22
- ✍️ <a href="https://huggingface.co/spaces/yentinglin/Taiwan-LLaMa2" target="_blank">Online Demo</a>
23
-
24
- 🤗 <a href="https://huggingface.co/yentinglin" target="_blank">HF Repo</a> • 🐦 <a href="https://twitter.com/yentinglin56" target="_blank">Twitter</a> • 📃 <a href="https://arxiv.org/pdf/2305.13711.pdf" target="_blank">[Paper Coming Soon]</a>
25
- • 👨️ <a href="https://github.com/MiuLab/Taiwan-LLaMa/tree/main" target="_blank">Github Repo</a>
26
- <br/><br/>
27
- <img src="https://www.csie.ntu.edu.tw/~miulab/taiwan-llama/logo-v2.png" width="100"> <br/>
28
- </p>
29
-
30
- # 🌟 Check out the New [Taiwan-LLM UI](http://www.twllm.com) 🌟
31
-
32
-
33
- Taiwan-LLaMa is a fine-tuned model specifically designed for Traditional Mandarin applications. It is built upon the LLaMa 2 architecture and includes a pretraining phase with over 5 billion tokens and fine-tuning with over 490k multi-turn conversational data in Traditional Mandarin.
34
-
35
- ## Key Features
36
-
37
- 1. **Traditional Mandarin Support**: The model is fine-tuned to understand and generate text in Traditional Mandarin, making it suitable for Taiwanese culture and related applications.
38
-
39
- 2. **Instruction-Tuned**: Further fine-tuned on conversational data to offer context-aware and instruction-following responses.
40
-
41
- 3. **Performance on Vicuna Benchmark**: Taiwan-LLaMa's relative performance on Vicuna Benchmark is measured against models like GPT-4 and ChatGPT. It's particularly optimized for Taiwanese culture.
42
-
43
- 4. **Flexible Customization**: Advanced options for controlling the model's behavior like system prompt, temperature, top-p, and top-k are available in the demo.
44
-
45
- ## Model Versions
46
-
47
- Different versions of Taiwan-LLaMa are available:
48
-
49
- - **Taiwan-LLM v2.0 (This demo)**: Cleaner pretraining, Better post-training
50
- - **Taiwan-LLM v1.0**: Optimized for Taiwanese Culture
51
- - **Taiwan-LLM v0.9**: Partial instruction set
52
- - **Taiwan-LLM v0.0**: No Traditional Mandarin pretraining
53
-
54
- The models can be accessed from the provided links in the Hugging Face repository.
55
-
56
- Try out the demo to interact with Taiwan-LLaMa and experience its capabilities in handling Traditional Mandarin!
57
  """
58
 
59
  LICENSE = """
60
- ## Licenses
61
-
62
- - Code is licensed under Apache 2.0 License.
63
- - Models are licensed under the LLAMA 2 Community License.
64
- - By using this model, you agree to the terms and conditions specified in the license.
65
- - By using this demo, you agree to share your input utterances with us to improve the model.
66
-
67
- ## Acknowledgements
68
-
69
- Taiwan-LLaMa project acknowledges the efforts of the [Meta LLaMa team](https://github.com/facebookresearch/llama) and [Vicuna team](https://github.com/lm-sys/FastChat) in democratizing large language models.
70
  """
71
 
72
- DEFAULT_SYSTEM_PROMPT = "你是人工智慧助理,以下是用戶和人工智能助理之間的對話。你要對用戶的問題提供有用、安全、詳細和禮貌的回答。 您是由國立臺灣大學的林彥廷博士生為研究目的而建造的。"
73
 
74
  endpoint_url = os.environ.get("ENDPOINT_URL", "http://127.0.0.1:8080")
75
  client = Client(endpoint_url, timeout=120)
 
16
  conversations_collection = db['conversations']
17
 
18
  DESCRIPTION = """
19
+ # Breeze
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  """
21
 
22
  LICENSE = """
 
 
 
 
 
 
 
 
 
 
23
  """
24
 
25
+ DEFAULT_SYSTEM_PROMPT = "You are a helpful AI assistant built by MediaTek Research. The user you are helping speaks Traditional Chinese and comes from Taiwan."
26
 
27
  endpoint_url = os.environ.get("ENDPOINT_URL", "http://127.0.0.1:8080")
28
  client = Client(endpoint_url, timeout=120)