import gradio as gr
import os
import spaces
from transformers import AutoTokenizer, TextIteratorStreamer
from auto_gptq import AutoGPTQForCausalLM
from threading import Thread

# Read the Hugging Face token from the environment
HF_TOKEN = os.environ.get("HF_TOKEN", None)


DESCRIPTION = '''
<div>
<h1 style="text-align: center;">CyberNative-AI/Colibri_8b_v0.1</h1>
<p>This Space demonstrates the cybersecurity-tuned model <a href="https://huggingface.co/CyberNative-AI/Colibri_8b_v0.1"><b>Colibri_8b_v0.1</b></a>.</p>
</div>
'''

LICENSE = """
<p/>
---
Colibri v0.1 is built on top of Dolphin Llama 3.
"""

PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <img src="https://huggingface.co/CyberNative-AI/Colibri_8b_v0.1/resolve/main/cybernative_ai_colibri_logo.jpeg" style="width: 80%; max-width: 550px; height: auto; opacity: 0.55;  "> 
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">Colibri_v0.1 Dolphin Meta llama3</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Ask me anything...</p>
</div>
"""


css = """
h1 {
  text-align: center;
  display: block;
}
#duplicate-button {
  margin: auto;
  color: white;
  background: #1565c0;
  border-radius: 100vh;
}
"""

# Load the tokenizer and the 4-bit GPTQ-quantized model (group size 128)
tokenizer = AutoTokenizer.from_pretrained("CyberNative-AI/Colibri_8b_v0.1_gptq_128_4bit")
model = AutoGPTQForCausalLM.from_quantized("CyberNative-AI/Colibri_8b_v0.1_gptq_128_4bit", device="cpu")
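# NOTE: the model is loaded on CPU here; under ZeroGPU it would need to be moved to CUDA
# inside the @spaces.GPU-decorated function for the GPU allocation to actually be used.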

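# Stop generation at either the model's EOS token or Llama 3's end-of-turn marker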
terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

@spaces.GPU(duration=120)
def chat_llama3_8b(message: str, 
              history: list, 
              temperature: float, 
              max_new_tokens: int
             ):
    """
    Stream a response from the Colibri (Llama 3 8B) model.
    Args:
        message (str): The input message.
        history (list): The conversation history maintained by ChatInterface.
        temperature (float): The sampling temperature for the response.
        max_new_tokens (int): The maximum number of new tokens to generate.
    Yields:
        str: The response generated so far.
    """
    conversation = []
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

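    # Apply the chat template and move the prompt tokens to the model's device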
    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)
    
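    # The streamer yields decoded text chunks as generate() produces tokens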
    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
    )
    # Force greedy decoding (do_sample=False) when the temperature is 0, since sampling with temperature 0 would crash.
    if temperature == 0:
        generate_kwargs['do_sample'] = False
        
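    # Run generation in a background thread so partial output can be yielded as it streams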
    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

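    # Accumulate streamed chunks and yield the running response for live display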
    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)
        

# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:
    
    gr.Markdown(DESCRIPTION)
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0,
                      maximum=1, 
                      step=0.1,
                      value=0.95, 
                      label="Temperature", 
                      render=False),
            gr.Slider(minimum=128, 
                      maximum=4096,
                      step=1,
                      value=512, 
                      label="Max new tokens", 
                      render=False),
            ],
        examples=[
            ['What are the two main methods used in the research to collect DKIM information?'],
            ['What is the primary purpose of OS fingerprinting using tools like Nmap, and why might it not always be 100% accurate?'],
            ['What is 9,000 * 9,000?'],
            ['What technique can be used to enumerate SMB shares within a Windows environment from a Windows client?'],
            ['What is the primary benefit of interleaving in cybersecurity education and training?']
            ],
        cache_examples=False,
    )
    
    gr.Markdown(LICENSE)
    
if __name__ == "__main__":
    demo.launch()