import json
import re
import requests
from messagers.message_outputer import OpenaiStreamOutputer
from utils.logger import logger
from utils.enver import enver


class MessageStreamer:
    MODEL_MAP = {
        "mixtral-8x7b": "mistralai/Mixtral-8x7B-Instruct-v0.1",  # 72.62, fast [Recommended]
        "mistral-7b": "mistralai/Mistral-7B-Instruct-v0.2",  # 65.71, fast
        "openchat-3.5": "openchat/openchat-3.5-1210",  # ??, fast
        # "zephyr-7b-alpha": "HuggingFaceH4/zephyr-7b-alpha",  # 59.5, fast
        # "zephyr-7b-beta": "HuggingFaceH4/zephyr-7b-beta",  # 61.95, slow
        "default": "mistralai/Mixtral-8x7B-Instruct-v0.1",
    }
    STOP_SEQUENCES_MAP = {
        "mixtral-8x7b": "</s>",
        "mistral-7b": "</s>",
        "openchat-3.5": "<|end_of_turn|>",
    }

    def __init__(self, model: str):
        if model in self.MODEL_MAP:
            self.model = model
        else:
            self.model = "default"
        self.model_fullname = self.MODEL_MAP[self.model]
        self.message_outputer = OpenaiStreamOutputer()

    def parse_line(self, line):
        try:
            # Streamed lines arrive as SSE events, e.g. `data:{"token": {"text": "...", ...}, ...}`
            line = line.decode("utf-8")
            line = re.sub(r"data:\s*", "", line)
            data = json.loads(line)

            # Return the token text if present; fall back to an empty string
            # when "text" is missing, and None when there is no "token" key.
            if "token" in data:
                return data["token"].get("text", "")
            else:
                return None
        except Exception as e:
            logger.err(f"Failed to parse line: {e}")
            return None

    def chat_response(
        self,
        prompt: str = None,
        temperature: float = 0.01,
        max_new_tokens: int = 8192,
        api_key: str = None,
    ):
        # https://huggingface.co/docs/api-inference/detailed_parameters?code=curl
        # curl --proxy http://<server>:<port> https://api-inference.huggingface.co/models/<org>/<model_name> -X POST -d '{"inputs":"who are you?","parameters":{"max_new_token":64}}' -H 'Content-Type: application/json' -H 'Authorization: Bearer <HF_TOKEN>'
        self.request_url = (
            f"https://api-inference.huggingface.co/models/{self.model_fullname}"
        )
        self.request_headers = {
            "Content-Type": "application/json",
        }

        if api_key:
            logger.note(
                f"Using API Key: {api_key[:3]}{(len(api_key)-7)*'*'}{api_key[-4:]}"
            )
            self.request_headers["Authorization"] = f"Bearer {api_key}"

        # References:
        #   huggingface_hub/inference/_client.py:
        #     class InferenceClient > def text_generation()
        #   huggingface_hub/inference/_text_generation.py:
        #     class TextGenerationRequest > param `stream`
        # https://huggingface.co/docs/text-generation-inference/conceptual/streaming#streaming-with-curl
        self.request_body = {
            "inputs": prompt,
            "parameters": {
                "temperature": max(temperature, 0.01),  # must be positive
                "max_new_tokens": max_new_tokens,
                "return_full_text": False,
            },
            "stream": True,
        }

        # Look up the stop sequence for this model; None when the model has no
        # entry, so later comparisons simply never match.
        self.stop_sequences = self.STOP_SEQUENCES_MAP.get(self.model)
        # if self.stop_sequences:
        #     self.request_body["parameters"]["stop_sequences"] = [self.stop_sequences]

        logger.back(self.request_url)
        enver.set_envs(proxies=True)
        stream_response = requests.post(
            self.request_url,
            headers=self.request_headers,
            json=self.request_body,
            proxies=enver.requests_proxies,
            stream=True,
        )
        status_code = stream_response.status_code
        if status_code == 200:
            logger.success(status_code)
        else:
            logger.err(status_code)

        return stream_response

    def chat_return_dict(self, stream_response):
        final_output = self.message_outputer.default_data.copy()
        final_output["choices"] = [
            {
                "index": 0,
                "finish_reason": "stop",
                "message": {
                    "role": "assistant",
                    "content": "",
                },
            }
        ]
    
        logger.back(final_output)
    
        final_content = ""
        for line in stream_response.iter_lines():
            if not line:
                continue

            content = self.parse_line(line)
            # Skip lines that could not be parsed into a token
            if content is None:
                continue

            if content.strip() == self.stop_sequences:
                logger.success("\n[Finished]")
                break
            else:
                logger.back(content, end="")
                final_content += content

        if self.stop_sequences:
            final_content = final_content.replace(self.stop_sequences, "")
    
        final_output["choices"][0]["message"]["content"] = final_content
        return final_output

    def chat_return_generator(self, stream_response):
        is_finished = False
        for line in stream_response.iter_lines():
            if not line:
                continue

            content = self.parse_line(line)
            # Skip lines that could not be parsed into a token
            if content is None:
                continue

            if content.strip() == self.stop_sequences:
                content_type = "Finished"
                logger.success("\n[Finished]")
                is_finished = True
            else:
                content_type = "Completions"
                logger.back(content, end="")

            output = self.message_outputer.output(
                content=content, content_type=content_type
            )
            yield output

        if not is_finished:
            yield self.message_outputer.output(content="", content_type="Finished")
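

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module): stream a
    # completion for a hard-coded prompt and print the merged result.
    # Assumes the HuggingFace Inference API is reachable from this machine;
    # an API key can optionally be passed via the `api_key` argument.
    streamer = MessageStreamer(model="mixtral-8x7b")
    stream_response = streamer.chat_response(
        prompt="Who are you?",
        temperature=0.01,
        max_new_tokens=512,
    )
    result = streamer.chat_return_dict(stream_response)
    print(result["choices"][0]["message"]["content"])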