Commit · 7e4e636
Parent(s): bc70f4a
initial commit
gen.py
CHANGED
@@ -162,7 +162,7 @@ prompt = (
 
 
 def generate(event):
-    combined_input = f"{prompt} {event}"
+    combined_input = f"{prompt} {event}"
     prompt_msg = [{'role': 'user', 'content': combined_input}]
 
     inputs = tokenizer.apply_chat_template(
@@ -178,8 +178,10 @@ def generate(event):
         do_sample=True
     )
 
+    # Get the length of the input tokens (adjust based on your tokenizer)
+    input_length = len(tokenizer.encode(combined_input))
 
-    output_text = tokenizer.decode(tokens[0], skip_special_tokens=True)
+    output_text = tokenizer.decode(tokens[0][input_length:], skip_special_tokens=True)
     print(output_text)
     json_start_index = output_text.find("<json>")
     json_end_index = output_text.find("</json>")
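The added lines trim the echoed prompt from the decoded output by re-encoding combined_input and slicing tokens[0]. Note that len(tokenizer.encode(combined_input)) can differ slightly from the number of tokens actually fed to the model, because apply_chat_template adds role markers and special tokens. Below is a minimal sketch of an alternative that takes the slice offset from the chat-template tensor itself; the checkpoint name, max_new_tokens value, and the model.generate call are placeholders and assumptions, not taken from this Space.

# Sketch only: decode just the newly generated tokens, slicing at the exact
# prompt length that was passed to generate(). Checkpoint name is a placeholder.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Qwen/Qwen2.5-0.5B-Instruct"  # placeholder, not necessarily the Space's model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

prompt_msg = [{'role': 'user', 'content': 'combined prompt and event text'}]
inputs = tokenizer.apply_chat_template(
    prompt_msg, add_generation_prompt=True, return_tensors="pt"
)

tokens = model.generate(inputs, max_new_tokens=256, do_sample=True)

# inputs.shape[1] is the exact number of prompt tokens the model saw, so the
# slice cannot drift from a separately computed encode() length.
input_length = inputs.shape[1]
output_text = tokenizer.decode(tokens[0][input_length:], skip_special_tokens=True)
print(output_text)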