Thomas G. Lopes committed on
Commit 631cc27 · 1 Parent(s): 95a0fb1

fix type check

.github/workflows/lint-and-test.yml CHANGED
@@ -31,9 +31,6 @@ jobs:
             ${{ runner.os }}-pnpm-store-
       - name: Install dependencies
         run: pnpm install --frozen-lockfile
-      - name: "SvelteKit sync"
-        run: |
-          pnpm run sync
       - name: "Checking lint/format errors"
         run: |
           pnpm run lint
.prettierignore CHANGED
@@ -2,3 +2,17 @@
 package-lock.json
 pnpm-lock.yaml
 yarn.lock
+.pnpm-store
+
+.DS_Store
+node_modules
+/build
+/.svelte-kit
+/package
+.env
+.env.*
+!.env.example
+
+# Ignore files for PNPM, NPM and YARN
+pnpm-lock.yaml
+yarn.lock
package.json CHANGED
@@ -6,6 +6,7 @@
 		"dev": "vite dev",
 		"build": "vite build",
 		"preview": "vite preview",
+		"prepare": "svelte-kit sync || echo ''",
 		"check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json",
 		"check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch",
 		"lint": "prettier . --check . && eslint --ext .js,.ts,.svelte src/",
@@ -36,10 +37,9 @@
 	},
 	"type": "module",
 	"dependencies": {
-		"@huggingface/hub": "^0.15.1",
-		"@huggingface/inference": "^2.7.0",
-		"@huggingface/tasks": "^0.10.22",
-		"@tailwindcss/container-queries": "^0.1.1",
-		"runed": "^0.23.4"
+		"@huggingface/hub": "^1.0.1",
+		"@huggingface/inference": "^3.5.1",
+		"@huggingface/tasks": "^0.17.1",
+		"@tailwindcss/container-queries": "^0.1.1"
 	}
 }
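
The workflow's explicit "SvelteKit sync" step (removed above) is covered by this new `prepare` script: pnpm runs the `prepare` lifecycle hook automatically after `pnpm install`, so the generated types exist before any lint or check script runs, and the `|| echo ''` fallback keeps installs from failing in contexts where `svelte-kit` is unavailable. A minimal sketch of why the sync matters for type checking; the route and endpoint here are hypothetical:

```ts
// `svelte-kit sync` generates .svelte-kit/types, the source of the ambient
// "./$types" modules that svelte-check resolves via tsconfig.json.
// Without a sync before checking, an import like this fails the type check.
import type { PageLoad } from "./$types"; // generated by `svelte-kit sync`

export const load: PageLoad = async ({ fetch }) => {
	const res = await fetch("/api/health"); // hypothetical endpoint
	return { healthy: res.ok };
};
```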
pnpm-lock.yaml CHANGED
@@ -9,20 +9,17 @@ importers:
   .:
     dependencies:
       '@huggingface/hub':
-        specifier: ^0.15.1
-        version: 0.15.2
+        specifier: ^1.0.1
+        version: 1.0.1
       '@huggingface/inference':
-        specifier: ^2.7.0
-        version: 2.8.1
+        specifier: ^3.5.1
+        version: 3.5.1
       '@huggingface/tasks':
-        specifier: ^0.10.22
-        version: 0.10.22
+        specifier: ^0.17.1
+        version: 0.17.1
       '@tailwindcss/container-queries':
         specifier: ^0.1.1
         version: 0.1.1([email protected])
-      runed:
-        specifier: ^0.23.4
-        version: 0.23.4([email protected])
     devDependencies:
       '@sveltejs/adapter-auto':
         specifier: ^3.2.2
@@ -254,19 +251,19 @@
     resolution: {integrity: sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
 
-  '@huggingface/hub@0.15.2':
-    resolution: {integrity: sha512-MKu7RTkEBp8tvn0ZhBIjQwC1CHA+q8rzTYXAVklOTJEo/SZwCewVL+IcEuj4CoA5DnJ1IhyjkBquxN2dFVnEHg==}
+  '@huggingface/hub@1.0.1':
+    resolution: {integrity: sha512-wogGVETaNUV/wYBkny0uQD48L0rK9cttVtbaA1Rw/pGCuSYoZ8YlvTV6zymsGJfXaxQU8zup0aOR2XLIf6HVfg==}
     engines: {node: '>=18'}
 
-  '@huggingface/inference@2.8.1':
-    resolution: {integrity: sha512-EfsNtY9OR6JCNaUa5bZu2mrs48iqeTz0Gutwf+fU0Kypx33xFQB4DKMhp8u4Ee6qVbLbNWvTHuWwlppLQl4p4Q==}
+  '@huggingface/inference@3.5.1':
+    resolution: {integrity: sha512-NwTj5MS1eb4HfSp/O1/PyH1bEhTXl/iFh/K+8yYkUTHtK4AHXi/NDsV2LblhACaTEIhpOpYswL9cZx7z3y1tlg==}
     engines: {node: '>=18'}
 
-  '@huggingface/tasks@0.10.22':
-    resolution: {integrity: sha512-sCtp+A6sq6NXoUU7NXuXWoVNNjKddk1GTQIh3cJ6illF8S4zmFoerCVRvFf19BdgICGvF+RVZiv9sGGK9KRDTg==}
+  '@huggingface/tasks@0.15.9':
+    resolution: {integrity: sha512-cbnZcpMHKdhURWIplVP4obHxAZcxjyRm0zI7peTPksZN4CtIOMmJC4ZqGEymo0lk+0VNkXD7ULwFJ3JjT/VpkQ==}
 
-  '@huggingface/tasks@0.12.30':
-    resolution: {integrity: sha512-A1ITdxbEzx9L8wKR8pF7swyrTLxWNDFIGDLUWInxvks2ruQ8PLRBZe8r0EcjC3CDdtlj9jV1V4cgV35K/iy3GQ==}
+  '@huggingface/tasks@0.17.1':
+    resolution: {integrity: sha512-kN5F/pzwxtmdZ0jORumNyegNKOX/ciU5G/DMZcqK3SJShod4C6yfvBRCMn5sEDzanxtU8VjX+7TaInQFmmU8Nw==}
 
   '@humanwhocodes/[email protected]':
     resolution: {integrity: sha512-DZLEEqFWQFiyK6h5YIeynKx7JlvCYWL0cImfSRXZ9l4Sg2efkFGTuFf6vzXjK1cq6IYkU+Eg/JizXw+TD2vRNw==}
@@ -1395,11 +1392,6 @@ packages:
     resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==}
 
-  runed@0.23.4:
-    resolution: {integrity: sha512-9q8oUiBYeXIDLWNK5DfCWlkL0EW3oGbk845VdKlPeia28l751VpfesaB/+7pI6rnbx1I6rqoZ2fZxptOJLxILA==}
-    peerDependencies:
-      svelte: ^5.7.0
-
     resolution: {integrity: sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==}
     engines: {node: '>=6'}
@@ -1775,17 +1767,17 @@ snapshots:
 
   '@eslint/[email protected]': {}
 
-  '@huggingface/hub@0.15.2':
+  '@huggingface/hub@1.0.1':
     dependencies:
-      '@huggingface/tasks': 0.12.30
+      '@huggingface/tasks': 0.15.9
 
-  '@huggingface/inference@2.8.1':
+  '@huggingface/inference@3.5.1':
     dependencies:
-      '@huggingface/tasks': 0.12.30
+      '@huggingface/tasks': 0.17.1
 
-  '@huggingface/tasks@0.10.22': {}
+  '@huggingface/tasks@0.15.9': {}
 
-  '@huggingface/tasks@0.12.30': {}
+  '@huggingface/tasks@0.17.1': {}
 
   '@humanwhocodes/[email protected]':
     dependencies:
@@ -2844,11 +2836,6 @@ snapshots:
     dependencies:
       queue-microtask: 1.2.3
 
-  runed@0.23.4([email protected]):
-    dependencies:
-      esm-env: 1.2.2
-      svelte: 4.2.19
-
     dependencies:
       mri: 1.2.0
src/lib/components/InferencePlayground/InferencePlayground.svelte CHANGED
@@ -1,37 +1,36 @@
 <script lang="ts">
-	import type { Conversation, ModelEntryWithTokenizer, Session } from "./types";
-	import type { ChatCompletionInputMessage } from "@huggingface/tasks";
+	import type { Conversation, ConversationMessage, ModelEntryWithTokenizer, Session } from "./types";
 
 	import { page } from "$app/stores";
 	import { defaultGenerationConfig } from "./generationConfigSettings";
 	import {
 		createHfInference,
-		handleStreamingResponse,
+		FEATURED_MODELS_IDS,
 		handleNonStreamingResponse,
+		handleStreamingResponse,
 		isSystemPromptSupported,
-		FEATURED_MODELS_IDS,
 	} from "./inferencePlaygroundUtils";
 
+	import { goto } from "$app/navigation";
 	import { onDestroy, onMount } from "svelte";
+	import IconCode from "../Icons/IconCode.svelte";
+	import IconCompare from "../Icons/IconCompare.svelte";
+	import IconDelete from "../Icons/IconDelete.svelte";
+	import IconInfo from "../Icons/IconInfo.svelte";
+	import IconThrashcan from "../Icons/IconThrashcan.svelte";
+	import PlaygroundConversation from "./InferencePlaygroundConversation.svelte";
+	import PlaygroundConversationHeader from "./InferencePlaygroundConversationHeader.svelte";
 	import GenerationConfig, { defaultSystemMessage } from "./InferencePlaygroundGenerationConfig.svelte";
 	import HFTokenModal from "./InferencePlaygroundHFTokenModal.svelte";
 	import ModelSelector from "./InferencePlaygroundModelSelector.svelte";
-	import PlaygroundConversation from "./InferencePlaygroundConversation.svelte";
-	import PlaygroundConversationHeader from "./InferencePlaygroundConversationHeader.svelte";
-	import IconDelete from "../Icons/IconDelete.svelte";
-	import IconCode from "../Icons/IconCode.svelte";
-	import IconInfo from "../Icons/IconInfo.svelte";
-	import IconCompare from "../Icons/IconCompare.svelte";
 	import ModelSelectorModal from "./InferencePlaygroundModelSelectorModal.svelte";
-	import IconThrashcan from "../Icons/IconThrashcan.svelte";
-	import { goto } from "$app/navigation";
 
 	export let models: ModelEntryWithTokenizer[];
 
-	const startMessageUser: ChatCompletionInputMessage = { role: "user", content: "" };
+	const startMessageUser: ConversationMessage = { role: "user", content: "" };
 	const modelIdsFromQueryParam = $page.url.searchParams.get("modelId")?.split(",");
 	const modelsFromQueryParam = modelIdsFromQueryParam?.map(id => models.find(model => model.id === id));
-	const systemMessage: ChatCompletionInputMessage = {
+	const systemMessage: ConversationMessage = {
 		role: "system",
 		content: modelIdsFromQueryParam ? (defaultSystemMessage?.[modelIdsFromQueryParam[0]] ?? "") : "",
 	};
src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte CHANGED
@@ -85,12 +85,13 @@
 		messages.unshift(systemMessage);
 	}
 
-	messages = messages.map(({ role, content }) => ({
+	const res = messages.map(({ role, content }) => ({
 		role,
 		content: JSON.stringify(content).slice(1, -1),
 	}));
+	messages = res;
 
-	return messages;
+	return res;
 }
 
 function highlight(code: string, language: Language) {
@@ -139,7 +140,7 @@ for await (const chunk of stream) {
 	const newContent = chunk.choices[0].delta.content;
 	out += newContent;
 	console.log(newContent);
-}
+  }
 }`,
 });
 } else {
@@ -210,7 +211,7 @@ for await (const chunk of stream) {
 	const newContent = chunk.choices[0].delta.content;
 	out += newContent;
 	console.log(newContent);
-}
+  }
 }`,
 });
 } else {
@@ -270,8 +271,8 @@ client = InferenceClient(api_key="${tokenStr}")
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
 stream = client.chat.completions.create(
-model="${conversation.model.id}",
-messages=messages,
+	model="${conversation.model.id}",
+	messages=messages,
 	${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })},
 	stream=True
 )
@@ -291,8 +292,8 @@ client = InferenceClient(api_key="${tokenStr}")
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
 completion = client.chat.completions.create(
-model="${conversation.model.id}",
-messages=messages,
+	model="${conversation.model.id}",
+	messages=messages,
 	${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })}
 )
 
@@ -338,8 +339,8 @@ client = OpenAI(
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
 stream = client.chat.completions.create(
-model="${conversation.model.id}",
-messages=messages,
+	model="${conversation.model.id}",
+	messages=messages,
 	${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })},
 	stream=True
 )
@@ -362,8 +363,8 @@ client = OpenAI(
 messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
 
 completion = client.chat.completions.create(
-model="${conversation.model.id}",
-messages=messages,
+	model="${conversation.model.id}",
+	messages=messages,
 	${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })}
 )
 
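The `res` indirection above is this file's share of the type fix: mapping into a fresh `const` lets TypeScript keep the inferred `{ role; content: string }` element type for the return value, instead of widening it back to the looser message type when the result is assigned straight into `messages`. A standalone sketch of the pattern, with a simplified message type standing in for the component's own:

```ts
// Simplified stand-in for the playground's message shape (hypothetical).
type Msg = { role: string; content?: string };

function escapeMessages(messages: Msg[]) {
	// A fresh const keeps the narrowed inferred type; returning the
	// reassigned `messages` parameter would yield Msg[] again.
	const res = messages.map(({ role, content }) => ({
		role,
		// JSON.stringify escapes quotes/newlines; slice strips the outer quotes.
		content: JSON.stringify(content).slice(1, -1),
	}));
	messages = res; // keep the local binding in sync, as the component does
	return res;
}

console.log(escapeMessages([{ role: "user", content: 'a "quoted"\nline' }]));
```
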
src/lib/components/InferencePlayground/InferencePlaygroundMessage.svelte CHANGED
@@ -1,8 +1,8 @@
 <script lang="ts">
-	import { type ChatCompletionInputMessage } from "@huggingface/tasks";
 	import { createEventDispatcher } from "svelte";
+	import type { ConversationMessage } from "./types";
 
-	export let message: ChatCompletionInputMessage;
+	export let message: ConversationMessage;
 	export let loading: boolean = false;
 	export let autofocus: boolean = false;
 
src/lib/components/InferencePlayground/inferencePlaygroundUtils.ts CHANGED
@@ -1,4 +1,4 @@
-import { type ChatCompletionInputMessage } from "@huggingface/tasks";
+import { type ChatCompletionOutputMessage } from "@huggingface/tasks";
 import type { Conversation, ModelEntryWithTokenizer } from "./types";
 
 import { HfInference } from "@huggingface/inference";
@@ -25,7 +25,7 @@ export async function handleStreamingResponse(
 			messages,
 			...conversation.config,
 		},
-		{ signal: abortController.signal, use_cache: false }
+		{ signal: abortController.signal }
 	)) {
 		if (chunk.choices && chunk.choices.length > 0 && chunk.choices[0]?.delta?.content) {
 			out += chunk.choices[0].delta.content;
@@ -37,21 +37,18 @@ export async function handleStreamingResponse(
 export async function handleNonStreamingResponse(
 	hf: HfInference,
 	conversation: Conversation
-): Promise<{ message: ChatCompletionInputMessage; completion_tokens: number }> {
+): Promise<{ message: ChatCompletionOutputMessage; completion_tokens: number }> {
 	const { model, systemMessage } = conversation;
 	const messages = [
 		...(isSystemPromptSupported(model) && systemMessage.content?.length ? [systemMessage] : []),
 		...conversation.messages,
 	];
 
-	const response = await hf.chatCompletion(
-		{
-			model: model.id,
-			messages,
-			...conversation.config,
-		},
-		{ use_cache: false }
-	);
+	const response = await hf.chatCompletion({
+		model: model.id,
+		messages,
+		...conversation.config,
+	});
 
 	if (response.choices && response.choices.length > 0) {
 		const { message } = response.choices[0];
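
Two upstream changes drive this hunk: `@huggingface/inference` v3 dropped the `use_cache` request option, leaving only the `AbortSignal` for the streaming call and no options object at all for the one-shot call, and the non-streaming result is now typed with `ChatCompletionOutputMessage`, whose `content` is string-typed rather than the broader input union. A minimal sketch of the updated call shapes, assuming v3; the token and model id are placeholders:

```ts
import { HfInference } from "@huggingface/inference";

const hf = new HfInference("hf_xxx"); // placeholder token
const abortController = new AbortController();
const messages = [{ role: "user", content: "Hello!" }];

// Streaming: the options argument now carries only the abort signal.
for await (const chunk of hf.chatCompletionStream(
	{ model: "meta-llama/Llama-3.1-8B-Instruct", messages }, // placeholder model
	{ signal: abortController.signal }
)) {
	process.stdout.write(chunk.choices[0]?.delta?.content ?? "");
}

// Non-streaming: a single request object, no second options argument.
const response = await hf.chatCompletion({ model: "meta-llama/Llama-3.1-8B-Instruct", messages });
console.log(response.choices[0]?.message.content);
```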
src/lib/components/InferencePlayground/types.ts CHANGED
@@ -2,11 +2,13 @@ import type { GenerationConfig } from "$lib/components/InferencePlayground/gener
 import type { ModelEntry } from "@huggingface/hub";
 import type { ChatCompletionInputMessage } from "@huggingface/tasks";
 
+export type ConversationMessage = Omit<ChatCompletionInputMessage, "content"> & { content?: string };
+
 export type Conversation = {
 	model: ModelEntryWithTokenizer;
 	config: GenerationConfig;
-	messages: ChatCompletionInputMessage[];
-	systemMessage: ChatCompletionInputMessage;
+	messages: ConversationMessage[];
+	systemMessage: ConversationMessage;
 	streaming: boolean;
 };
 
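
The `ConversationMessage` alias is the heart of the fix: in recent `@huggingface/tasks` versions, `content` on `ChatCompletionInputMessage` is broader than a plain string (it is optional and can carry structured content parts), which breaks string-only consumers such as a textarea binding. The alias keeps every other field and pins `content` to `string | undefined`. A small sketch of how the narrowing behaves; the `edit` helper is purely illustrative:

```ts
import type { ChatCompletionInputMessage } from "@huggingface/tasks";

// Same definition as in types.ts: keep every field, narrow `content`.
type ConversationMessage = Omit<ChatCompletionInputMessage, "content"> & { content?: string };

// Illustrative helper: `content` can now feed string-only APIs
// (a textarea value, string concatenation) without extra narrowing.
function edit(message: ConversationMessage, text: string): ConversationMessage {
	return { ...message, content: text.trim() };
}

const msg: ConversationMessage = { role: "user", content: "hi" };
console.log(edit(msg, "  hello  ").content); // "hello"
```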