feat(search-api): add `optimizationMode`
parent 7cce853618
commit 0a7167eb04
@@ -26,6 +26,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
     "provider": "openai",
     "model": "text-embedding-3-large"
   },
+  "optimizationMode": "speed",
   "focusMode": "webSearch",
   "query": "What is Perplexica",
   "history": [
@@ -37,7 +38,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
 
 ### Request Parameters
 
-- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`.
+- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (for example, `gpt-4o-mini` instead of the display name `GPT 4 omni mini`).
 
   - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
   - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
@@ -45,7 +46,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
   - `customOpenAIBaseURL`: If you’re using a custom OpenAI instance, provide the base URL.
   - `customOpenAIKey`: The API key for a custom OpenAI instance.
 
-- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`.
+- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (for example, `text-embedding-3-large` instead of the display name `Text Embedding 3 Large`).
 
   - `provider`: The provider for the embedding model (e.g., `openai`).
   - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
@@ -54,9 +55,15 @@ The API accepts a JSON object in the request body, where you define the focus mo
   - `webSearch`, `academicSearch`, `writingAssistant`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`.
 
+- **`optimizationMode`** (string, optional): Specifies the optimization mode to control the balance between performance and quality. Available modes:
+
+  - `speed`: Prioritize speed and return the fastest answer.
+  - `balanced`: Provide a balanced answer with good speed and reasonable quality.
+
 - **`query`** (string, required): The search query or question.
 
 - **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:
 
   ```json
   [
     ["human", "What is Perplexica?"],
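Taken together, the documentation hunks above describe the new request shape. The sketch below shows one way a client might exercise `optimizationMode`; the endpoint path (`http://localhost:3001/api/search`) and the helper around it are assumptions inferred from these docs, not something the diff itself confirms.

```ts
// Minimal sketch of a POST request using the new `optimizationMode` field.
// The endpoint path and response shape are assumptions, not part of this diff.
interface SearchRequestBody {
  chatModel?: { provider: string; model: string };
  embeddingModel?: { provider: string; model: string };
  optimizationMode?: 'speed' | 'balanced'; // omitted -> server defaults to 'balanced'
  focusMode: string;
  query: string;
  history?: [string, string][]; // e.g. [['human', 'What is Perplexica?']]
}

async function askPerplexica(query: string): Promise<unknown> {
  const body: SearchRequestBody = {
    optimizationMode: 'speed', // favour the fastest answer over maximum quality
    focusMode: 'webSearch',
    query,
    history: [['human', 'What is Perplexica?']],
  };

  const res = await fetch('http://localhost:3001/api/search', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  });
  return res.json();
}
```

Passing `'speed'` here mirrors the JSON example in the docs; omitting the field lets the server-side default apply.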
@@ -12,7 +12,19 @@ router.get('/', async (req, res) => {
     const [chatModelProviders, embeddingModelProviders] = await Promise.all([
       getAvailableChatModelProviders(),
       getAvailableEmbeddingModelProviders(),
     ]);
 
+    Object.keys(chatModelProviders).forEach((provider) => {
+      Object.keys(chatModelProviders[provider]).forEach((model) => {
+        delete chatModelProviders[provider][model].model;
+      });
+    });
+
+    Object.keys(embeddingModelProviders).forEach((provider) => {
+      Object.keys(embeddingModelProviders[provider]).forEach((model) => {
+        delete embeddingModelProviders[provider][model].model;
+      });
+    });
+
     res.status(200).json({ chatModelProviders, embeddingModelProviders });
   } catch (err) {
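The loops added to this route strip the instantiated `model` objects from each provider entry so that only serializable metadata reaches the JSON response. As a rough sketch of how a client might consume that response and pick a model key (the `displayName` field is an assumption about what remains after stripping):

```ts
// Sketch: consuming GET /api/models after the `.model` instances are removed.
// `displayName` is assumed metadata; only the object keys are guaranteed by the docs.
type ProviderMap = Record<string, Record<string, { displayName?: string }>>;

async function listChatModelKeys(): Promise<string[]> {
  const res = await fetch('http://localhost:3001/api/models');
  const data = (await res.json()) as {
    chatModelProviders: ProviderMap;
    embeddingModelProviders: ProviderMap;
  };

  // The keys (e.g. "gpt-4o-mini") are what the search API expects in
  // `chatModel.model`, not the human-readable display names.
  return Object.values(data.chatModelProviders).flatMap((models) =>
    Object.keys(models),
  );
}
```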
@@ -25,6 +25,7 @@ interface embeddingModel {
 }
 
 interface ChatRequestBody {
+  optimizationMode: 'speed' | 'balanced';
   focusMode: string;
   chatModel?: chatModel;
   embeddingModel?: embeddingModel;
@@ -41,6 +42,7 @@ router.post('/', async (req, res) => {
     }
 
     body.history = body.history || [];
+    body.optimizationMode = body.optimizationMode || 'balanced';
 
     const history: BaseMessage[] = body.history.map((msg) => {
       if (msg[0] === 'human') {
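The route silently falls back to `'balanced'` when the client omits `optimizationMode`. If stricter validation were ever wanted, a hypothetical guard could default and reject unknown values in one place; nothing like this exists in the diff:

```ts
// Hypothetical guard: default missing values to 'balanced' and reject anything
// that is not one of the documented modes.
const optimizationModes = ['speed', 'balanced'] as const;
type OptimizationMode = (typeof optimizationModes)[number];

function resolveOptimizationMode(value: unknown): OptimizationMode {
  if (value === undefined || value === null) return 'balanced';
  if ((optimizationModes as readonly string[]).includes(value as string)) {
    return value as OptimizationMode;
  }
  throw new Error(`Invalid optimizationMode: ${String(value)}`);
}
```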
@@ -119,7 +121,7 @@ router.post('/', async (req, res) => {
       return res.status(400).json({ message: 'Invalid focus mode' });
     }
 
-    const emitter = searchHandler(body.query, history, llm, embeddings);
+    const emitter = searchHandler(body.query, history, llm, embeddings, body.optimizationMode);
 
     let message = '';
     let sources = [];
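Finally, the selected mode is threaded through to the focus-mode handler as an extra argument. The handler implementations are not shown in this diff, so the signature below is only a hedged sketch of how one might accept and default the new parameter:

```ts
// Illustrative sketch only: the real handler signatures are not part of this diff.
import { EventEmitter } from 'events';

type OptimizationMode = 'speed' | 'balanced';

const handleWebSearch = (
  query: string,
  history: unknown[],
  llm: unknown,
  embeddings: unknown,
  optimizationMode: OptimizationMode = 'balanced',
): EventEmitter => {
  const emitter = new EventEmitter();
  // A handler could, for instance, skip expensive document reranking when the
  // caller asked for 'speed' and keep it for 'balanced'.
  const rerankDocuments = optimizationMode === 'balanced';
  console.log(`handling "${query}" (rerank: ${rerankDocuments})`);
  return emitter;
};
```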