feat(search-api): add `optimizationMode`

This commit is contained in:
ItzCrazyKns 2024-10-11 10:54:08 +05:30
parent 7cce853618
commit 0a7167eb04
3 changed files with 25 additions and 4 deletions

View File

@ -26,6 +26,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
"provider": "openai", "provider": "openai",
"model": "text-embedding-3-large" "model": "text-embedding-3-large"
}, },
"optimizationMode": "speed",
"focusMode": "webSearch", "focusMode": "webSearch",
"query": "What is Perplexica", "query": "What is Perplexica",
"history": [ "history": [
@ -37,7 +38,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
### Request Parameters ### Request Parameters
- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`. - **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "gpt-4o-mini" instead of the display name "GPT 4 omni mini").
- `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`). - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
- `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`). - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
@ -45,7 +46,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
- `customOpenAIBaseURL`: If you're using a custom OpenAI instance, provide the base URL. - `customOpenAIBaseURL`: If you're using a custom OpenAI instance, provide the base URL.
- `customOpenAIKey`: The API key for a custom OpenAI instance. - `customOpenAIKey`: The API key for a custom OpenAI instance.
- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`. - **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`. Make sure to use the key value (For example "text-embedding-3-large" instead of the display name "Text Embedding 3 Large").
- `provider`: The provider for the embedding model (e.g., `openai`). - `provider`: The provider for the embedding model (e.g., `openai`).
- `model`: The specific embedding model (e.g., `text-embedding-3-large`). - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
@ -54,9 +55,15 @@ The API accepts a JSON object in the request body, where you define the focus mo
- `webSearch`, `academicSearch`, `writingAssistant`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`. - `webSearch`, `academicSearch`, `writingAssistant`, `wolframAlphaSearch`, `youtubeSearch`, `redditSearch`.
- **`optimizationMode`** (string, optional): Specifies the optimization mode to control the balance between performance and quality. Available modes:
- `speed`: Prioritize speed and return the fastest answer.
- `balanced`: Provide a balanced answer with good speed and reasonable quality.
- **`query`** (string, required): The search query or question. - **`query`** (string, required): The search query or question.
- **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example: - **`history`** (array, optional): An array of message pairs representing the conversation history. Each pair consists of a role (either 'human' or 'assistant') and the message content. This allows the system to use the context of the conversation to refine results. Example:
```json ```json
[ [
["human", "What is Perplexica?"], ["human", "What is Perplexica?"],

View File

@ -12,7 +12,19 @@ router.get('/', async (req, res) => {
const [chatModelProviders, embeddingModelProviders] = await Promise.all([ const [chatModelProviders, embeddingModelProviders] = await Promise.all([
getAvailableChatModelProviders(), getAvailableChatModelProviders(),
getAvailableEmbeddingModelProviders(), getAvailableEmbeddingModelProviders(),
]); ]);
Object.keys(chatModelProviders).forEach((provider) => {
Object.keys(chatModelProviders[provider]).forEach((model) => {
delete chatModelProviders[provider][model].model;
});
});
Object.keys(embeddingModelProviders).forEach((provider) => {
Object.keys(embeddingModelProviders[provider]).forEach((model) => {
delete embeddingModelProviders[provider][model].model;
});
});
res.status(200).json({ chatModelProviders, embeddingModelProviders }); res.status(200).json({ chatModelProviders, embeddingModelProviders });
} catch (err) { } catch (err) {

View File

@ -25,6 +25,7 @@ interface embeddingModel {
} }
interface ChatRequestBody { interface ChatRequestBody {
optimizationMode: 'speed' | 'balanced';
focusMode: string; focusMode: string;
chatModel?: chatModel; chatModel?: chatModel;
embeddingModel?: embeddingModel; embeddingModel?: embeddingModel;
@ -41,6 +42,7 @@ router.post('/', async (req, res) => {
} }
body.history = body.history || []; body.history = body.history || [];
body.optimizationMode = body.optimizationMode || 'balanced';
const history: BaseMessage[] = body.history.map((msg) => { const history: BaseMessage[] = body.history.map((msg) => {
if (msg[0] === 'human') { if (msg[0] === 'human') {
@ -119,7 +121,7 @@ router.post('/', async (req, res) => {
return res.status(400).json({ message: 'Invalid focus mode' }); return res.status(400).json({ message: 'Invalid focus mode' });
} }
const emitter = searchHandler(body.query, history, llm, embeddings); const emitter = searchHandler(body.query, history, llm, embeddings, body.optimizationMode);
let message = ''; let message = '';
let sources = []; let sources = [];