Reapply "Merge remote-tracking branch 'origin/master' into ollama-auth"

This reverts commit d75f1c743e.
yeet 2024-10-11 10:26:10 +02:00
parent d75f1c743e
commit b048c4b173
8 changed files with 138 additions and 19 deletions

.github/workflows/docker-build.yaml (new file, +70)

@@ -0,0 +1,70 @@
name: Build & Push Docker Images

on:
  push:
    branches:
      - master
  release:
    types: [published]

jobs:
  build-and-push:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        service: [backend, app]
    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        with:
          install: true

      - name: Log in to DockerHub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKER_USERNAME }}
          password: ${{ secrets.DOCKER_PASSWORD }}

      - name: Extract version from release tag
        if: github.event_name == 'release'
        id: version
        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV

      - name: Build and push Docker image for ${{ matrix.service }}
        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
        run: |
          docker buildx create --use
          if [[ "${{ matrix.service }}" == "backend" ]]; then \
            DOCKERFILE=backend.dockerfile; \
            IMAGE_NAME=perplexica-backend; \
          else \
            DOCKERFILE=app.dockerfile; \
            IMAGE_NAME=perplexica-frontend; \
          fi
          docker buildx build --platform linux/amd64,linux/arm64 \
            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
            --cache-to=type=inline \
            -f $DOCKERFILE \
            -t itzcrazykns1337/${IMAGE_NAME}:main \
            --push .

      - name: Build and push release Docker image for ${{ matrix.service }}
        if: github.event_name == 'release'
        run: |
          docker buildx create --use
          if [[ "${{ matrix.service }}" == "backend" ]]; then \
            DOCKERFILE=backend.dockerfile; \
            IMAGE_NAME=perplexica-backend; \
          else \
            DOCKERFILE=app.dockerfile; \
            IMAGE_NAME=perplexica-frontend; \
          fi
          docker buildx build --platform linux/amd64,linux/arm64 \
            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
            --cache-to=type=inline \
            -f $DOCKERFILE \
            -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
            --push .
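Not part of the commit, but a quick way to confirm that the pushed manifests really carry both architectures, assuming the itzcrazykns1337 images are public on Docker Hub:

```bash
# Illustrative verification step (not from this commit): the output should
# list both linux/amd64 and linux/arm64 platform entries.
docker buildx imagetools inspect itzcrazykns1337/perplexica-backend:main
```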

.gitignore

@@ -35,4 +35,5 @@ logs/
 Thumbs.db
 # Db
 db.sqlite
+/searxng

app.dockerfile

@@ -1,7 +1,7 @@
 FROM node:alpine
-ARG NEXT_PUBLIC_WS_URL
-ARG NEXT_PUBLIC_API_URL
+ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
 ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
 ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
@@ -9,7 +9,7 @@ WORKDIR /home/perplexica
 COPY ui /home/perplexica/
-RUN yarn install
+RUN yarn install --frozen-lockfile
 RUN yarn build
 CMD ["yarn", "start"]
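The build args now default to localhost, so a deployment whose backend lives elsewhere still needs overrides at build time. A minimal sketch, with example.com standing in for the real host (not from the commit):

```bash
# Build the frontend image against a non-local backend; both URLs here are
# placeholders for wherever the backend is actually reachable.
docker build -f app.dockerfile \
  --build-arg NEXT_PUBLIC_API_URL=http://example.com:3001/api \
  --build-arg NEXT_PUBLIC_WS_URL=ws://example.com:3001 \
  -t perplexica-frontend .
```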

backend.dockerfile

@@ -1,20 +1,16 @@
 FROM node:slim
-ARG SEARXNG_API_URL
-ENV SEARXNG_API_URL=${SEARXNG_API_URL}
 WORKDIR /home/perplexica
 COPY src /home/perplexica/src
 COPY tsconfig.json /home/perplexica/
-COPY config.toml /home/perplexica/
 COPY drizzle.config.ts /home/perplexica/
 COPY package.json /home/perplexica/
 COPY yarn.lock /home/perplexica/
 RUN mkdir /home/perplexica/data
-RUN yarn install
+RUN yarn install --frozen-lockfile
 RUN yarn build
 CMD ["yarn", "start"]

docker-compose.yaml

@@ -13,8 +13,9 @@ services:
     build:
       context: .
       dockerfile: backend.dockerfile
-    args:
-      - SEARXNG_API_URL=http://searxng:8080
+    image: itzcrazykns1337/perplexica-backend:main
+    environment:
+      - SEARXNG_API_URL=http://searxng:8080
     depends_on:
       - searxng
     ports:
@@ -35,6 +36,7 @@ services:
       args:
         - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
         - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+    image: itzcrazykns1337/perplexica-frontend:main
    depends_on:
      - perplexica-backend
    ports:
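This change moves SEARXNG_API_URL from a build arg to a runtime environment variable, so the published backend image can be repointed at any SearxNG instance without a rebuild. A sketch of what that enables (host and port mapping are illustrative, not from the commit):

```bash
# Run the prebuilt backend against an arbitrary SearxNG URL; no rebuild needed
# now that the value is read from the environment at container start.
docker run -e SEARXNG_API_URL=http://searxng:8080 -p 3001:3001 \
  itzcrazykns1337/perplexica-backend:main
```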

docs/API/SEARCH.md

@@ -6,7 +6,9 @@ Perplexica's Search API makes it easy to use our AI-powered search engine. You
 ## Endpoint
-### **POST** `/api/search`
+### **POST** `http://localhost:3001/api/search`
+
+**Note**: Replace `3001` with any other port if you've changed the default PORT
 ### Request
@@ -26,13 +28,16 @@ The API accepts a JSON object in the request body, where you define the focus mo
   },
   "focusMode": "webSearch",
   "query": "What is Perplexica",
-  "history": []
+  "history": [
+    ["human", "Hi, how are you?"],
+    ["assistant", "I am doing well, how can I help you today?"]
+  ]
 }
 ```
 ### Request Parameters
-- **`chatModel`** (object, optional): Defines the chat model to be used for the query.
+- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details you can send a GET request at `http://localhost:3001/api/models`.
   - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
   - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
@@ -40,7 +45,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
   - `customOpenAIBaseURL`: If you're using a custom OpenAI instance, provide the base URL.
   - `customOpenAIKey`: The API key for a custom OpenAI instance.
-- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching.
+- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details you can send a GET request at `http://localhost:3001/api/models`.
   - `provider`: The provider for the embedding model (e.g., `openai`).
   - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
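The documented request can be exercised end-to-end with exactly the fields shown in the updated example, assuming a local instance on the default port (this call is not part of the diff):

```bash
# Example call against the documented endpoint; chatModel and embeddingModel
# are optional per the docs, so the server's defaults apply here.
curl -X POST http://localhost:3001/api/search \
  -H 'Content-Type: application/json' \
  -d '{
    "focusMode": "webSearch",
    "query": "What is Perplexica",
    "history": [
      ["human", "Hi, how are you?"],
      ["assistant", "I am doing well, how can I help you today?"]
    ]
  }'
```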

docs/installation/UPDATING.md

@@ -10,15 +10,21 @@ To update Perplexica to the latest version, follow these steps:
    git clone https://github.com/ItzCrazyKns/Perplexica.git
    ```
-2. Navigate to the Project Directory
+2. Navigate to the Project Directory.
-3. Update and Rebuild Docker Containers:
+3. Pull latest images from registry.
    ```bash
-   docker compose up -d --build
+   docker compose pull
    ```
-4. Once the command completes running go to http://localhost:3000 and verify the latest changes.
+4. Update and Recreate containers.
+
+   ```bash
+   docker compose up -d
+   ```
+
+5. Once the command completes running go to http://localhost:3000 and verify the latest changes.
 ## For non Docker users

src/lib/providers/groq.ts

@@ -9,6 +9,45 @@ export const loadGroqChatModels = async () => {
   try {
     const chatModels = {
+      'llama-3.2-3b-preview': {
+        displayName: 'Llama 3.2 3B',
+        model: new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'llama-3.2-3b-preview',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+      },
+      'llama-3.2-11b-text-preview': {
+        displayName: 'Llama 3.2 11B Text',
+        model: new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'llama-3.2-11b-text-preview',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+      },
+      'llama-3.2-90b-text-preview': {
+        displayName: 'Llama 3.2 90B Text',
+        model: new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'llama-3.2-90b-text-preview',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+      },
       'llama-3.1-70b-versatile': {
         displayName: 'Llama 3.1 70B',
         model: new ChatOpenAI(
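With a Groq API key configured and the backend running, the three new Llama 3.2 entries should surface in the models listing that the updated API docs point at (default port assumed; this check is not part of the commit):

```bash
# The newly added Llama 3.2 previews should appear alongside the existing
# Groq models in this listing.
curl http://localhost:3001/api/models
```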