From b048c4b173760243e46b3e13bf8c3b991da6460a Mon Sep 17 00:00:00 2001
From: yeet
Date: Fri, 11 Oct 2024 10:26:10 +0200
Subject: [PATCH] Reapply "Merge remote-tracking branch 'origin/master' into
 ollama-auth"

This reverts commit d75f1c743ec2de8e8e7d360f91ab4be3453707fd.
---
 .github/workflows/docker-build.yaml | 70 +++++++++++++++++++++++++++++
 .gitignore                          |  3 +-
 app.dockerfile                      |  6 +--
 backend.dockerfile                  |  6 +--
 docker-compose.yaml                 |  6 ++-
 docs/API/SEARCH.md                  | 13 ++++--
 docs/installation/UPDATING.md       | 14 ++++--
 src/lib/providers/groq.ts           | 39 ++++++++++++++++
 8 files changed, 138 insertions(+), 19 deletions(-)
 create mode 100644 .github/workflows/docker-build.yaml

diff --git a/.github/workflows/docker-build.yaml b/.github/workflows/docker-build.yaml
new file mode 100644
index 0000000..3cd9044
--- /dev/null
+++ b/.github/workflows/docker-build.yaml
@@ -0,0 +1,70 @@
+name: Build & Push Docker Images
+
+on:
+  push:
+    branches:
+      - master
+  release:
+    types: [published]
+
+jobs:
+  build-and-push:
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        service: [backend, app]
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v3
+
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+        with:
+          install: true
+
+      - name: Log in to DockerHub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKER_USERNAME }}
+          password: ${{ secrets.DOCKER_PASSWORD }}
+
+      - name: Extract version from release tag
+        if: github.event_name == 'release'
+        id: version
+        run: echo "RELEASE_VERSION=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
+
+      - name: Build and push Docker image for ${{ matrix.service }}
+        if: github.ref == 'refs/heads/master' && github.event_name == 'push'
+        run: |
+          docker buildx create --use
+          if [[ "${{ matrix.service }}" == "backend" ]]; then \
+            DOCKERFILE=backend.dockerfile; \
+            IMAGE_NAME=perplexica-backend; \
+          else \
+            DOCKERFILE=app.dockerfile; \
+            IMAGE_NAME=perplexica-frontend; \
+          fi
+          docker buildx build --platform linux/amd64,linux/arm64 \
+            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:main \
+            --cache-to=type=inline \
+            -f $DOCKERFILE \
+            -t itzcrazykns1337/${IMAGE_NAME}:main \
+            --push .
+
+      - name: Build and push release Docker image for ${{ matrix.service }}
+        if: github.event_name == 'release'
+        run: |
+          docker buildx create --use
+          if [[ "${{ matrix.service }}" == "backend" ]]; then \
+            DOCKERFILE=backend.dockerfile; \
+            IMAGE_NAME=perplexica-backend; \
+          else \
+            DOCKERFILE=app.dockerfile; \
+            IMAGE_NAME=perplexica-frontend; \
+          fi
+          docker buildx build --platform linux/amd64,linux/arm64 \
+            --cache-from=type=registry,ref=itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
+            --cache-to=type=inline \
+            -f $DOCKERFILE \
+            -t itzcrazykns1337/${IMAGE_NAME}:${{ env.RELEASE_VERSION }} \
+            --push .
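Outside CI, the build each matrix job runs boils down to one Buildx invocation per service. A minimal local sketch for the backend image on `master`, assuming Docker Buildx is available and you are logged in to a registry you can push to (the image name mirrors the workflow and is otherwise illustrative):

```bash
# Multi-arch images need a docker-container builder; create and select one.
docker buildx create --use

# Build linux/amd64 + linux/arm64 in one go, seeding the build cache from the
# previously pushed image and embedding cache metadata into the new one.
docker buildx build --platform linux/amd64,linux/arm64 \
  --cache-from=type=registry,ref=itzcrazykns1337/perplexica-backend:main \
  --cache-to=type=inline \
  -f backend.dockerfile \
  -t itzcrazykns1337/perplexica-backend:main \
  --push .
```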
diff --git a/.gitignore b/.gitignore
index a3dd5cc..8391d19 100644
--- a/.gitignore
+++ b/.gitignore
@@ -35,4 +35,5 @@ logs/
 Thumbs.db
 
 # Db
-db.sqlite
\ No newline at end of file
+db.sqlite
+/searxng
diff --git a/app.dockerfile b/app.dockerfile
index 105cf86..ff1824d 100644
--- a/app.dockerfile
+++ b/app.dockerfile
@@ -1,7 +1,7 @@
 FROM node:alpine
 
-ARG NEXT_PUBLIC_WS_URL
-ARG NEXT_PUBLIC_API_URL
+ARG NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+ARG NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
 ENV NEXT_PUBLIC_WS_URL=${NEXT_PUBLIC_WS_URL}
 ENV NEXT_PUBLIC_API_URL=${NEXT_PUBLIC_API_URL}
 
@@ -9,7 +9,7 @@ WORKDIR /home/perplexica
 
 COPY ui /home/perplexica/
 
-RUN yarn install
+RUN yarn install --frozen-lockfile
 RUN yarn build
 
 CMD ["yarn", "start"]
\ No newline at end of file
diff --git a/backend.dockerfile b/backend.dockerfile
index 0169218..b8d0155 100644
--- a/backend.dockerfile
+++ b/backend.dockerfile
@@ -1,20 +1,16 @@
 FROM node:slim
 
-ARG SEARXNG_API_URL
-ENV SEARXNG_API_URL=${SEARXNG_API_URL}
-
 WORKDIR /home/perplexica
 
 COPY src /home/perplexica/src
 COPY tsconfig.json /home/perplexica/
-COPY config.toml /home/perplexica/
 COPY drizzle.config.ts /home/perplexica/
 COPY package.json /home/perplexica/
 COPY yarn.lock /home/perplexica/
 
 RUN mkdir /home/perplexica/data
 
-RUN yarn install
+RUN yarn install --frozen-lockfile
 RUN yarn build
 
 CMD ["yarn", "start"]
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
index d3892e5..46d82c6 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -13,8 +13,9 @@ services:
     build:
       context: .
      dockerfile: backend.dockerfile
-    args:
-      - SEARXNG_API_URL=http://searxng:8080
+    image: itzcrazykns1337/perplexica-backend:main
+    environment:
+      - SEARXNG_API_URL=http://searxng:8080
     depends_on:
       - searxng
     ports:
@@ -35,6 +36,7 @@ services:
       args:
         - NEXT_PUBLIC_API_URL=http://127.0.0.1:3001/api
         - NEXT_PUBLIC_WS_URL=ws://127.0.0.1:3001
+    image: itzcrazykns1337/perplexica-frontend:main
     depends_on:
       - perplexica-backend
     ports:
diff --git a/docs/API/SEARCH.md b/docs/API/SEARCH.md
index d3391c9..a573021 100644
--- a/docs/API/SEARCH.md
+++ b/docs/API/SEARCH.md
@@ -6,7 +6,9 @@ Perplexica’s Search API makes it easy to use our AI-powered search engine. You
 
 ## Endpoint
 
-### **POST** `/api/search`
+### **POST** `http://localhost:3001/api/search`
+
+**Note**: Replace `3001` with your configured port if you've changed the default.
 
 ### Request
 
@@ -26,13 +28,16 @@ The API accepts a JSON object in the request body, where you define the focus mo
   },
   "focusMode": "webSearch",
   "query": "What is Perplexica",
-  "history": []
+  "history": [
+    ["human", "Hi, how are you?"],
+    ["assistant", "I am doing well, how can I help you today?"]
+  ]
 }
 ```
 
 ### Request Parameters
 
-- **`chatModel`** (object, optional): Defines the chat model to be used for the query.
+- **`chatModel`** (object, optional): Defines the chat model to be used for the query. For model details, you can send a GET request to `http://localhost:3001/api/models`.
 
   - `provider`: Specifies the provider for the chat model (e.g., `openai`, `ollama`).
   - `model`: The specific model from the chosen provider (e.g., `gpt-4o-mini`).
@@ -40,7 +45,7 @@ The API accepts a JSON object in the request body, where you define the focus mo
   - `customOpenAIBaseURL`: If you’re using a custom OpenAI instance, provide the base URL.
   - `customOpenAIKey`: The API key for a custom OpenAI instance.
 
-- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching.
+- **`embeddingModel`** (object, optional): Defines the embedding model for similarity-based searching. For model details, you can send a GET request to `http://localhost:3001/api/models`.
 
   - `provider`: The provider for the embedding model (e.g., `openai`).
   - `model`: The specific embedding model (e.g., `text-embedding-3-large`).
diff --git a/docs/installation/UPDATING.md b/docs/installation/UPDATING.md
index df67775..031a3e8 100644
--- a/docs/installation/UPDATING.md
+++ b/docs/installation/UPDATING.md
@@ -10,15 +10,21 @@ To update Perplexica to the latest version, follow these steps:
 git clone https://github.com/ItzCrazyKns/Perplexica.git
 ```
 
-2. Navigate to the Project Directory
+2. Navigate to the project directory.
 
-3. Update and Rebuild Docker Containers:
+3. Pull the latest images from the registry.
 
 ```bash
-docker compose up -d --build
+docker compose pull
 ```
 
-4. Once the command completes running go to http://localhost:3000 and verify the latest changes.
+4. Update and recreate the containers.
+
+```bash
+docker compose up -d
+```
+
+5. Once the command completes, go to http://localhost:3000 and verify the latest changes.
 
 ## For non Docker users
diff --git a/src/lib/providers/groq.ts b/src/lib/providers/groq.ts
index 6249267..69db4f7 100644
--- a/src/lib/providers/groq.ts
+++ b/src/lib/providers/groq.ts
@@ -9,6 +9,45 @@ export const loadGroqChatModels = async () => {
   try {
     const chatModels = {
+      'llama-3.2-3b-preview': {
+        displayName: 'Llama 3.2 3B',
+        model: new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'llama-3.2-3b-preview',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+      },
+      'llama-3.2-11b-text-preview': {
+        displayName: 'Llama 3.2 11B Text',
+        model: new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'llama-3.2-11b-text-preview',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+      },
+      'llama-3.2-90b-text-preview': {
+        displayName: 'Llama 3.2 90B Text',
+        model: new ChatOpenAI(
+          {
+            openAIApiKey: groqApiKey,
+            modelName: 'llama-3.2-90b-text-preview',
+            temperature: 0.7,
+          },
+          {
+            baseURL: 'https://api.groq.com/openai/v1',
+          },
+        ),
+      },
       'llama-3.1-70b-versatile': {
         displayName: 'Llama 3.1 70B',
         model: new ChatOpenAI(
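The hunk above is truncated here; it continues with the pre-existing Groq entries. The new models follow the same pattern of driving Groq's OpenAI-compatible endpoint through `ChatOpenAI` with a custom `baseURL`. To sanity-check that the added model IDs are actually served, one option is to list the models behind that endpoint; a minimal sketch, assuming `curl` and a `GROQ_API_KEY` environment variable holding a valid key:

```bash
# List model IDs available on Groq's OpenAI-compatible API (the same host
# the patch sets as baseURL); the newly added IDs should appear in the output.
curl -s https://api.groq.com/openai/v1/models \
  -H "Authorization: Bearer $GROQ_API_KEY"
```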