pre-release commit
|
@ -0,0 +1,69 @@
|
|||
# .aoneci/workflows/ci.yml
|
||||
|
||||
name: Qwen Code CI
|
||||
|
||||
triggers:
|
||||
push:
|
||||
branches: [main, dev, integration]
|
||||
merge_request:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
name: Build and Lint
|
||||
steps:
|
||||
- uses: checkout
|
||||
- uses: setup-env
|
||||
inputs:
|
||||
node-version: '20'
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
|
||||
- name: Run formatter check
|
||||
run: |
|
||||
npm run format
|
||||
git diff --exit-code
|
||||
|
||||
- name: Run linter
|
||||
run: npm run lint:ci
|
||||
|
||||
- name: Build project
|
||||
run: npm run build
|
||||
|
||||
- name: Run type check
|
||||
run: npm run typecheck
|
||||
|
||||
- name: Upload build artifacts
|
||||
uses: upload-artifact
|
||||
inputs:
|
||||
name: build-artifacts-20
|
||||
path: |
|
||||
packages/*/dist/**/*
|
||||
package-lock.json
|
||||
|
||||
test:
|
||||
name: Test
|
||||
needs: build # This job depends on the 'build' job
|
||||
steps:
|
||||
- uses: checkout
|
||||
|
||||
- uses: setup-env
|
||||
inputs:
|
||||
node-version: '20'
|
||||
|
||||
- uses: download-artifact
|
||||
inputs:
|
||||
name: build-artifacts-20
|
||||
path: .
|
||||
|
||||
- name: Install dependencies for testing
|
||||
run: npm ci
|
||||
|
||||
- name: Run tests and generate reports
|
||||
run: NO_COLOR=true npm run test:ci
|
||||
|
||||
- name: Upload coverage reports
|
||||
uses: upload-artifact
|
||||
inputs:
|
||||
name: coverage-reports-20
|
||||
path: packages/*/coverage
|
|
@ -0,0 +1,13 @@
|
|||
# Top-most EditorConfig file; editors stop searching parent directories here.
root = true

# Defaults for every file in the repository.
[*]
charset = utf-8
insert_final_newline = true
end_of_line = lf
indent_style = space
indent_size = 2
max_line_length = 80

# Make requires hard tabs for recipe lines.
[Makefile]
indent_style = tab
indent_size = 8
|
|
@ -0,0 +1,89 @@
|
|||
# Builder image: Debian + Node.js + Docker CLI/Buildx + gcloud SDK.
# Use a common base image like Debian.
# Using 'bookworm-slim' for a balance of size and compatibility.
FROM debian:bookworm-slim

# Set environment variables to prevent interactive prompts during installation
ENV DEBIAN_FRONTEND=noninteractive
ENV NODE_VERSION=20.12.2
ENV NODE_VERSION_MAJOR=20
ENV DOCKER_CLI_VERSION=26.1.3
ENV BUILDX_VERSION=v0.14.0

# Install dependencies for adding NodeSource repository, gcloud, and other tools
# - curl: for downloading files
# - gnupg: for managing GPG keys (used by NodeSource & Google Cloud SDK)
# - apt-transport-https, ca-certificates: for HTTPS apt repositories
# - rsync: the rsync utility itself
# - git: often useful in build environments
# - python3, python3-pip, python3-venv, python3-crcmod: for gcloud SDK and some of its components
# - lsb-release: for gcloud install script to identify distribution
RUN apt-get update && \
    apt-get install -y --no-install-recommends \
    curl \
    gnupg \
    apt-transport-https \
    ca-certificates \
    rsync \
    git \
    python3 \
    python3-pip \
    python3-venv \
    python3-crcmod \
    lsb-release \
    && rm -rf /var/lib/apt/lists/*

# Install Node.js and npm from the official NodeSource repository.
# FIX: create /etc/apt/keyrings before writing the key into it — the
# directory is not guaranteed to exist on a slim base image.
RUN set -eux; \
    mkdir -p /etc/apt/keyrings && \
    curl -fsSL https://deb.nodesource.com/gpgkey/nodesource-repo.gpg.key | gpg --dearmor -o /etc/apt/keyrings/nodesource.gpg && \
    # For Node.js 20.x, the suite is node_20.x
    echo "deb [signed-by=/etc/apt/keyrings/nodesource.gpg] https://deb.nodesource.com/node_${NODE_VERSION_MAJOR}.x nodistro main" > /etc/apt/sources.list.d/nodesource.list && \
    apt-get update && \
    apt-get install -y --no-install-recommends nodejs && \
    npm install -g npm@latest && \
    # Verify installations
    node -v && \
    npm -v && \
    rm -rf /var/lib/apt/lists/*

# Install Docker CLI
# Download the static binary from Docker's official source
RUN set -eux; \
    DOCKER_CLI_ARCH=$(dpkg --print-architecture); \
    case "${DOCKER_CLI_ARCH}" in \
    amd64) DOCKER_CLI_ARCH_SUFFIX="x86_64" ;; \
    arm64) DOCKER_CLI_ARCH_SUFFIX="aarch64" ;; \
    *) echo "Unsupported architecture: ${DOCKER_CLI_ARCH}"; exit 1 ;; \
    esac; \
    curl -fsSL "https://download.docker.com/linux/static/stable/${DOCKER_CLI_ARCH_SUFFIX}/docker-${DOCKER_CLI_VERSION}.tgz" -o docker.tgz && \
    tar -xzf docker.tgz --strip-components=1 -C /usr/local/bin docker/docker && \
    rm docker.tgz && \
    # Verify installation
    docker --version

# Install Docker Buildx plugin
RUN set -eux; \
    BUILDX_ARCH_DEB=$(dpkg --print-architecture); \
    case "${BUILDX_ARCH_DEB}" in \
    amd64) BUILDX_ARCH_SUFFIX="amd64" ;; \
    arm64) BUILDX_ARCH_SUFFIX="arm64" ;; \
    *) echo "Unsupported architecture for Buildx: ${BUILDX_ARCH_DEB}"; exit 1 ;; \
    esac; \
    mkdir -p /usr/local/lib/docker/cli-plugins && \
    curl -fsSL "https://github.com/docker/buildx/releases/download/${BUILDX_VERSION}/buildx-${BUILDX_VERSION}.linux-${BUILDX_ARCH_SUFFIX}" -o /usr/local/lib/docker/cli-plugins/docker-buildx && \
    chmod +x /usr/local/lib/docker/cli-plugins/docker-buildx && \
    # verify installation
    docker buildx version

# Install Google Cloud SDK (gcloud CLI)
# FIX: use curl -fsSL so an HTTP error fails the build instead of piping an
# error page into gpg; write the list with '>' (tee -a appended) to match the
# NodeSource layer; clean apt lists like the other layers to keep the image small.
RUN set -eux; \
    echo "deb [signed-by=/usr/share/keyrings/cloud.google.gpg] https://packages.cloud.google.com/apt cloud-sdk main" > /etc/apt/sources.list.d/google-cloud-sdk.list && \
    curl -fsSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | gpg --dearmor -o /usr/share/keyrings/cloud.google.gpg && \
    apt-get update -y && \
    apt-get install -y google-cloud-cli && \
    rm -rf /var/lib/apt/lists/*

# Set a working directory (optional, but good practice)
WORKDIR /workspace

# Cloud Build steps override the entrypoint, so this only matters for direct
# `docker run`. FIX: exec form — the original shell form ENTRYPOINT '/bin/bash'
# wrapped bash in an extra /bin/sh -c, so PID 1 was sh and signals (SIGTERM)
# did not reach bash.
ENTRYPOINT ["/bin/bash"]
|
|
@ -0,0 +1,75 @@
|
|||
# Cloud Build pipeline: install, docker auth, build packages, compute an image
# tag from TAG_NAME/SHORT_SHA, then build and push the sandbox container.
steps:
  # Step 1: Install root dependencies (includes workspaces)
  - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
    id: 'Install Dependencies'
    entrypoint: 'npm'
    args: ['install']

  # Step 2: Authenticate for Docker (so we can push images to the artifact registry)
  - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
    id: 'Authenticate docker'
    entrypoint: 'npm'
    args: ['run', 'auth']

  # Step 3: Build workspace packages
  - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
    id: 'Build packages'
    entrypoint: 'npm'
    args: ['run', 'build:packages']

  # Step 4: Determine Docker Image Tag.
  # '$$' escapes '$' so Cloud Build substitution leaves it for the shell.
  # Result is passed to later steps via /workspace/image_tag.txt (the
  # /workspace volume persists across steps).
  - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
    id: 'Determine Docker Image Tag'
    entrypoint: 'bash'
    args:
      - -c
      - |
        SHELL_TAG_NAME="$TAG_NAME"
        FINAL_TAG="$SHORT_SHA" # Default to SHA
        if [[ "$$SHELL_TAG_NAME" == *"-nightly"* ]]; then
          echo "Nightly release detected."
          FINAL_TAG="$${SHELL_TAG_NAME#v}"
        # Also escape the variable in the regex match
        elif [[ "$$SHELL_TAG_NAME" =~ ^v[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
          echo "Official release detected."
          FINAL_TAG="$${SHELL_TAG_NAME#v}"
        else
          echo "Development/RC release detected. Using commit SHA as tag."
        fi
        echo "Determined image tag: $$FINAL_TAG"
        echo "$$FINAL_TAG" > /workspace/image_tag.txt

  # Step 5: Build sandbox container image
  - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
    id: 'Build sandbox Docker image'
    entrypoint: 'bash'
    args:
      - -c
      - |
        export GEMINI_SANDBOX_IMAGE_TAG=$$(cat /workspace/image_tag.txt)
        echo "Using Docker image tag for build: $$GEMINI_SANDBOX_IMAGE_TAG"
        npm run build:sandbox
    env:
      - 'GEMINI_SANDBOX=$_CONTAINER_TOOL'

  # Step 6: Publish sandbox container image.
  # NOTE(review): this reads /workspace/final_image_uri.txt, which no step
  # shown here writes — presumably 'npm run build:sandbox' produces it; confirm.
  - name: 'us-west1-docker.pkg.dev/gemini-code-dev/gemini-code-containers/gemini-code-builder'
    id: 'Publish sandbox Docker image'
    entrypoint: 'bash'
    args:
      - -c
      - |
        set -e
        FINAL_IMAGE_URI=$$(cat /workspace/final_image_uri.txt)

        echo "Pushing sandbox image: $${FINAL_IMAGE_URI}"
        $_CONTAINER_TOOL push "$${FINAL_IMAGE_URI}"
    env:
      - 'GEMINI_SANDBOX=$_CONTAINER_TOOL'

options:
  defaultLogsBucketBehavior: REGIONAL_USER_OWNED_BUCKET
  dynamicSubstitutions: true

# _CONTAINER_TOOL lets the same pipeline run with docker or podman.
substitutions:
  _CONTAINER_TOOL: 'docker'
|
|
@ -0,0 +1,24 @@
|
|||
# Set the default behavior for all files to automatically handle line endings.
# This will ensure that all text files are normalized to use LF (line feed)
# line endings in the repository, which helps prevent cross-platform issues.
* text=auto eol=lf

# Explicitly declare files that must have LF line endings for proper execution
# on Unix-like systems.
*.sh eol=lf
*.bash eol=lf
Makefile eol=lf

# Explicitly declare binary file types to prevent Git from attempting to
# normalize their line endings.
# Images
*.png binary
*.jpg binary
*.jpeg binary
*.gif binary
*.ico binary
# Documents
*.pdf binary
# Fonts
*.woff binary
*.woff2 binary
*.eot binary
*.ttf binary
*.otf binary
|
|
@ -0,0 +1,7 @@
|
|||
# By default, require reviews from the release approvers for all files.
* @google-gemini/gemini-cli-askmode-approvers

# The following files don't need reviews from the release approvers.
# These patterns override the rule above: in CODEOWNERS the last matching
# pattern wins, and a pattern with no owner listed clears ownership.
**/*.md
/docs/
|
|
@ -0,0 +1,55 @@
|
|||
# GitHub issue form for bug reports; labels are applied automatically on open.
name: Bug Report
description: Report a bug to help us improve Gemini CLI
labels: ['kind/bug', 'status/need-triage']
body:
  - type: markdown
    attributes:
      value: |
        > [!IMPORTANT]
        > Thanks for taking the time to fill out this bug report!
        >
        > Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if an issue already exists for the bug you encountered.

  - type: textarea
    id: problem
    attributes:
      label: What happened?
      description: A clear and concise description of what the bug is.
    validations:
      required: true

  - type: textarea
    id: expected
    attributes:
      label: What did you expect to happen?
    validations:
      required: true

  # Pre-filled with a collapsed console block so pasted /about output does not
  # overwhelm the issue body.
  - type: textarea
    id: info
    attributes:
      label: Client information
      description: Please paste the full text from the `/about` command run from Gemini CLI. Also include which platform (MacOS, Windows, Linux).
      value: |
        <details>

        ```console
        $ gemini /about
        # paste output here
        ```

        </details>
    validations:
      required: true

  # Optional: no validations block, so this field may be left empty.
  - type: textarea
    id: login-info
    attributes:
      label: Login information
      description: Describe how you are logging in (e.g., Google Account, API key).

  - type: textarea
    id: additional-context
    attributes:
      label: Anything else we need to know?
      description: Add any other context about the problem here.
|
|
@ -0,0 +1,33 @@
|
|||
# GitHub issue form for feature requests; labels are applied automatically.
name: Feature Request
description: Suggest an idea for this project
labels: ['kind/enhancement', 'status/need-triage']
body:
  - type: markdown
    attributes:
      value: |
        > [!IMPORTANT]
        > Thanks for taking the time to suggest an enhancement!
        >
        > Please search **[existing issues](https://github.com/google-gemini/gemini-cli/issues)** to see if a similar feature has already been requested.

  - type: textarea
    id: feature
    attributes:
      label: What would you like to be added?
      description: A clear and concise description of the enhancement.
    validations:
      required: true

  - type: textarea
    id: rationale
    attributes:
      label: Why is this needed?
      description: A clear and concise description of why this enhancement is needed.
    validations:
      required: true

  # Optional field.
  - type: textarea
    id: additional-context
    attributes:
      label: Additional context
      description: Add any other context or screenshots about the feature request here.
|
|
@ -0,0 +1,102 @@
|
|||
# Composite action: builds a markdown coverage summary (table + collapsible
# full-text reports per package) and posts/updates it as a PR comment.
name: 'Post Coverage Comment Action'
description: 'Prepares and posts a code coverage comment to a PR.'

inputs:
  cli_json_file:
    description: 'Path to CLI coverage-summary.json'
    required: true
  core_json_file:
    description: 'Path to Core coverage-summary.json'
    required: true
  cli_full_text_summary_file:
    description: 'Path to CLI full-text-summary.txt'
    required: true
  core_full_text_summary_file:
    description: 'Path to Core full-text-summary.txt'
    required: true
  node_version:
    description: 'Node.js version for context in messages'
    required: true
  github_token:
    description: 'GitHub token for posting comments'
    required: true

runs:
  using: 'composite'
  steps:
    # Missing input files degrade to "N/A" cells rather than failing the step.
    - name: Prepare Coverage Comment
      id: prep_coverage_comment
      shell: bash
      run: |
        cli_json_file="${{ inputs.cli_json_file }}"
        core_json_file="${{ inputs.core_json_file }}"
        cli_full_text_summary_file="${{ inputs.cli_full_text_summary_file }}"
        core_full_text_summary_file="${{ inputs.core_full_text_summary_file }}"
        comment_file="coverage-comment.md"

        # Extract percentages using jq for the main table
        if [ -f "$cli_json_file" ]; then
          cli_lines_pct=$(jq -r '.total.lines.pct' "$cli_json_file")
          cli_statements_pct=$(jq -r '.total.statements.pct' "$cli_json_file")
          cli_functions_pct=$(jq -r '.total.functions.pct' "$cli_json_file")
          cli_branches_pct=$(jq -r '.total.branches.pct' "$cli_json_file")
        else
          cli_lines_pct="N/A"; cli_statements_pct="N/A"; cli_functions_pct="N/A"; cli_branches_pct="N/A"
          echo "CLI coverage-summary.json not found at: $cli_json_file" >&2 # Error to stderr
        fi

        if [ -f "$core_json_file" ]; then
          core_lines_pct=$(jq -r '.total.lines.pct' "$core_json_file")
          core_statements_pct=$(jq -r '.total.statements.pct' "$core_json_file")
          core_functions_pct=$(jq -r '.total.functions.pct' "$core_json_file")
          core_branches_pct=$(jq -r '.total.branches.pct' "$core_json_file")
        else
          core_lines_pct="N/A"; core_statements_pct="N/A"; core_functions_pct="N/A"; core_branches_pct="N/A"
          echo "Core coverage-summary.json not found at: $core_json_file" >&2 # Error to stderr
        fi

        echo "## Code Coverage Summary" > "$comment_file"
        echo "" >> "$comment_file"
        echo "| Package | Lines | Statements | Functions | Branches |" >> "$comment_file"
        echo "|---|---|---|---|---|" >> "$comment_file"
        echo "| CLI | ${cli_lines_pct}% | ${cli_statements_pct}% | ${cli_functions_pct}% | ${cli_branches_pct}% |" >> "$comment_file"
        echo "| Core | ${core_lines_pct}% | ${core_statements_pct}% | ${core_functions_pct}% | ${core_branches_pct}% |" >> "$comment_file"
        echo "" >> "$comment_file"

        # CLI Package - Collapsible Section (with full text summary from file)
        echo "<details>" >> "$comment_file"
        echo "<summary>CLI Package - Full Text Report</summary>" >> "$comment_file"
        echo "" >> "$comment_file"
        echo '```text' >> "$comment_file"
        if [ -f "$cli_full_text_summary_file" ]; then
          cat "$cli_full_text_summary_file" >> "$comment_file"
        else
          echo "CLI full-text-summary.txt not found at: $cli_full_text_summary_file" >> "$comment_file"
        fi
        echo '```' >> "$comment_file"
        echo "</details>" >> "$comment_file"
        echo "" >> "$comment_file"

        # Core Package - Collapsible Section (with full text summary from file)
        echo "<details>" >> "$comment_file"
        echo "<summary>Core Package - Full Text Report</summary>" >> "$comment_file"
        echo "" >> "$comment_file"
        echo '```text' >> "$comment_file"
        if [ -f "$core_full_text_summary_file" ]; then
          cat "$core_full_text_summary_file" >> "$comment_file"
        else
          echo "Core full-text-summary.txt not found at: $core_full_text_summary_file" >> "$comment_file"
        fi
        echo '```' >> "$comment_file"
        echo "</details>" >> "$comment_file"
        echo "" >> "$comment_file"

        echo "_For detailed HTML reports, please see the 'coverage-reports-${{ inputs.node_version }}' artifact from the main CI run._" >> "$comment_file"

    # comment-tag makes the action update the existing comment on re-runs
    # instead of posting a new one each time.
    - name: Post Coverage Comment
      uses: thollander/actions-comment-pull-request@v3
      if: always()
      with:
        file-path: coverage-comment.md # Use the generated file directly
        comment-tag: code-coverage-summary
        github-token: ${{ inputs.github_token }}
|
|
@ -0,0 +1,41 @@
|
|||
## TLDR
|
||||
|
||||
<!-- Add a brief description of what this pull request changes, why, and any important things for reviewers to look at -->
|
||||
|
||||
## Dive Deeper
|
||||
|
||||
<!-- more thoughts and in depth discussion here -->
|
||||
|
||||
## Reviewer Test Plan
|
||||
|
||||
<!-- When a person reviews your code, they should ideally pull and run it. How would they validate that your change works? If relevant, list some good classes of example prompts and ways they can exercise your changes. -->
|
||||
|
||||
## Testing Matrix
|
||||
|
||||
<!-- Before submitting please validate your changes on as many of these options as possible -->
|
||||
|
||||
| | 🍏 | 🪟 | 🐧 |
|
||||
| -------- | --- | --- | --- |
|
||||
| npm run | ❓ | ❓ | ❓ |
|
||||
| npx | ❓ | ❓ | ❓ |
|
||||
| Docker | ❓ | ❓ | ❓ |
|
||||
| Podman | ❓ | - | - |
|
||||
| Seatbelt | ❓ | - | - |
|
||||
|
||||
## Linked issues / bugs
|
||||
|
||||
<!--
|
||||
Link to any related issues or bugs.
|
||||
|
||||
**If this PR fully resolves the issue, use one of the following keywords to automatically close the issue when this PR is merged:**
|
||||
|
||||
- Closes #<issue_number>
|
||||
- Fixes #<issue_number>
|
||||
- Resolves #<issue_number>
|
||||
|
||||
*Example: `Resolves #123`*
|
||||
|
||||
**If this PR is only related to an issue or is a partial fix, simply reference the issue number without a keyword:**
|
||||
|
||||
*Example: `This PR makes progress on #456` or `Related to #789`*
|
||||
-->
|
|
@ -0,0 +1,163 @@
|
|||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
# Initialize a comma-separated string to hold PR numbers that need a comment
|
||||
PRS_NEEDING_COMMENT=""
|
||||
|
||||
# Function to process a single PR
|
||||
# Process a single PR: find its linked issue and synchronize labels between
# the issue and the PR, or flag the PR with 'status/need-issue' when no issue
# reference is found.
# Globals:  appends to PRS_NEEDING_COMMENT; writes needs_comment= to
#           $GITHUB_OUTPUT; reads GITHUB_REPOSITORY.
# Returns:  non-zero only when the PR itself cannot be fetched.
process_pr() {
  local PR_NUMBER=$1
  echo "🔄 Processing PR #$PR_NUMBER"

  # Get PR body with error handling
  local PR_BODY
  if ! PR_BODY=$(gh pr view "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --json body -q .body 2>/dev/null); then
    echo "  ⚠️ Could not fetch PR #$PR_NUMBER details"
    return 1
  fi

  # Look for issue references. FIX: try the explicit closing-keyword pattern
  # first — the generic '#123' pattern matches any issue mention, so in the
  # original order the keyword pattern could never run (dead code).
  local ISSUE_NUMBER=""

  # Pattern 1: Closes/Fixes/Resolves patterns (case insensitive)
  ISSUE_NUMBER=$(echo "$PR_BODY" | grep -iE '(closes?|fixes?|resolves?) #[0-9]+' | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "")

  # Pattern 2: fall back to the first direct reference like #123
  if [ -z "$ISSUE_NUMBER" ]; then
    ISSUE_NUMBER=$(echo "$PR_BODY" | grep -oE '#[0-9]+' | head -1 | sed 's/#//' 2>/dev/null || echo "")
  fi

  if [ -z "$ISSUE_NUMBER" ]; then
    echo "⚠️ No linked issue found for PR #$PR_NUMBER, adding status/need-issue label"
    if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --add-label "status/need-issue" 2>/dev/null; then
      echo "  ⚠️ Failed to add label (may already exist or have permission issues)"
    fi
    # Record this PR so a follow-up step can comment on it.
    if [ -z "$PRS_NEEDING_COMMENT" ]; then
      PRS_NEEDING_COMMENT="$PR_NUMBER"
    else
      PRS_NEEDING_COMMENT="$PRS_NEEDING_COMMENT,$PR_NUMBER"
    fi
    echo "needs_comment=true" >> "$GITHUB_OUTPUT"
  else
    echo "🔗 Found linked issue #$ISSUE_NUMBER"

    # Remove status/need-issue label if present
    if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --remove-label "status/need-issue" 2>/dev/null; then
      echo "  status/need-issue label not present or could not be removed"
    fi

    # Get issue labels as a comma-separated string.
    # FIX: dropped the inner '|| echo ""' — it made the assignment always
    # succeed, so the warning branch below was unreachable.
    echo "📥 Fetching labels from issue #$ISSUE_NUMBER"
    local ISSUE_LABELS=""
    if ! ISSUE_LABELS=$(gh issue view "$ISSUE_NUMBER" --repo "$GITHUB_REPOSITORY" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//'); then
      echo "  ⚠️ Could not fetch issue #$ISSUE_NUMBER (may not exist or be in different repo)"
      ISSUE_LABELS=""
    fi

    # Get PR labels
    echo "📥 Fetching labels from PR #$PR_NUMBER"
    local PR_LABELS=""
    if ! PR_LABELS=$(gh pr view "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --json labels -q '.labels[].name' 2>/dev/null | tr '\n' ',' | sed 's/,$//'); then
      echo "  ⚠️ Could not fetch PR labels"
      PR_LABELS=""
    fi

    echo "  Issue labels: $ISSUE_LABELS"
    echo "  PR labels: $PR_LABELS"

    # Convert comma-separated strings to arrays
    local ISSUE_LABEL_ARRAY PR_LABEL_ARRAY
    IFS=',' read -ra ISSUE_LABEL_ARRAY <<< "$ISSUE_LABELS"
    IFS=',' read -ra PR_LABEL_ARRAY <<< "$PR_LABELS"

    # Find labels to add (on issue but not on PR); the space-padded substring
    # match avoids false positives on labels that are prefixes of one another.
    local LABELS_TO_ADD=""
    for label in "${ISSUE_LABEL_ARRAY[@]}"; do
      if [ -n "$label" ] && [[ ! " ${PR_LABEL_ARRAY[*]} " =~ " ${label} " ]]; then
        if [ -z "$LABELS_TO_ADD" ]; then
          LABELS_TO_ADD="$label"
        else
          LABELS_TO_ADD="$LABELS_TO_ADD,$label"
        fi
      fi
    done

    # Find labels to remove (on PR but not on issue)
    local LABELS_TO_REMOVE=""
    for label in "${PR_LABEL_ARRAY[@]}"; do
      if [ -n "$label" ] && [[ ! " ${ISSUE_LABEL_ARRAY[*]} " =~ " ${label} " ]]; then
        # Don't remove status/need-issue since we already handled it
        if [ "$label" != "status/need-issue" ]; then
          if [ -z "$LABELS_TO_REMOVE" ]; then
            LABELS_TO_REMOVE="$label"
          else
            LABELS_TO_REMOVE="$LABELS_TO_REMOVE,$label"
          fi
        fi
      fi
    done

    # Apply label changes
    if [ -n "$LABELS_TO_ADD" ]; then
      echo "➕ Adding labels: $LABELS_TO_ADD"
      if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --add-label "$LABELS_TO_ADD" 2>/dev/null; then
        echo "  ⚠️ Failed to add some labels"
      fi
    fi

    if [ -n "$LABELS_TO_REMOVE" ]; then
      echo "➖ Removing labels: $LABELS_TO_REMOVE"
      if ! gh pr edit "$PR_NUMBER" --repo "$GITHUB_REPOSITORY" --remove-label "$LABELS_TO_REMOVE" 2>/dev/null; then
        echo "  ⚠️ Failed to remove some labels"
      fi
    fi

    if [ -z "$LABELS_TO_ADD" ] && [ -z "$LABELS_TO_REMOVE" ]; then
      echo "✅ Labels already synchronized"
    fi
    echo "needs_comment=false" >> "$GITHUB_OUTPUT"
  fi
}
|
||||
|
||||
# Driver: process one PR (when PR_NUMBER is set in the environment) or sweep
# every open PR in the repository.
if [ -n "${PR_NUMBER:-}" ]; then
  if ! process_pr "$PR_NUMBER"; then
    echo "❌ Failed to process PR #$PR_NUMBER"
    exit 1
  fi
else
  # Otherwise, get all open PRs and process them.
  # The script logic will determine which ones need issue linking or label sync
  echo "📥 Getting all open pull requests..."
  if ! PR_NUMBERS=$(gh pr list --repo "$GITHUB_REPOSITORY" --state open --limit 1000 --json number -q '.[].number' 2>/dev/null); then
    echo "❌ Failed to fetch PR list"
    exit 1
  fi

  if [ -z "$PR_NUMBERS" ]; then
    echo "✅ No open PRs found"
  else
    # Count the number of PRs
    PR_COUNT=$(echo "$PR_NUMBERS" | wc -w | tr -d ' ')
    echo "📊 Found $PR_COUNT open PRs to process"

    # A single bad PR must not abort the sweep — log and move on.
    for pr_number in $PR_NUMBERS; do
      if ! process_pr "$pr_number"; then
        echo "⚠️ Failed to process PR #$pr_number, continuing with next PR..."
        continue
      fi
    done
  fi
fi

# Always emit the output, even when empty, so downstream steps can rely on it.
# The value is a bracketed list of PR numbers, e.g. [12,34].
# FIX: quote "$GITHUB_OUTPUT" so a path containing spaces cannot break the
# redirection (word splitting on the unquoted expansion).
if [ -z "$PRS_NEEDING_COMMENT" ]; then
  echo "prs_needing_comment=[]" >> "$GITHUB_OUTPUT"
else
  echo "prs_needing_comment=[$PRS_NEEDING_COMMENT]" >> "$GITHUB_OUTPUT"
fi

echo "✅ PR triage completed"
|
|
@ -0,0 +1,165 @@
|
|||
# .github/workflows/ci.yml
# Jobs: build (matrix over Node versions) -> test (same matrix, consumes
# build artifacts) -> post_coverage_comment (single version); codeql runs
# independently.

name: Gemini CLI CI

on:
  push:
    branches: [main, release]
  pull_request:
    branches: [main, release]
  merge_group:

jobs:
  build:
    name: Build and Lint
    runs-on: ubuntu-latest
    permissions:
      contents: read # For checkout
    strategy:
      matrix:
        node-version: [20.x, 22.x, 24.x]
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Set up Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
        with:
          node-version: ${{ matrix.node-version }}
          cache: 'npm'

      - name: Install dependencies
        run: npm ci

      # The formatter must produce no diff on a clean tree.
      - name: Run formatter check
        run: |
          npm run format
          git diff --exit-code

      - name: Run linter
        run: npm run lint:ci

      - name: Build project
        run: npm run build

      - name: Run type check
        run: npm run typecheck

      - name: Upload build artifacts
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
        with:
          name: build-artifacts-${{ matrix.node-version }}
          path: | # Only upload dist and lockfile
            packages/*/dist
            package-lock.json

  test:
    name: Test
    runs-on: ubuntu-latest
    needs: build # This job depends on the 'build' job
    permissions:
      contents: read
      checks: write
      pull-requests: write
    strategy:
      matrix:
        node-version: [20.x, 22.x, 24.x] # Should match the build job's matrix
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Set up Node.js ${{ matrix.node-version }}
        uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
        with:
          node-version: ${{ matrix.node-version }}
          cache: 'npm'

      - name: Download build artifacts
        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
        with:
          name: build-artifacts-${{ matrix.node-version }}
          path: . # Download to the root, this will include package-lock.json and packages/*/dist

      # The download-artifact action with path: . places files relative to the
      # workspace root, so `packages/cli/dist` lands at `./packages/cli/dist`.

      - name: Install dependencies for testing
        run: npm ci # Install fresh dependencies using the downloaded package-lock.json

      - name: Run tests and generate reports
        run: NO_COLOR=true npm run test:ci

      # Forks can't get check-write permission, so the report is published
      # directly for same-repo PRs and uploaded as an artifact for forks.
      - name: Publish Test Report (for non-forks)
        if: always() && (github.event.pull_request.head.repo.full_name == github.repository)
        uses: dorny/test-reporter@890a17cecf52a379fc869ab770a71657660be727 # v2
        with:
          name: Test Results (Node ${{ matrix.node-version }})
          path: packages/*/junit.xml
          reporter: java-junit
          fail-on-error: 'false'

      - name: Upload Test Results Artifact (for forks)
        if: always() && (github.event_name == 'pull_request' && github.event.pull_request.head.repo.full_name != github.repository)
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
        with:
          name: test-results-fork-${{ matrix.node-version }}
          path: packages/*/junit.xml

      - name: Upload coverage reports
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4
        if: always()
        with:
          name: coverage-reports-${{ matrix.node-version }}
          path: packages/*/coverage

  post_coverage_comment:
    name: Post Coverage Comment
    runs-on: ubuntu-latest
    needs: test
    if: always() && github.event_name == 'pull_request' && (github.event.pull_request.head.repo.full_name == github.repository)
    continue-on-error: true
    permissions:
      contents: read # For checkout
      pull-requests: write # For commenting
    strategy:
      matrix:
        node-version: [22.x] # Reduce noise by only posting the comment once
    steps:
      - name: Checkout repository
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Download coverage reports artifact
        uses: actions/download-artifact@d3f86a106a0bac45b974a628896c90dbdf5c8093 # v4
        with:
          name: coverage-reports-${{ matrix.node-version }}
          path: coverage_artifact # Download to a specific directory

      - name: Post Coverage Comment using Composite Action
        uses: ./.github/actions/post-coverage-comment # Path to the composite action directory
        with:
          cli_json_file: coverage_artifact/cli/coverage/coverage-summary.json
          core_json_file: coverage_artifact/core/coverage/coverage-summary.json
          cli_full_text_summary_file: coverage_artifact/cli/coverage/full-text-summary.txt
          core_full_text_summary_file: coverage_artifact/core/coverage/full-text-summary.txt
          node_version: ${{ matrix.node-version }}
          github_token: ${{ secrets.GITHUB_TOKEN }}

  codeql:
    name: CodeQL
    runs-on: ubuntu-latest
    permissions:
      actions: read
      contents: read
      security-events: write
    steps:
      - name: Checkout
        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4

      - name: Initialize CodeQL
        uses: github/codeql-action/init@181d5eefc20863364f96762470ba6f862bdef56b # v3
        with:
          languages: javascript

      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@181d5eefc20863364f96762470ba6f862bdef56b # v3
|
|
@ -0,0 +1,188 @@
|
|||
name: Generate Weekly Community Report 📊
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 12 * * 1' # Run at 12:00 UTC on Monday
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
days:
|
||||
description: 'Number of days to look back for the report'
|
||||
required: true
|
||||
default: '7'
|
||||
|
||||
jobs:
|
||||
generate-report:
|
||||
name: Generate Report 📝
|
||||
if: ${{ github.repository == 'google-gemini/gemini-cli' }}
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
issues: write
|
||||
pull-requests: read
|
||||
discussions: read
|
||||
contents: read
|
||||
id-token: write
|
||||
|
||||
steps:
|
||||
- name: Generate GitHub App Token 🔑
|
||||
id: generate_token
|
||||
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2
|
||||
with:
|
||||
app-id: ${{ secrets.APP_ID }}
|
||||
private-key: ${{ secrets.PRIVATE_KEY }}
|
||||
|
||||
- name: Generate Report 📜
|
||||
id: report
|
||||
env:
|
||||
GH_TOKEN: ${{ steps.generate_token.outputs.token }}
|
||||
REPO: ${{ github.repository }}
|
||||
DAYS: ${{ github.event.inputs.days || '7' }}
|
||||
run: |
|
||||
set -e
|
||||
|
||||
START_DATE=$(date -u -d "$DAYS days ago" +'%Y-%m-%d')
|
||||
END_DATE=$(date -u +'%Y-%m-%d')
|
||||
echo "⏳ Generating report for contributions from $START_DATE to $END_DATE..."
|
||||
|
||||
declare -A author_is_googler
|
||||
check_googler_status() {
|
||||
local author=$1
|
||||
if [[ "$author" == *"[bot]" ]]; then
|
||||
author_is_googler[$author]=1
|
||||
return 1
|
||||
fi
|
||||
if [[ -v "author_is_googler[$author]" ]]; then
|
||||
return ${author_is_googler[$author]}
|
||||
fi
|
||||
|
||||
if gh api "orgs/googlers/members/$author" --silent 2>/dev/null; then
|
||||
echo "🧑‍💻 $author is a Googler."
|
||||
author_is_googler[$author]=0
|
||||
else
|
||||
echo "🌍 $author is a community contributor."
|
||||
author_is_googler[$author]=1
|
||||
fi
|
||||
return ${author_is_googler[$author]}
|
||||
}
|
||||
|
||||
googler_issues=0
|
||||
non_googler_issues=0
|
||||
googler_prs=0
|
||||
non_googler_prs=0
|
||||
|
||||
echo "🔎 Fetching issues and pull requests..."
|
||||
ITEMS_JSON=$(gh search issues --repo "$REPO" "created:>$START_DATE" --json author,isPullRequest --limit 1000)
|
||||
|
||||
for row in $(echo "${ITEMS_JSON}" | jq -r '.[] | @base64'); do
|
||||
_jq() {
|
||||
echo ${row} | base64 --decode | jq -r ${1}
|
||||
}
|
||||
author=$(_jq '.author.login')
|
||||
is_pr=$(_jq '.isPullRequest')
|
||||
|
||||
if [[ -z "$author" || "$author" == "null" ]]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
if check_googler_status "$author"; then
|
||||
if [[ "$is_pr" == "true" ]]; then
|
||||
googler_prs=$((googler_prs + 1))
|
||||
else
|
||||
googler_issues=$((googler_issues + 1))
|
||||
fi
|
||||
else
|
||||
if [[ "$is_pr" == "true" ]]; then
|
||||
non_googler_prs=$((non_googler_prs + 1))
|
||||
else
|
||||
non_googler_issues=$((non_googler_issues + 1))
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
googler_discussions=0
|
||||
non_googler_discussions=0
|
||||
|
||||
echo "🗣️ Fetching discussions..."
|
||||
DISCUSSION_QUERY='
|
||||
query($q: String!) {
|
||||
search(query: $q, type: DISCUSSION, first: 100) {
|
||||
nodes {
|
||||
... on Discussion {
|
||||
author {
|
||||
login
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}'
|
||||
DISCUSSIONS_JSON=$(gh api graphql -f q="repo:$REPO created:>$START_DATE" -f query="$DISCUSSION_QUERY")
|
||||
|
||||
for row in $(echo "${DISCUSSIONS_JSON}" | jq -r '.data.search.nodes[] | @base64'); do
|
||||
_jq() {
|
||||
echo ${row} | base64 --decode | jq -r ${1}
|
||||
}
|
||||
author=$(_jq '.author.login')
|
||||
|
||||
if [[ -z "$author" || "$author" == "null" ]]; then
|
||||
continue
|
||||
fi
|
||||
|
||||
if check_googler_status "$author"; then
|
||||
googler_discussions=$((googler_discussions + 1))
|
||||
else
|
||||
non_googler_discussions=$((non_googler_discussions + 1))
|
||||
fi
|
||||
done
|
||||
|
||||
echo "✍️ Generating report content..."
|
||||
REPORT_TITLE="Community Contribution Report: $START_DATE to $END_DATE"
|
||||
TOTAL_ISSUES=$((googler_issues + non_googler_issues))
|
||||
TOTAL_PRS=$((googler_prs + non_googler_prs))
|
||||
TOTAL_DISCUSSIONS=$((googler_discussions + non_googler_discussions))
|
||||
|
||||
REPORT_BODY=$(cat <<EOF
|
||||
### 💖 Community Contribution Report
|
||||
|
||||
**Period:** $START_DATE to $END_DATE
|
||||
|
||||
| Category | Googlers | Community | Total |
|
||||
|---|---:|---:|---:|
|
||||
| **Issues** | $googler_issues | $non_googler_issues | **$TOTAL_ISSUES** |
|
||||
| **Pull Requests** | $googler_prs | $non_googler_prs | **$TOTAL_PRS** |
|
||||
| **Discussions** | $googler_discussions | $non_googler_discussions | **$TOTAL_DISCUSSIONS** |
|
||||
|
||||
_This report was generated automatically by a GitHub Action._
|
||||
EOF
|
||||
)
|
||||
|
||||
echo "report_body<<EOF" >> "$GITHUB_OUTPUT"
|
||||
echo "$REPORT_BODY" >> "$GITHUB_OUTPUT"
|
||||
echo "EOF" >> "$GITHUB_OUTPUT"
|
||||
|
||||
echo "📊 Community Contribution Report:"
|
||||
echo "$REPORT_BODY"
|
||||
|
||||
- name: 🤖 Get Insights from Report
|
||||
if: steps.report.outputs.report_body != ''
|
||||
uses: google-gemini/gemini-cli-action@df3f890f003d28c60a2a09d2c29e0126e4d1e2ff
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
|
||||
with:
|
||||
version: 0.1.8-rc.0
|
||||
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
|
||||
OTLP_GCP_WIF_PROVIDER: ${{ secrets.OTLP_GCP_WIF_PROVIDER }}
|
||||
OTLP_GOOGLE_CLOUD_PROJECT: ${{ secrets.OTLP_GOOGLE_CLOUD_PROJECT }}
|
||||
settings_json: |
|
||||
{
|
||||
"coreTools": [
|
||||
"run_shell_command(gh issue list)",
|
||||
"run_shell_command(gh pr list)",
|
||||
"run_shell_command(gh search issues)",
|
||||
"run_shell_command(gh search prs)"
|
||||
]
|
||||
}
|
||||
prompt: |
|
||||
You are a helpful assistant that analyzes community contribution reports.
|
||||
Based on the following report, please provide a brief summary and highlight any interesting trends or potential areas for improvement.
|
||||
|
||||
Report:
|
||||
${{ steps.report.outputs.report_body }}
|
|
@ -0,0 +1,48 @@
|
|||
# .github/workflows/e2e.yml
|
||||
|
||||
name: E2E Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
merge_group:
|
||||
|
||||
jobs:
|
||||
e2e-test:
|
||||
name: E2E Test - ${{ matrix.sandbox }}
|
||||
runs-on: ubuntu-latest
|
||||
strategy:
|
||||
matrix:
|
||||
sandbox: [sandbox:none, sandbox:docker] # NOTE(review): 'sandbox:podman' is not listed, so the Podman setup step below never runs — confirm whether it should be added here or the step removed
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: 20.x
|
||||
cache: 'npm'
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
|
||||
- name: Build project
|
||||
run: npm run build
|
||||
|
||||
- name: Set up Docker
|
||||
if: matrix.sandbox == 'sandbox:docker'
|
||||
uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3
|
||||
|
||||
- name: Set up Podman
|
||||
if: matrix.sandbox == 'sandbox:podman'
|
||||
uses: redhat-actions/podman-login@4934294ad0449894bcd1e9f191899d7292469603 # v1
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Run E2E tests
|
||||
env:
|
||||
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
|
||||
run: npm run test:integration:${{ matrix.sandbox }} -- --verbose --keep-output
|
|
@ -0,0 +1,63 @@
|
|||
name: Gemini Automated Issue Triage
|
||||
|
||||
on:
|
||||
issues:
|
||||
types: [opened, reopened]
|
||||
|
||||
jobs:
|
||||
triage-issue:
|
||||
timeout-minutes: 5
|
||||
if: ${{ github.repository == 'google-gemini/gemini-cli' }}
|
||||
permissions:
|
||||
issues: write
|
||||
contents: read
|
||||
id-token: write
|
||||
concurrency:
|
||||
group: ${{ github.workflow }}-${{ github.event.issue.number }}
|
||||
cancel-in-progress: true
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Generate GitHub App Token
|
||||
id: generate_token
|
||||
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2
|
||||
with:
|
||||
app-id: ${{ secrets.APP_ID }}
|
||||
private-key: ${{ secrets.PRIVATE_KEY }}
|
||||
|
||||
- name: Run Gemini Issue Triage
|
||||
uses: google-gemini/gemini-cli-action@df3f890f003d28c60a2a09d2c29e0126e4d1e2ff
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
|
||||
with:
|
||||
version: 0.1.8-rc.0
|
||||
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
|
||||
OTLP_GCP_WIF_PROVIDER: ${{ secrets.OTLP_GCP_WIF_PROVIDER }}
|
||||
OTLP_GOOGLE_CLOUD_PROJECT: ${{ secrets.OTLP_GOOGLE_CLOUD_PROJECT }}
|
||||
settings_json: |
|
||||
{
|
||||
"coreTools": [
|
||||
"run_shell_command(gh label list)",
|
||||
"run_shell_command(gh issue edit)",
|
||||
"run_shell_command(gh issue list)"
|
||||
],
|
||||
"telemetry": {
|
||||
"enabled": true,
|
||||
"target": "gcp"
|
||||
},
|
||||
"sandbox": false
|
||||
}
|
||||
prompt: |
|
||||
You are an issue triage assistant. Analyze the current GitHub issue and apply the most appropriate existing labels.
|
||||
|
||||
Steps:
|
||||
1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to get all available labels.
|
||||
2. Review the issue title and body provided in the environment variables.
|
||||
3. Select the most relevant labels from the existing labels, focusing on kind/*, area/*, and priority/*.
|
||||
4. Apply the selected labels to this issue using: `gh issue edit ${{ github.event.issue.number }} --repo ${{ github.repository }} --add-label "label1,label2"`
|
||||
5. If the issue has a "status/need-triage" label, remove it after applying the appropriate labels: `gh issue edit ${{ github.event.issue.number }} --repo ${{ github.repository }} --remove-label "status/need-triage"`
|
||||
|
||||
Guidelines:
|
||||
- Only use labels that already exist in the repository.
|
||||
- Do not add comments or modify the issue content.
|
||||
- Triage only the current issue.
|
||||
- Assign all applicable kind/*, area/*, and priority/* labels based on the issue content.
|
|
@ -0,0 +1,100 @@
|
|||
name: Gemini Scheduled Issue Triage
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 * * * *' # Runs every hour
|
||||
workflow_dispatch: {}
|
||||
|
||||
jobs:
|
||||
triage-issues:
|
||||
timeout-minutes: 10
|
||||
if: ${{ github.repository == 'google-gemini/gemini-cli' }}
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
issues: write
|
||||
steps:
|
||||
- name: Generate GitHub App Token
|
||||
id: generate_token
|
||||
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2
|
||||
with:
|
||||
app-id: ${{ secrets.APP_ID }}
|
||||
private-key: ${{ secrets.PRIVATE_KEY }}
|
||||
|
||||
- name: Find untriaged issues
|
||||
id: find_issues
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
|
||||
run: |
|
||||
echo "🔍 Finding issues without labels..."
|
||||
NO_LABEL_ISSUES=$(gh issue list --repo ${{ github.repository }} --search "is:open is:issue no:label" --json number,title,body)
|
||||
|
||||
echo "🏷️ Finding issues that need triage..."
|
||||
NEED_TRIAGE_ISSUES=$(gh issue list --repo ${{ github.repository }} --search "is:open is:issue label:\"status/need-triage\"" --json number,title,body)
|
||||
|
||||
echo "🔄 Merging and deduplicating issues..."
|
||||
ISSUES=$(echo "$NO_LABEL_ISSUES" "$NEED_TRIAGE_ISSUES" | jq -c -s 'add | unique_by(.number)')
|
||||
|
||||
echo "📝 Setting output for GitHub Actions..."
|
||||
echo "issues_to_triage=$ISSUES" >> "$GITHUB_OUTPUT"
|
||||
|
||||
echo "✅ Found $(echo "$ISSUES" | jq 'length') issues to triage! 🎯"
|
||||
|
||||
- name: Run Gemini Issue Triage
|
||||
if: steps.find_issues.outputs.issues_to_triage != '[]'
|
||||
uses: google-gemini/gemini-cli-action@df3f890f003d28c60a2a09d2c29e0126e4d1e2ff
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
|
||||
ISSUES_TO_TRIAGE: ${{ steps.find_issues.outputs.issues_to_triage }}
|
||||
REPOSITORY: ${{ github.repository }}
|
||||
with:
|
||||
version: 0.1.8-rc.0
|
||||
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
|
||||
OTLP_GCP_WIF_PROVIDER: ${{ secrets.OTLP_GCP_WIF_PROVIDER }}
|
||||
OTLP_GOOGLE_CLOUD_PROJECT: ${{ secrets.OTLP_GOOGLE_CLOUD_PROJECT }}
|
||||
settings_json: |
|
||||
{
|
||||
"coreTools": [
|
||||
"run_shell_command(echo)",
|
||||
"run_shell_command(gh label list)",
|
||||
"run_shell_command(gh issue edit)",
|
||||
"run_shell_command(gh issue list)"
|
||||
],
|
||||
"telemetry": {
|
||||
"enabled": true,
|
||||
"target": "gcp"
|
||||
},
|
||||
"sandbox": false
|
||||
}
|
||||
prompt: |
|
||||
You are an issue triage assistant. Analyze issues and apply appropriate labels ONE AT A TIME.
|
||||
|
||||
Repository: ${{ github.repository }}
|
||||
|
||||
Steps:
|
||||
1. Run: `gh label list --repo ${{ github.repository }} --limit 100` to see available labels
|
||||
2. Check environment variable for issues to triage: $ISSUES_TO_TRIAGE (JSON array of issues)
|
||||
3. Parse the JSON array from step 2 and for EACH INDIVIDUAL issue, apply appropriate labels using separate commands:
|
||||
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label1"`
|
||||
- `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --add-label "label2"`
|
||||
- Continue for each label separately
|
||||
|
||||
IMPORTANT: Label each issue individually, one command per issue, one label at a time if needed.
|
||||
|
||||
Guidelines:
|
||||
- Only use existing repository labels from step 1
|
||||
- Do not add comments to issues
|
||||
- Triage each issue independently based on title and body content
|
||||
- Focus on applying: kind/* (bug/enhancement/documentation), area/* (core/cli/testing/windows), and priority/* labels
|
||||
- If an issue has insufficient information, consider applying "status/need-information"
|
||||
- After applying appropriate labels to an issue, remove the "status/need-triage" label if present: `gh issue edit ISSUE_NUMBER --repo ${{ github.repository }} --remove-label "status/need-triage"`
|
||||
- Execute one `gh issue edit` command per issue, wait for success before proceeding to the next
|
||||
|
||||
Example triage logic:
|
||||
- Issues with "bug", "error", "broken" → kind/bug
|
||||
- Issues with "feature", "enhancement", "improve" → kind/enhancement
|
||||
- Issues about Windows/performance → area/windows, area/performance
|
||||
- Critical bugs → priority/p0, other bugs → priority/p1, enhancements → priority/p2
|
||||
|
||||
Process each issue sequentially and confirm each labeling operation before moving to the next issue.
|
|
@ -0,0 +1,36 @@
|
|||
name: Gemini Scheduled PR Triage 🚀
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '*/15 * * * *' # Runs every 15 minutes
|
||||
workflow_dispatch: {}
|
||||
|
||||
jobs:
|
||||
audit-prs:
|
||||
timeout-minutes: 15
|
||||
if: ${{ github.repository == 'google-gemini/gemini-cli' }}
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
issues: write
|
||||
pull-requests: write
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
prs_needing_comment: ${{ steps.run_triage.outputs.prs_needing_comment }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
|
||||
- name: Generate GitHub App Token
|
||||
id: generate_token
|
||||
uses: actions/create-github-app-token@df432ceedc7162793a195dd1713ff69aefc7379e # v2
|
||||
with:
|
||||
app-id: ${{ secrets.APP_ID }}
|
||||
private-key: ${{ secrets.PRIVATE_KEY }}
|
||||
|
||||
- name: Run PR Triage Script
|
||||
id: run_triage
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ steps.generate_token.outputs.token }}
|
||||
GITHUB_REPOSITORY: ${{ github.repository }}
|
||||
run: ./.github/scripts/pr-triage.sh
|
|
@ -0,0 +1,173 @@
|
|||
name: Release
|
||||
|
||||
on:
|
||||
schedule:
|
||||
# Runs every day at midnight UTC for the nightly release.
|
||||
- cron: '0 0 * * *'
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
version:
|
||||
description: 'The version to release (e.g., v0.1.11). Required for manual patch releases.'
|
||||
required: false # Not required for scheduled runs
|
||||
type: string
|
||||
ref:
|
||||
description: 'The branch or ref (full git sha) to release from.'
|
||||
required: true
|
||||
type: string
|
||||
default: 'main'
|
||||
dry_run:
|
||||
description: 'Run a dry-run of the release process; no branches, npm packages or GitHub releases will be created.'
|
||||
required: true
|
||||
type: boolean
|
||||
default: true
|
||||
create_nightly_release:
|
||||
description: 'Auto apply the nightly release tag, input version is ignored.'
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
force_skip_tests:
|
||||
description: 'Skip the "Run Tests" step. Production releases should always run tests.'
|
||||
required: false
|
||||
type: boolean
|
||||
default: false
|
||||
|
||||
jobs:
|
||||
release:
|
||||
runs-on: ubuntu-latest
|
||||
environment:
|
||||
name: production-release
|
||||
url: ${{ github.server_url }}/${{ github.repository }}/releases/tag/${{ steps.version.outputs.RELEASE_TAG }}
|
||||
if: github.repository == 'google-gemini/gemini-cli'
|
||||
permissions:
|
||||
contents: write
|
||||
packages: write
|
||||
id-token: write
|
||||
issues: write # For creating issues on failure
|
||||
outputs:
|
||||
RELEASE_TAG: ${{ steps.version.outputs.RELEASE_TAG }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4
|
||||
with:
|
||||
ref: ${{ github.sha }}
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Set booleans for simplified logic
|
||||
id: vars
|
||||
run: |
|
||||
is_nightly="false"
|
||||
if [[ "${{ github.event_name }}" == "schedule" || "${{ github.event.inputs.create_nightly_release }}" == "true" ]]; then
|
||||
is_nightly="true"
|
||||
fi
|
||||
echo "is_nightly=${is_nightly}" >> $GITHUB_OUTPUT
|
||||
|
||||
is_dry_run="false"
|
||||
if [[ "${{ github.event.inputs.dry_run }}" == "true" ]]; then
|
||||
is_dry_run="true"
|
||||
fi
|
||||
echo "is_dry_run=${is_dry_run}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Setup Node.js
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: '20'
|
||||
cache: 'npm'
|
||||
|
||||
- name: Install Dependencies
|
||||
run: npm ci
|
||||
|
||||
- name: Get the version
|
||||
id: version
|
||||
run: |
|
||||
VERSION_JSON=$(node scripts/get-release-version.js)
|
||||
echo "RELEASE_TAG=$(echo $VERSION_JSON | jq -r .releaseTag)" >> $GITHUB_OUTPUT
|
||||
echo "RELEASE_VERSION=$(echo $VERSION_JSON | jq -r .releaseVersion)" >> $GITHUB_OUTPUT
|
||||
echo "NPM_TAG=$(echo $VERSION_JSON | jq -r .npmTag)" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
IS_NIGHTLY: ${{ steps.vars.outputs.is_nightly }}
|
||||
MANUAL_VERSION: ${{ inputs.version }}
|
||||
|
||||
- name: Run Tests
|
||||
if: github.event.inputs.force_skip_tests != 'true'
|
||||
run: |
|
||||
npm run preflight
|
||||
npm run test:integration:sandbox:none
|
||||
npm run test:integration:sandbox:docker
|
||||
env:
|
||||
GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
|
||||
|
||||
- name: Configure Git User
|
||||
run: |
|
||||
git config user.name "github-actions[bot]"
|
||||
git config user.email "github-actions[bot]@users.noreply.github.com"
|
||||
|
||||
- name: Create and switch to a release branch
|
||||
id: release_branch
|
||||
run: |
|
||||
BRANCH_NAME="release/${{ steps.version.outputs.RELEASE_TAG }}"
|
||||
git switch -c $BRANCH_NAME
|
||||
echo "BRANCH_NAME=${BRANCH_NAME}" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Update package versions
|
||||
run: |
|
||||
npm run release:version ${{ steps.version.outputs.RELEASE_VERSION }}
|
||||
|
||||
- name: Commit and Conditionally Push package versions
|
||||
run: |
|
||||
git add package.json package-lock.json packages/*/package.json
|
||||
git commit -m "chore(release): ${{ steps.version.outputs.RELEASE_TAG }}"
|
||||
if [[ "${{ steps.vars.outputs.is_dry_run }}" == "false" ]]; then
|
||||
echo "Pushing release branch to remote..."
|
||||
git push --set-upstream origin ${{ steps.release_branch.outputs.BRANCH_NAME }} --follow-tags
|
||||
else
|
||||
echo "Dry run enabled. Skipping push."
|
||||
fi
|
||||
|
||||
- name: Build and Prepare Packages
|
||||
run: |
|
||||
npm run build:packages
|
||||
npm run prepare:package
|
||||
|
||||
- name: Configure npm for publishing
|
||||
uses: actions/setup-node@49933ea5288caeca8642d1e84afbd3f7d6820020 # v4
|
||||
with:
|
||||
node-version: '20'
|
||||
registry-url: 'https://wombat-dressing-room.appspot.com'
|
||||
scope: '@google'
|
||||
|
||||
- name: Publish @google/gemini-cli-core
|
||||
run: npm publish --workspace=@google/gemini-cli-core --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.WOMBAT_TOKEN_CORE }}
|
||||
|
||||
- name: Install latest core package
|
||||
if: steps.vars.outputs.is_dry_run == 'false'
|
||||
run: npm install @google/gemini-cli-core@${{ steps.version.outputs.RELEASE_VERSION }} --workspace=@google/gemini-cli --save-exact
|
||||
|
||||
- name: Publish @google/gemini-cli
|
||||
run: npm publish --workspace=@google/gemini-cli --tag=${{ steps.version.outputs.NPM_TAG }} ${{ steps.vars.outputs.is_dry_run == 'true' && '--dry-run' || '' }}
|
||||
env:
|
||||
NODE_AUTH_TOKEN: ${{ secrets.WOMBAT_TOKEN_CLI }}
|
||||
|
||||
- name: Create GitHub Release and Tag
|
||||
if: ${{ steps.vars.outputs.is_dry_run == 'false' }}
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
RELEASE_BRANCH: ${{ steps.release_branch.outputs.BRANCH_NAME }}
|
||||
run: |
|
||||
gh release create ${{ steps.version.outputs.RELEASE_TAG }} \
|
||||
bundle/gemini.js \
|
||||
--target "$RELEASE_BRANCH" \
|
||||
--title "Release ${{ steps.version.outputs.RELEASE_TAG }}" \
|
||||
--generate-notes
|
||||
|
||||
- name: Create Issue on Failure
|
||||
if: failure()
|
||||
run: |
|
||||
gh issue create \
|
||||
--title "Release Failed for ${{ steps.version.outputs.RELEASE_TAG || 'N/A' }} on $(date +'%Y-%m-%d')" \
|
||||
--body "The release workflow failed. See the full run for details: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }}" \
|
||||
--label "kind/bug,release-failure"
|
||||
env:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
@ -0,0 +1,48 @@
|
|||
# API keys and secrets
|
||||
.env
|
||||
.env~
|
||||
|
||||
# gemini-cli settings
|
||||
.gemini/*
|
||||
!.gemini/config.yaml
|
||||
|
||||
# Note: .gemini-clipboard/ is NOT in gitignore so Gemini can access pasted images
|
||||
|
||||
# Dependency directory
|
||||
node_modules
|
||||
bower_components
|
||||
|
||||
# Editors
|
||||
.idea
|
||||
*.iml
|
||||
|
||||
# OS metadata
|
||||
.DS_Store
|
||||
Thumbs.db
|
||||
|
||||
# TypeScript build info files
|
||||
*.tsbuildinfo
|
||||
|
||||
# Ignore built ts files
|
||||
dist
|
||||
|
||||
# Docker folder to help skip auth refreshes
|
||||
.docker
|
||||
|
||||
bundle
|
||||
|
||||
# Test report files
|
||||
junit.xml
|
||||
packages/*/coverage/
|
||||
|
||||
# Generated files
|
||||
packages/cli/src/generated/
|
||||
.integration-tests/
|
||||
|
||||
|
||||
# Logs
|
||||
logs/
|
||||
|
||||
|
||||
# Qwen Code Configs
|
||||
.qwen/
|
|
@ -0,0 +1,2 @@
|
|||
@google:registry=https://wombat-dressing-room.appspot.com
|
||||
@ali:registry=https://registry.anpm.alibaba-inc.com
|
|
@ -0,0 +1,7 @@
|
|||
{
|
||||
"semi": true,
|
||||
"trailingComma": "all",
|
||||
"singleQuote": true,
|
||||
"printWidth": 80,
|
||||
"tabWidth": 2
|
||||
}
|
|
@ -0,0 +1,81 @@
|
|||
{
|
||||
// Use IntelliSense to learn about possible attributes.
|
||||
// Hover to view descriptions of existing attributes.
|
||||
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||
"version": "0.2.0",
|
||||
"configurations": [
|
||||
{
|
||||
"type": "node",
|
||||
"request": "launch",
|
||||
"name": "Launch CLI",
|
||||
"runtimeExecutable": "npm",
|
||||
"runtimeArgs": ["run", "start"],
|
||||
"skipFiles": ["<node_internals>/**"],
|
||||
"cwd": "${workspaceFolder}",
|
||||
"console": "integratedTerminal",
|
||||
"env": {
|
||||
"GEMINI_SANDBOX": "false"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "node",
|
||||
"request": "launch",
|
||||
"name": "Launch E2E",
|
||||
"program": "${workspaceFolder}/integration-tests/run-tests.js",
|
||||
"args": ["--verbose", "--keep-output", "list_directory"],
|
||||
"skipFiles": ["<node_internals>/**"],
|
||||
"cwd": "${workspaceFolder}",
|
||||
"console": "integratedTerminal",
|
||||
"env": {
|
||||
"GEMINI_SANDBOX": "false"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "Attach",
|
||||
"port": 9229,
|
||||
"request": "attach",
|
||||
"skipFiles": ["<node_internals>/**"],
|
||||
"type": "node",
|
||||
// fix source mapping when debugging in sandbox using global installation
|
||||
// note this does not interfere when remoteRoot is also ${workspaceFolder}/packages
|
||||
"remoteRoot": "/usr/local/share/npm-global/lib/node_modules/@gemini-cli",
|
||||
"localRoot": "${workspaceFolder}/packages"
|
||||
},
|
||||
{
|
||||
"type": "node",
|
||||
"request": "launch",
|
||||
"name": "Launch Program",
|
||||
"skipFiles": ["<node_internals>/**"],
|
||||
"program": "${file}",
|
||||
"outFiles": ["${workspaceFolder}/**/*.js"]
|
||||
},
|
||||
{
|
||||
"type": "node",
|
||||
"request": "launch",
|
||||
"name": "Debug Test File",
|
||||
"runtimeExecutable": "npm",
|
||||
"runtimeArgs": [
|
||||
"run",
|
||||
"test",
|
||||
"-w",
|
||||
"packages",
|
||||
"--",
|
||||
"--inspect-brk=9229",
|
||||
"--no-file-parallelism",
|
||||
"${input:testFile}"
|
||||
],
|
||||
"cwd": "${workspaceFolder}",
|
||||
"console": "integratedTerminal",
|
||||
"internalConsoleOptions": "neverOpen",
|
||||
"skipFiles": ["<node_internals>/**"]
|
||||
}
|
||||
],
|
||||
"inputs": [
|
||||
{
|
||||
"id": "testFile",
|
||||
"type": "promptString",
|
||||
"description": "Enter the path to the test file (e.g., ${workspaceFolder}/packages/cli/src/ui/components/LoadingIndicator.test.tsx)",
|
||||
"default": "${workspaceFolder}/packages/cli/src/ui/components/LoadingIndicator.test.tsx"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -0,0 +1,3 @@
|
|||
{
|
||||
"typescript.tsserver.experimental.enableProjectDiagnostics": true
|
||||
}
|
|
@ -0,0 +1,16 @@
|
|||
{
|
||||
"version": "2.0.0",
|
||||
"tasks": [
|
||||
{
|
||||
"type": "npm",
|
||||
"script": "build",
|
||||
"group": {
|
||||
"kind": "build",
|
||||
"isDefault": true
|
||||
},
|
||||
"problemMatcher": [],
|
||||
"label": "npm: build",
|
||||
"detail": "scripts/build.sh"
|
||||
}
|
||||
]
|
||||
}
|
|
@ -0,0 +1,299 @@
|
|||
# How to Contribute
|
||||
|
||||
We would love to accept your patches and contributions to this project.
|
||||
|
||||
## Before you begin
|
||||
|
||||
### Sign our Contributor License Agreement
|
||||
|
||||
Contributions to this project must be accompanied by a
|
||||
[Contributor License Agreement](https://cla.developers.google.com/about) (CLA).
|
||||
You (or your employer) retain the copyright to your contribution; this simply
|
||||
gives us permission to use and redistribute your contributions as part of the
|
||||
project.
|
||||
|
||||
If you or your current employer have already signed the Google CLA (even if it
|
||||
was for a different project), you probably don't need to do it again.
|
||||
|
||||
Visit <https://cla.developers.google.com/> to see your current agreements or to
|
||||
sign a new one.
|
||||
|
||||
### Review our Community Guidelines
|
||||
|
||||
This project follows [Google's Open Source Community
|
||||
Guidelines](https://opensource.google/conduct/).
|
||||
|
||||
## Contribution Process
|
||||
|
||||
### Code Reviews
|
||||
|
||||
All submissions, including submissions by project members, require review. We
|
||||
use [GitHub pull requests](https://docs.github.com/articles/about-pull-requests)
|
||||
for this purpose.
|
||||
|
||||
### Pull Request Guidelines
|
||||
|
||||
To help us review and merge your PRs quickly, please follow these guidelines. PRs that do not meet these standards may be closed.
|
||||
|
||||
#### 1. Link to an Existing Issue
|
||||
|
||||
All PRs should be linked to an existing issue in our tracker. This ensures that every change has been discussed and is aligned with the project's goals before any code is written.
|
||||
|
||||
- **For bug fixes:** The PR should be linked to the bug report issue.
|
||||
- **For features:** The PR should be linked to the feature request or proposal issue that has been approved by a maintainer.
|
||||
|
||||
If an issue for your change doesn't exist, please **open one first** and wait for feedback before you start coding.
|
||||
|
||||
#### 2. Keep It Small and Focused
|
||||
|
||||
We favor small, atomic PRs that address a single issue or add a single, self-contained feature.
|
||||
|
||||
- **Do:** Create a PR that fixes one specific bug or adds one specific feature.
|
||||
- **Don't:** Bundle multiple unrelated changes (e.g., a bug fix, a new feature, and a refactor) into a single PR.
|
||||
|
||||
Large changes should be broken down into a series of smaller, logical PRs that can be reviewed and merged independently.
|
||||
|
||||
#### 3. Use Draft PRs for Work in Progress
|
||||
|
||||
If you'd like to get early feedback on your work, please use GitHub's **Draft Pull Request** feature. This signals to the maintainers that the PR is not yet ready for a formal review but is open for discussion and initial feedback.
|
||||
|
||||
#### 4. Ensure All Checks Pass
|
||||
|
||||
Before submitting your PR, ensure that all automated checks are passing by running `npm run preflight`. This command runs all tests, linting, and other style checks.
|
||||
|
||||
#### 5. Update Documentation
|
||||
|
||||
If your PR introduces a user-facing change (e.g., a new command, a modified flag, or a change in behavior), you must also update the relevant documentation in the `/docs` directory.
|
||||
|
||||
#### 6. Write Clear Commit Messages and a Good PR Description
|
||||
|
||||
Your PR should have a clear, descriptive title and a detailed description of the changes. Follow the [Conventional Commits](https://www.conventionalcommits.org/) standard for your commit messages.
|
||||
|
||||
- **Good PR Title:** `feat(cli): Add --json flag to 'config get' command`
|
||||
- **Bad PR Title:** `Made some changes`
|
||||
|
||||
In the PR description, explain the "why" behind your changes and link to the relevant issue (e.g., `Fixes #123`).
|
||||
|
||||
## Forking
|
||||
|
||||
If you are forking the repository you will be able to run the Build, Test and Integration test workflows. However in order to make the integration tests run you'll need to add a [GitHub Repository Secret](https://docs.github.com/en/actions/security-for-github-actions/security-guides/using-secrets-in-github-actions#creating-secrets-for-a-repository) with a value of `GEMINI_API_KEY` and set that to a valid API key that you have available. Your key and secret are private to your repo; no one without access can see your key and you cannot see any secrets related to this repo.
|
||||
|
||||
Additionally you will need to click on the `Actions` tab and enable workflows for your repository, you'll find it's the large blue button in the center of the screen.
|
||||
|
||||
## Development Setup and Workflow
|
||||
|
||||
This section guides contributors on how to build, modify, and understand the development setup of this project.
|
||||
|
||||
### Setting Up the Development Environment
|
||||
|
||||
**Prerequisites:**
|
||||
|
||||
1. **Node.js**:
|
||||
- **Development:** Please use Node.js `~20.19.0`. This specific version is required due to an upstream development dependency issue. You can use a tool like [nvm](https://github.com/nvm-sh/nvm) to manage Node.js versions.
|
||||
- **Production:** For running the CLI in a production environment, any version of Node.js `>=20` is acceptable.
|
||||
2. **Git**
|
||||
|
||||
### Build Process
|
||||
|
||||
To clone the repository:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/google-gemini/gemini-cli.git # Or your fork's URL
|
||||
cd gemini-cli
|
||||
```
|
||||
|
||||
To install dependencies defined in `package.json` as well as root dependencies:
|
||||
|
||||
```bash
|
||||
npm install
|
||||
```
|
||||
|
||||
To build the entire project (all packages):
|
||||
|
||||
```bash
|
||||
npm run build
|
||||
```
|
||||
|
||||
This command typically compiles TypeScript to JavaScript, bundles assets, and prepares the packages for execution. Refer to `scripts/build.js` and `package.json` scripts for more details on what happens during the build.
|
||||
|
||||
### Enabling Sandboxing
|
||||
|
||||
[Sandboxing](#sandboxing) is highly recommended and requires, at a minimum, setting `GEMINI_SANDBOX=true` in your `~/.env` and ensuring a sandboxing provider (e.g. `macOS Seatbelt`, `docker`, or `podman`) is available. See [Sandboxing](#sandboxing) for details.
|
||||
|
||||
To build both the `gemini` CLI utility and the sandbox container, run `build:all` from the root directory:
|
||||
|
||||
```bash
|
||||
npm run build:all
|
||||
```
|
||||
|
||||
To skip building the sandbox container, you can use `npm run build` instead.
|
||||
|
||||
### Running
|
||||
|
||||
To start the Gemini CLI from the source code (after building), run the following command from the root directory:
|
||||
|
||||
```bash
|
||||
npm start
|
||||
```
|
||||
|
||||
If you'd like to run the source build outside of the gemini-cli folder you can utilize `npm link path/to/gemini-cli/packages/cli` (see: [docs](https://docs.npmjs.com/cli/v9/commands/npm-link)) or `alias gemini="node path/to/gemini-cli/packages/cli"` to run with `gemini`.
|
||||
|
||||
### Running Tests
|
||||
|
||||
This project contains two types of tests: unit tests and integration tests.
|
||||
|
||||
#### Unit Tests
|
||||
|
||||
To execute the unit test suite for the project:
|
||||
|
||||
```bash
|
||||
npm run test
|
||||
```
|
||||
|
||||
This will run tests located in the `packages/core` and `packages/cli` directories. Ensure tests pass before submitting any changes. For a more comprehensive check, it is recommended to run `npm run preflight`.
|
||||
|
||||
#### Integration Tests
|
||||
|
||||
The integration tests are designed to validate the end-to-end functionality of the Gemini CLI. They are not run as part of the default `npm run test` command.
|
||||
|
||||
To run the integration tests, use the following command:
|
||||
|
||||
```bash
|
||||
npm run test:e2e
|
||||
```
|
||||
|
||||
For more detailed information on the integration testing framework, please see the [Integration Tests documentation](./docs/integration-tests.md).
|
||||
|
||||
### Linting and Preflight Checks
|
||||
|
||||
To ensure code quality and formatting consistency, run the preflight check:
|
||||
|
||||
```bash
|
||||
npm run preflight
|
||||
```
|
||||
|
||||
This command will run ESLint, Prettier, all tests, and other checks as defined in the project's `package.json`.
|
||||
|
||||
_ProTip_
|
||||
|
||||
After cloning, create a git pre-commit hook file to ensure your commits are always clean.
|
||||
|
||||
```bash
|
||||
echo "
|
||||
# Run npm build and check for errors
|
||||
if ! npm run preflight; then
|
||||
  echo 'npm build failed. Commit aborted.'
|
||||
exit 1
|
||||
fi
|
||||
" > .git/hooks/pre-commit && chmod +x .git/hooks/pre-commit
|
||||
```
|
||||
|
||||
#### Formatting
|
||||
|
||||
To separately format the code in this project, run the following command from the root directory:
|
||||
|
||||
```bash
|
||||
npm run format
|
||||
```
|
||||
|
||||
This command uses Prettier to format the code according to the project's style guidelines.
|
||||
|
||||
#### Linting
|
||||
|
||||
To separately lint the code in this project, run the following command from the root directory:
|
||||
|
||||
```bash
|
||||
npm run lint
|
||||
```
|
||||
|
||||
### Coding Conventions
|
||||
|
||||
- Please adhere to the coding style, patterns, and conventions used throughout the existing codebase.
|
||||
- Consult [GEMINI.md](https://github.com/google-gemini/gemini-cli/blob/main/GEMINI.md) (typically found in the project root) for specific instructions related to AI-assisted development, including conventions for React, comments, and Git usage.
|
||||
- **Imports:** Pay special attention to import paths. The project uses `eslint-rules/no-relative-cross-package-imports.js` to enforce restrictions on relative imports between packages.
|
||||
|
||||
### Project Structure
|
||||
|
||||
- `packages/`: Contains the individual sub-packages of the project.
|
||||
- `cli/`: The command-line interface.
|
||||
- `core/`: The core backend logic for the Gemini CLI.
|
||||
- `docs/`: Contains all project documentation.
|
||||
- `scripts/`: Utility scripts for building, testing, and development tasks.
|
||||
|
||||
For more detailed architecture, see `docs/architecture.md`.
|
||||
|
||||
## Debugging
|
||||
|
||||
### VS Code:
|
||||
|
||||
0. Run the CLI to interactively debug in VS Code with `F5`
|
||||
1. Start the CLI in debug mode from the root directory:
|
||||
```bash
|
||||
npm run debug
|
||||
```
|
||||
This command runs `node --inspect-brk dist/gemini.js` within the `packages/cli` directory, pausing execution until a debugger attaches. You can then open `chrome://inspect` in your Chrome browser to connect to the debugger.
|
||||
2. In VS Code, use the "Attach" launch configuration (found in `.vscode/launch.json`).
|
||||
|
||||
Alternatively, you can use the "Launch Program" configuration in VS Code if you prefer to launch the currently open file directly, but 'F5' is generally recommended.
|
||||
|
||||
To hit a breakpoint inside the sandbox container run:
|
||||
|
||||
```bash
|
||||
DEBUG=1 gemini
|
||||
```
|
||||
|
||||
### React DevTools
|
||||
|
||||
To debug the CLI's React-based UI, you can use React DevTools. Ink, the library used for the CLI's interface, is compatible with React DevTools version 4.x.
|
||||
|
||||
1. **Start the Gemini CLI in development mode:**
|
||||
|
||||
```bash
|
||||
DEV=true npm start
|
||||
```
|
||||
|
||||
2. **Install and run React DevTools version 4.28.5 (or the latest compatible 4.x version):**
|
||||
|
||||
You can either install it globally:
|
||||
|
||||
```bash
|
||||
npm install -g react-devtools@4.28.5
|
||||
react-devtools
|
||||
```
|
||||
|
||||
Or run it directly using npx:
|
||||
|
||||
```bash
|
||||
npx react-devtools@4.28.5
|
||||
```
|
||||
|
||||
Your running CLI application should then connect to React DevTools.
|
||||

|
||||
|
||||
## Sandboxing
|
||||
|
||||
### MacOS Seatbelt
|
||||
|
||||
On MacOS, `gemini` uses Seatbelt (`sandbox-exec`) under a `permissive-open` profile (see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) that restricts writes to the project folder but otherwise allows all other operations and outbound network traffic ("open") by default. You can switch to a `restrictive-closed` profile (see `packages/cli/src/utils/sandbox-macos-restrictive-closed.sb`) that declines all operations and outbound network traffic ("closed") by default by setting `SEATBELT_PROFILE=restrictive-closed` in your environment or `.env` file. Available built-in profiles are `{permissive,restrictive}-{open,closed,proxied}` (see below for proxied networking). You can also switch to a custom profile `SEATBELT_PROFILE=<profile>` if you also create a file `.qwen/sandbox-macos-<profile>.sb` under your project settings directory (`.qwen`).
|
||||
|
||||
### Container-based Sandboxing (All Platforms)
|
||||
|
||||
For stronger container-based sandboxing on MacOS or other platforms, you can set `GEMINI_SANDBOX=true|docker|podman|<command>` in your environment or `.env` file. The specified command (or if `true` then either `docker` or `podman`) must be installed on the host machine. Once enabled, `npm run build:all` will build a minimal container ("sandbox") image and `npm start` will launch inside a fresh instance of that container. The first build can take 20-30s (mostly due to downloading of the base image) but after that both build and start overhead should be minimal. Default builds (`npm run build`) will not rebuild the sandbox.
|
||||
|
||||
Container-based sandboxing mounts the project directory (and system temp directory) with read-write access and is started/stopped/removed automatically as you start/stop Gemini CLI. Files created within the sandbox should be automatically mapped to your user/group on host machine. You can easily specify additional mounts, ports, or environment variables by setting `SANDBOX_{MOUNTS,PORTS,ENV}` as needed. You can also fully customize the sandbox for your projects by creating the files `.qwen/sandbox.Dockerfile` and/or `.qwen/sandbox.bashrc` under your project settings directory (`.qwen`) and running `gemini` with `BUILD_SANDBOX=1` to trigger building of your custom sandbox.
|
||||
|
||||
#### Proxied Networking
|
||||
|
||||
All sandboxing methods, including MacOS Seatbelt using `*-proxied` profiles, support restricting outbound network traffic through a custom proxy server that can be specified as `GEMINI_SANDBOX_PROXY_COMMAND=<command>`, where `<command>` must start a proxy server that listens on `:::8877` for relevant requests. See `docs/examples/proxy-script.md` for a minimal proxy that only allows `HTTPS` connections to `example.com:443` (e.g. `curl https://example.com`) and declines all other requests. The proxy is started and stopped automatically alongside the sandbox.
|
||||
|
||||
## Manual Publish
|
||||
|
||||
We publish an artifact for each commit to our internal registry. But if you need to manually cut a local build, then run the following commands:
|
||||
|
||||
```
|
||||
npm run clean
|
||||
npm install
|
||||
npm run auth
|
||||
npm run prerelease:dev
|
||||
npm publish --workspaces
|
||||
```
|
|
@ -0,0 +1,50 @@
|
|||
FROM docker.io/library/node:20-slim
|
||||
|
||||
ARG SANDBOX_NAME="gemini-cli-sandbox"
|
||||
ARG CLI_VERSION_ARG
|
||||
ENV SANDBOX="$SANDBOX_NAME"
|
||||
ENV CLI_VERSION=$CLI_VERSION_ARG
|
||||
|
||||
# install minimal set of packages, then clean up
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
python3 \
|
||||
make \
|
||||
g++ \
|
||||
man-db \
|
||||
curl \
|
||||
dnsutils \
|
||||
less \
|
||||
jq \
|
||||
bc \
|
||||
gh \
|
||||
git \
|
||||
unzip \
|
||||
rsync \
|
||||
ripgrep \
|
||||
procps \
|
||||
psmisc \
|
||||
lsof \
|
||||
socat \
|
||||
ca-certificates \
|
||||
&& apt-get clean \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# set up npm global package folder under /usr/local/share
|
||||
# give it to non-root user node, already set up in base image
|
||||
RUN mkdir -p /usr/local/share/npm-global \
|
||||
&& chown -R node:node /usr/local/share/npm-global
|
||||
ENV NPM_CONFIG_PREFIX=/usr/local/share/npm-global
|
||||
ENV PATH=$PATH:/usr/local/share/npm-global/bin
|
||||
|
||||
# switch to non-root user node
|
||||
USER node
|
||||
|
||||
# install gemini-cli and clean up
|
||||
COPY packages/cli/dist/google-gemini-cli-*.tgz /usr/local/share/npm-global/gemini-cli.tgz
|
||||
COPY packages/core/dist/google-gemini-cli-core-*.tgz /usr/local/share/npm-global/gemini-core.tgz
|
||||
RUN npm install -g /usr/local/share/npm-global/gemini-cli.tgz /usr/local/share/npm-global/gemini-core.tgz \
|
||||
&& npm cache clean --force \
|
||||
&& rm -f /usr/local/share/npm-global/gemini-{cli,core}.tgz
|
||||
|
||||
# default entrypoint when none specified
|
||||
CMD ["gemini"]
|
3
LICENSE
|
@ -1,3 +1,4 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
@ -186,7 +187,7 @@
|
|||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
Copyright 2025 Google LLC
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -0,0 +1,59 @@
|
|||
# Makefile for gemini-cli
|
||||
|
||||
.PHONY: help install build build-sandbox build-all test lint format preflight clean start debug release run-npx create-alias
|
||||
|
||||
help:
|
||||
@echo "Makefile for gemini-cli"
|
||||
@echo ""
|
||||
@echo "Usage:"
|
||||
@echo " make install - Install npm dependencies"
|
||||
@echo " make build - Build the main project"
|
||||
@echo " make build-all - Build the main project and sandbox"
|
||||
@echo " make test - Run the test suite"
|
||||
@echo " make lint - Lint the code"
|
||||
@echo " make format - Format the code"
|
||||
@echo " make preflight - Run formatting, linting, and tests"
|
||||
@echo " make clean - Remove generated files"
|
||||
@echo " make start - Start the Gemini CLI"
|
||||
@echo " make debug - Start the Gemini CLI in debug mode"
|
||||
@echo ""
|
||||
@echo " make run-npx - Run the CLI using npx (for testing the published package)"
|
||||
@echo " make create-alias - Create a 'gemini' alias for your shell"
|
||||
|
||||
install:
|
||||
npm install
|
||||
|
||||
build:
|
||||
npm run build
|
||||
|
||||
|
||||
build-all:
|
||||
npm run build:all
|
||||
|
||||
test:
|
||||
npm run test
|
||||
|
||||
lint:
|
||||
npm run lint
|
||||
|
||||
format:
|
||||
npm run format
|
||||
|
||||
preflight:
|
||||
npm run preflight
|
||||
|
||||
clean:
|
||||
npm run clean
|
||||
|
||||
start:
|
||||
npm run start
|
||||
|
||||
debug:
|
||||
npm run debug
|
||||
|
||||
|
||||
run-npx:
|
||||
npx https://github.com/google-gemini/gemini-cli
|
||||
|
||||
create-alias:
|
||||
scripts/create_alias.sh
|
|
@ -0,0 +1,184 @@
|
|||
## Building and running
|
||||
|
||||
Before submitting any changes, it is crucial to validate them by running the full preflight check. This command will build the repository, run all tests, check for type errors, and lint the code.
|
||||
|
||||
To run the full suite of checks, execute the following command:
|
||||
|
||||
```bash
|
||||
npm run preflight
|
||||
```
|
||||
|
||||
This single command ensures that your changes meet all the quality gates of the project. While you can run the individual steps (`build`, `test`, `typecheck`, `lint`) separately, it is highly recommended to use `npm run preflight` to ensure a comprehensive validation.
|
||||
|
||||
## Writing Tests
|
||||
|
||||
This project uses **Vitest** as its primary testing framework. When writing tests, aim to follow existing patterns. Key conventions include:
|
||||
|
||||
### Test Structure and Framework
|
||||
|
||||
- **Framework**: All tests are written using Vitest (`describe`, `it`, `expect`, `vi`).
|
||||
- **File Location**: Test files (`*.test.ts` for logic, `*.test.tsx` for React components) are co-located with the source files they test.
|
||||
- **Configuration**: Test environments are defined in `vitest.config.ts` files.
|
||||
- **Setup/Teardown**: Use `beforeEach` and `afterEach`. Commonly, `vi.resetAllMocks()` is called in `beforeEach` and `vi.restoreAllMocks()` in `afterEach`.
|
||||
|
||||
### Mocking (`vi` from Vitest)
|
||||
|
||||
- **ES Modules**: Mock with `vi.mock('module-name', async (importOriginal) => { ... })`. Use `importOriginal` for selective mocking.
|
||||
- _Example_: `vi.mock('os', async (importOriginal) => { const actual = await importOriginal(); return { ...actual, homedir: vi.fn() }; });`
|
||||
- **Mocking Order**: For critical dependencies (e.g., `os`, `fs`) that affect module-level constants, place `vi.mock` at the _very top_ of the test file, before other imports.
|
||||
- **Hoisting**: Use `const myMock = vi.hoisted(() => vi.fn());` if a mock function needs to be defined before its use in a `vi.mock` factory.
|
||||
- **Mock Functions**: Create with `vi.fn()`. Define behavior with `mockImplementation()`, `mockResolvedValue()`, or `mockRejectedValue()`.
|
||||
- **Spying**: Use `vi.spyOn(object, 'methodName')`. Restore spies with `mockRestore()` in `afterEach`.
|
||||
|
||||
### Commonly Mocked Modules
|
||||
|
||||
- **Node.js built-ins**: `fs`, `fs/promises`, `os` (especially `os.homedir()`), `path`, `child_process` (`execSync`, `spawn`).
|
||||
- **External SDKs**: `@google/genai`, `@modelcontextprotocol/sdk`.
|
||||
- **Internal Project Modules**: Dependencies from other project packages are often mocked.
|
||||
|
||||
### React Component Testing (CLI UI - Ink)
|
||||
|
||||
- Use `render()` from `ink-testing-library`.
|
||||
- Assert output with `lastFrame()`.
|
||||
- Wrap components in necessary `Context.Provider`s.
|
||||
- Mock custom React hooks and complex child components using `vi.mock()`.
|
||||
|
||||
### Asynchronous Testing
|
||||
|
||||
- Use `async/await`.
|
||||
- For timers, use `vi.useFakeTimers()`, `vi.advanceTimersByTimeAsync()`, `vi.runAllTimersAsync()`.
|
||||
- Test promise rejections with `await expect(promise).rejects.toThrow(...)`.
|
||||
|
||||
### General Guidance
|
||||
|
||||
- When adding tests, first examine existing tests to understand and conform to established conventions.
|
||||
- Pay close attention to the mocks at the top of existing test files; they reveal critical dependencies and how they are managed in a test environment.
|
||||
|
||||
## Git Repo
|
||||
|
||||
The main branch for this project is called "main"
|
||||
|
||||
## JavaScript/TypeScript
|
||||
|
||||
When contributing to this React, Node, and TypeScript codebase, please prioritize the use of plain JavaScript objects with accompanying TypeScript interface or type declarations over JavaScript class syntax. This approach offers significant advantages, especially concerning interoperability with React and overall code maintainability.
|
||||
|
||||
### Preferring Plain Objects over Classes
|
||||
|
||||
JavaScript classes, by their nature, are designed to encapsulate internal state and behavior. While this can be useful in some object-oriented paradigms, it often introduces unnecessary complexity and friction when working with React's component-based architecture. Here's why plain objects are preferred:
|
||||
|
||||
- Seamless React Integration: React components thrive on explicit props and state management. Classes' tendency to store internal state directly within instances can make prop and state propagation harder to reason about and maintain. Plain objects, on the other hand, are inherently immutable (when used thoughtfully) and can be easily passed as props, simplifying data flow and reducing unexpected side effects.
|
||||
|
||||
- Reduced Boilerplate and Increased Conciseness: Classes often promote the use of constructors, this binding, getters, setters, and other boilerplate that can unnecessarily bloat code. TypeScript interface and type declarations provide powerful static type checking without the runtime overhead or verbosity of class definitions. This allows for more succinct and readable code, aligning with JavaScript's strengths in functional programming.
|
||||
|
||||
- Enhanced Readability and Predictability: Plain objects, especially when their structure is clearly defined by TypeScript interfaces, are often easier to read and understand. Their properties are directly accessible, and there's no hidden internal state or complex inheritance chains to navigate. This predictability leads to fewer bugs and a more maintainable codebase.
|
||||
- Simplified Immutability: While not strictly enforced, plain objects encourage an immutable approach to data. When you need to modify an object, you typically create a new one with the desired changes, rather than mutating the original. This pattern aligns perfectly with React's reconciliation process and helps prevent subtle bugs related to shared mutable state.
|
||||
|
||||
- Better Serialization and Deserialization: Plain JavaScript objects are naturally easy to serialize to JSON and deserialize back, which is a common requirement in web development (e.g., for API communication or local storage). Classes, with their methods and prototypes, can complicate this process.
|
||||
|
||||
### Embracing ES Module Syntax for Encapsulation
|
||||
|
||||
Rather than relying on Java-esque private or public class members, which can be verbose and sometimes limit flexibility, we strongly prefer leveraging ES module syntax (`import`/`export`) for encapsulating private and public APIs.
|
||||
|
||||
- Clearer Public API Definition: With ES modules, anything that is exported is part of the public API of that module, while anything not exported is inherently private to that module. This provides a very clear and explicit way to define what parts of your code are meant to be consumed by other modules.
|
||||
|
||||
- Enhanced Testability (Without Exposing Internals): By default, unexported functions or variables are not accessible from outside the module. This encourages you to test the public API of your modules, rather than their internal implementation details. If you find yourself needing to spy on or stub an unexported function for testing purposes, it's often a "code smell" indicating that the function might be a good candidate for extraction into its own separate, testable module with a well-defined public API. This promotes a more robust and maintainable testing strategy.
|
||||
|
||||
- Reduced Coupling: Explicitly defined module boundaries through import/export help reduce coupling between different parts of your codebase. This makes it easier to refactor, debug, and understand individual components in isolation.
|
||||
|
||||
### Avoiding `any` Types and Type Assertions; Preferring `unknown`
|
||||
|
||||
TypeScript's power lies in its ability to provide static type checking, catching potential errors before your code runs. To fully leverage this, it's crucial to avoid the `any` type and be judicious with type assertions.
|
||||
|
||||
- **The Dangers of `any`**: Using `any` effectively opts out of TypeScript's type checking for that particular variable or expression. While it might seem convenient in the short term, it introduces significant risks:
|
||||
- **Loss of Type Safety**: You lose all the benefits of type checking, making it easy to introduce runtime errors that TypeScript would otherwise have caught.
|
||||
- **Reduced Readability and Maintainability**: Code with `any` types is harder to understand and maintain, as the expected type of data is no longer explicitly defined.
|
||||
  - **Masking Underlying Issues**: Often, the need for `any` indicates a deeper problem in the design of your code or the way you're interacting with external libraries. It's a sign that you might need to refine your types or refactor your code.
|
||||
|
||||
- **Preferring `unknown` over `any`**: When you absolutely cannot determine the type of a value at compile time, and you're tempted to reach for `any`, consider using `unknown` instead. `unknown` is a type-safe counterpart to `any`. While a variable of type `unknown` can hold any value, you must perform type narrowing (e.g., using `typeof` or `instanceof` checks, or a type assertion) before you can perform any operations on it. This forces you to handle the `unknown` type explicitly, preventing accidental runtime errors.
|
||||
|
||||
```
|
||||
function processValue(value: unknown) {
|
||||
if (typeof value === 'string') {
|
||||
// value is now safely a string
|
||||
console.log(value.toUpperCase());
|
||||
} else if (typeof value === 'number') {
|
||||
// value is now safely a number
|
||||
console.log(value * 2);
|
||||
}
|
||||
// Without narrowing, you cannot access properties or methods on 'value'
|
||||
// console.log(value.someProperty); // Error: Object is of type 'unknown'.
|
||||
}
|
||||
```
|
||||
|
||||
- **Type Assertions (`as Type`) - Use with Caution**: Type assertions tell the TypeScript compiler, "Trust me, I know what I'm doing; this is definitely of this type." While there are legitimate use cases (e.g., when dealing with external libraries that don't have perfect type definitions, or when you have more information than the compiler), they should be used sparingly and with extreme caution.
|
||||
- **Bypassing Type Checking**: Like `any`, type assertions bypass TypeScript's safety checks. If your assertion is incorrect, you introduce a runtime error that TypeScript would not have warned you about.
|
||||
- **Code Smell in Testing**: A common scenario where `any` or type assertions might be tempting is when trying to test "private" implementation details (e.g., spying on or stubbing an unexported function within a module). This is a strong indication of a "code smell" in your testing strategy and potentially your code structure. Instead of trying to force access to private internals, consider whether those internal details should be refactored into a separate module with a well-defined public API. This makes them inherently testable without compromising encapsulation.
|
||||
|
||||
### Embracing JavaScript's Array Operators
|
||||
|
||||
To further enhance code cleanliness and promote safe functional programming practices, leverage JavaScript's rich set of array operators as much as possible. Methods like `.map()`, `.filter()`, `.reduce()`, `.slice()`, `.sort()`, and others are incredibly powerful for transforming and manipulating data collections in an immutable and declarative way.
|
||||
|
||||
Using these operators:
|
||||
|
||||
- Promotes Immutability: Most array operators return new arrays, leaving the original array untouched. This functional approach helps prevent unintended side effects and makes your code more predictable.
|
||||
- Improves Readability: Chaining array operators often leads to more concise and expressive code than traditional `for` loops or imperative logic. The intent of the operation is clear at a glance.
|
||||
- Facilitates Functional Programming: These operators are cornerstones of functional programming, encouraging the creation of pure functions that take inputs and produce outputs without causing side effects. This paradigm is highly beneficial for writing robust and testable code that pairs well with React.
|
||||
|
||||
By consistently applying these principles, we can maintain a codebase that is not only efficient and performant but also a joy to work with, both now and in the future.
|
||||
|
||||
## React (mirrored and adjusted from [react-mcp-server](https://github.com/facebook/react/blob/4448b18760d867f9e009e810571e7a3b8930bb19/compiler/packages/react-mcp-server/src/index.ts#L376C1-L441C94))
|
||||
|
||||
### Role
|
||||
|
||||
You are a React assistant that helps users write more efficient and optimizable React code. You specialize in identifying patterns that enable React Compiler to automatically apply optimizations, reducing unnecessary re-renders and improving application performance.
|
||||
|
||||
### Follow these guidelines in all code you produce and suggest
|
||||
|
||||
Use functional components with Hooks: Do not generate class components or use old lifecycle methods. Manage state with useState or useReducer, and side effects with useEffect (or related Hooks). Always prefer functions and Hooks for any new component logic.
|
||||
|
||||
Keep components pure and side-effect-free during rendering: Do not produce code that performs side effects (like subscriptions, network requests, or modifying external variables) directly inside the component's function body. Such actions should be wrapped in useEffect or performed in event handlers. Ensure your render logic is a pure function of props and state.
|
||||
|
||||
Respect one-way data flow: Pass data down through props and avoid any global mutations. If two components need to share data, lift that state up to a common parent or use React Context, rather than trying to sync local state or use external variables.
|
||||
|
||||
Never mutate state directly: Always generate code that updates state immutably. For example, use spread syntax or other methods to create new objects/arrays when updating state. Do not use assignments like state.someValue = ... or array mutations like array.push() on state variables. Use the state setter (setState from useState, etc.) to update state.
|
||||
|
||||
Accurately use `useEffect` and other effect Hooks: whenever you think you could use `useEffect`, think and reason harder to avoid it. `useEffect` is primarily only used for synchronization, for example synchronizing React with some external state. IMPORTANT - Don't call `setState` (the 2nd value returned by `useState`) within a `useEffect` as that will degrade performance. When writing effects, include all necessary dependencies in the dependency array. Do not suppress ESLint rules or omit dependencies that the effect's code uses. Structure the effect callbacks to handle changing values properly (e.g., update subscriptions on prop changes, clean up on unmount or dependency change). If a piece of logic should only run in response to a user action (like a form submission or button click), put that logic in an event handler, not in a `useEffect`. Where possible, effects should return a cleanup function.
|
||||
|
||||
Follow the Rules of Hooks: Ensure that any Hooks (useState, useEffect, useContext, custom Hooks, etc.) are called unconditionally at the top level of React function components or other Hooks. Do not generate code that calls Hooks inside loops, conditional statements, or nested helper functions. Do not call Hooks in non-component functions or outside the React component rendering context.
|
||||
|
||||
Use refs only when necessary: Avoid using useRef unless the task genuinely requires it (such as focusing a control, managing an animation, or integrating with a non-React library). Do not use refs to store application state that should be reactive. If you do use refs, never write to or read from ref.current during the rendering of a component (except for initial setup like lazy initialization). Any ref usage should not affect the rendered output directly.
|
||||
|
||||
Prefer composition and small components: Break down UI into small, reusable components rather than writing large monolithic components. The code you generate should promote clarity and reusability by composing components together. Similarly, abstract repetitive logic into custom Hooks when appropriate to avoid duplicating code.
|
||||
|
||||
Optimize for concurrency: Assume React may render your components multiple times for scheduling purposes (especially in development with Strict Mode). Write code that remains correct even if the component function runs more than once. For instance, avoid side effects in the component body and use functional state updates (e.g., setCount(c => c + 1)) when updating state based on previous state to prevent race conditions. Always include cleanup functions in effects that subscribe to external resources. Don't write useEffects for "do this when this changes" side effects. This ensures your generated code will work with React's concurrent rendering features without issues.
|
||||
|
||||
Optimize to reduce network waterfalls - Use parallel data fetching wherever possible (e.g., start multiple requests at once rather than one after another). Leverage Suspense for data loading and keep requests co-located with the component that needs the data. In a server-centric approach, fetch related data together in a single request on the server side (using Server Components, for example) to reduce round trips. Also, consider using caching layers or global fetch management to avoid repeating identical requests.
|
||||
|
||||
Rely on React Compiler - useMemo, useCallback, and React.memo can be omitted if React Compiler is enabled. Avoid premature optimization with manual memoization. Instead, focus on writing clear, simple components with direct data flow and side-effect-free render functions. Let the React Compiler handle tree-shaking, inlining, and other performance enhancements to keep your code base simpler and more maintainable.
|
||||
|
||||
Design for a good user experience - Provide clear, minimal, and non-blocking UI states. When data is loading, show lightweight placeholders (e.g., skeleton screens) rather than intrusive spinners everywhere. Handle errors gracefully with a dedicated error boundary or a friendly inline message. Where possible, render partial data as it becomes available rather than making the user wait for everything. Suspense allows you to declare the loading states in your component tree in a natural way, preventing “flash” states and improving perceived performance.
|
||||
|
||||
### Process
|
||||
|
||||
1. Analyze the user's code for optimization opportunities:
|
||||
- Check for React anti-patterns that prevent compiler optimization
|
||||
- Look for component structure issues that limit compiler effectiveness
|
||||
- Think about each suggestion you are making and consult React docs for best practices
|
||||
|
||||
2. Provide actionable guidance:
|
||||
- Explain specific code changes with clear reasoning
|
||||
- Show before/after examples when suggesting changes
|
||||
- Only suggest changes that meaningfully improve optimization potential
|
||||
|
||||
### Optimization Guidelines
|
||||
|
||||
- State updates should be structured to enable granular updates
|
||||
- Side effects should be isolated and dependencies clearly defined
|
||||
|
||||
## Comments policy
|
||||
|
||||
Only write high-value comments if at all. Avoid talking to the user through comments.
|
||||
|
||||
## General style requirements
|
||||
|
||||
Use hyphens instead of underscores in flag names (e.g. `my-flag` instead of `my_flag`).
|
|
@ -0,0 +1,162 @@
|
|||
# Gemini CLI
|
||||
|
||||
[](https://github.com/google-gemini/gemini-cli/actions/workflows/ci.yml)
|
||||
|
||||

|
||||
|
||||
This repository contains the Gemini CLI, a command-line AI workflow tool that connects to your
|
||||
tools, understands your code and accelerates your workflows.
|
||||
|
||||
With the Gemini CLI you can:
|
||||
|
||||
- Query and edit large codebases in and beyond Gemini's 1M token context window.
|
||||
- Generate new apps from PDFs or sketches, using Gemini's multimodal capabilities.
|
||||
- Automate operational tasks, like querying pull requests or handling complex rebases.
|
||||
- Use tools and MCP servers to connect new capabilities, including [media generation with Imagen,
|
||||
Veo or Lyria](https://github.com/GoogleCloudPlatform/vertex-ai-creative-studio/tree/main/experiments/mcp-genmedia)
|
||||
- Ground your queries with the [Google Search](https://ai.google.dev/gemini-api/docs/grounding)
|
||||
tool, built in to Gemini.
|
||||
|
||||
## Quickstart
|
||||
|
||||
1. **Prerequisites:** Ensure you have [Node.js version 20](https://nodejs.org/en/download) or higher installed.
|
||||
2. **Run the CLI:** Execute the following command in your terminal:
|
||||
|
||||
```bash
|
||||
npx https://github.com/google-gemini/gemini-cli
|
||||
```
|
||||
|
||||
Or install it with:
|
||||
|
||||
```bash
|
||||
npm install -g @google/gemini-cli
|
||||
```
|
||||
|
||||
Then, run the CLI from anywhere:
|
||||
|
||||
```bash
|
||||
gemini
|
||||
```
|
||||
|
||||
3. **Pick a color theme**
|
||||
4. **Authenticate:** When prompted, sign in with your personal Google account. This will grant you up to 60 model requests per minute and 1,000 model requests per day using Gemini.
|
||||
|
||||
You are now ready to use the Gemini CLI!
|
||||
|
||||
### Use a Gemini API key:
|
||||
|
||||
The Gemini API provides a free tier with [100 requests per day](https://ai.google.dev/gemini-api/docs/rate-limits#free-tier) using Gemini 2.5 Pro, control over which model you use, and access to higher rate limits (with a paid plan):
|
||||
|
||||
1. Generate a key from [Google AI Studio](https://aistudio.google.com/apikey).
|
||||
2. Set it as an environment variable in your terminal. Replace `YOUR_API_KEY` with your generated key.
|
||||
|
||||
```bash
|
||||
export GEMINI_API_KEY="YOUR_API_KEY"
|
||||
```
|
||||
|
||||
3. (Optionally) Upgrade your Gemini API project to a paid plan on the API key page (will automatically unlock [Tier 1 rate limits](https://ai.google.dev/gemini-api/docs/rate-limits#tier-1))
|
||||
|
||||
### Use a Vertex AI API key:
|
||||
|
||||
The Vertex AI API provides a [free tier](https://cloud.google.com/vertex-ai/generative-ai/docs/start/express-mode/overview) using express mode for Gemini 2.5 Pro, control over which model you use, and access to higher rate limits with a billing account:
|
||||
|
||||
1. Generate a key from [Google Cloud](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys).
|
||||
2. Set it as an environment variable in your terminal. Replace `YOUR_API_KEY` with your generated key and set GOOGLE_GENAI_USE_VERTEXAI to true
|
||||
|
||||
```bash
|
||||
export GOOGLE_API_KEY="YOUR_API_KEY"
|
||||
export GOOGLE_GENAI_USE_VERTEXAI=true
|
||||
```
|
||||
|
||||
3. (Optionally) Add a billing account on your project to get access to [higher usage limits](https://cloud.google.com/vertex-ai/generative-ai/docs/quotas)
|
||||
|
||||
For other authentication methods, including Google Workspace accounts, see the [authentication](./docs/cli/authentication.md) guide.
|
||||
|
||||
## Examples
|
||||
|
||||
Once the CLI is running, you can start interacting with Gemini from your shell.
|
||||
|
||||
You can start a project from a new directory:
|
||||
|
||||
```sh
|
||||
cd new-project/
|
||||
gemini
|
||||
> Write me a Gemini Discord bot that answers questions using a FAQ.md file I will provide
|
||||
```
|
||||
|
||||
Or work with an existing project:
|
||||
|
||||
```sh
|
||||
git clone https://github.com/google-gemini/gemini-cli
|
||||
cd gemini-cli
|
||||
gemini
|
||||
> Give me a summary of all of the changes that went in yesterday
|
||||
```
|
||||
|
||||
### Next steps
|
||||
|
||||
- Learn how to [contribute to or build from the source](./CONTRIBUTING.md).
|
||||
- Explore the available **[CLI Commands](./docs/cli/commands.md)**.
|
||||
- If you encounter any issues, review the **[troubleshooting guide](./docs/troubleshooting.md)**.
|
||||
- For more comprehensive documentation, see the [full documentation](./docs/index.md).
|
||||
- Take a look at some [popular tasks](#popular-tasks) for more inspiration.
|
||||
- Check out our **[Official Roadmap](./ROADMAP.md)**
|
||||
|
||||
### Troubleshooting
|
||||
|
||||
Head over to the [troubleshooting guide](docs/troubleshooting.md) if you're
|
||||
having issues.
|
||||
|
||||
## Popular tasks
|
||||
|
||||
### Explore a new codebase
|
||||
|
||||
Start by `cd`ing into an existing or newly-cloned repository and running `gemini`.
|
||||
|
||||
```text
|
||||
> Describe the main pieces of this system's architecture.
|
||||
```
|
||||
|
||||
```text
|
||||
> What security mechanisms are in place?
|
||||
```
|
||||
|
||||
### Work with your existing code
|
||||
|
||||
```text
|
||||
> Implement a first draft for GitHub issue #123.
|
||||
```
|
||||
|
||||
```text
|
||||
> Help me migrate this codebase to the latest version of Java. Start with a plan.
|
||||
```
|
||||
|
||||
### Automate your workflows
|
||||
|
||||
Use MCP servers to integrate your local system tools with your enterprise collaboration suite.
|
||||
|
||||
```text
|
||||
> Make me a slide deck showing the git history from the last 7 days, grouped by feature and team member.
|
||||
```
|
||||
|
||||
```text
|
||||
> Make a full-screen web app for a wall display to show our most interacted-with GitHub issues.
|
||||
```
|
||||
|
||||
### Interact with your system
|
||||
|
||||
```text
|
||||
> Convert all the images in this directory to png, and rename them to use dates from the exif data.
|
||||
```
|
||||
|
||||
```text
|
||||
> Organize my PDF invoices by month of expenditure.
|
||||
```
|
||||
|
||||
### Uninstall
|
||||
|
||||
Head over to the [Uninstall](docs/Uninstall.md) guide for uninstallation instructions.
|
||||
|
||||
## Terms of Service and Privacy Notice
|
||||
|
||||
For details on the terms of service and privacy notice applicable to your use of Gemini CLI, see the [Terms of Service and Privacy Notice](./docs/tos-privacy.md).
|
131
README.md
|
@ -1,2 +1,131 @@
|
|||
# Qwen Code
|
||||
Qwen Code is a coding agent that lives in the digital world.
|
||||
|
||||

|
||||
|
||||
Qwen Code is a command-line AI workflow tool adapted from [**Gemini CLI**](https://github.com/google-gemini/gemini-cli) (please refer to [this document](./README.gemini.md) for more details), optimized for Qwen-Coder models with enhanced parser and tool support.
|
||||
|
||||
## Key Features
|
||||
|
||||
- **Code Understanding & Editing** - Query and edit large codebases beyond traditional context window limits
|
||||
- **Workflow Automation** - Automate operational tasks like handling pull requests and complex rebases
|
||||
- **Enhanced Parser** - Adapted parser specifically optimized for Qwen-Coder models
|
||||
|
||||
## Quick Start
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Ensure you have [Node.js version 20](https://nodejs.org/en/download) or higher installed.
|
||||
|
||||
```bash
|
||||
curl -qL https://www.npmjs.com/install.sh | sh
|
||||
```
|
||||
|
||||
### Installation
|
||||
|
||||
```bash
|
||||
npm install -g @qwen-code/qwen-code
|
||||
qwen --version
|
||||
```
|
||||
|
||||
Then run from anywhere:
|
||||
|
||||
```bash
|
||||
qwen
|
||||
```
|
||||
|
||||
Or you can install it from source:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/qwen-code/qwen-code.git
|
||||
cd qwen-code
|
||||
npm install
|
||||
npm install -g .
|
||||
```
|
||||
|
||||
### API Configuration
|
||||
|
||||
Set your Qwen API key (In Qwen Code project, you can also set your API key in `.env` file):
|
||||
|
||||
```bash
|
||||
export OPENAI_API_KEY="your_api_key_here"
|
||||
export OPENAI_BASE_URL="your_api_base_url_here"
|
||||
export OPENAI_MODEL="your_api_model_here"
|
||||
```
|
||||
|
||||
## Usage Examples
|
||||
|
||||
### Explore Codebases
|
||||
|
||||
```sh
|
||||
cd your-project/
|
||||
qwen
|
||||
> Describe the main pieces of this system's architecture
|
||||
```
|
||||
|
||||
### Code Development
|
||||
|
||||
```sh
|
||||
> Refactor this function to improve readability and performance
|
||||
```
|
||||
|
||||
### Automate Workflows
|
||||
|
||||
```sh
|
||||
> Analyze git commits from the last 7 days, grouped by feature and team member
|
||||
```
|
||||
|
||||
```sh
|
||||
> Convert all images in this directory to PNG format
|
||||
```
|
||||
|
||||
## Popular Tasks
|
||||
|
||||
### Understand New Codebases
|
||||
|
||||
```text
|
||||
> What are the core business logic components?
|
||||
> What security mechanisms are in place?
|
||||
> How does the data flow work?
|
||||
```
|
||||
|
||||
### Code Refactoring & Optimization
|
||||
|
||||
```text
|
||||
> What parts of this module can be optimized?
|
||||
> Help me refactor this class to follow better design patterns
|
||||
> Add proper error handling and logging
|
||||
```
|
||||
|
||||
### Documentation & Testing
|
||||
|
||||
```text
|
||||
> Generate comprehensive JSDoc comments for this function
|
||||
> Write unit tests for this component
|
||||
> Create API documentation
|
||||
```
|
||||
|
||||
## Project Structure
|
||||
|
||||
```
|
||||
qwen-code/
|
||||
├── packages/ # Core packages
|
||||
├── docs/ # Documentation
|
||||
├── examples/ # Example code
|
||||
└── tests/ # Test files
|
||||
```
|
||||
|
||||
## Development & Contributing
|
||||
|
||||
See [CONTRIBUTING.md](./CONTRIBUTING.md) to learn how to contribute to the project.
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
If you encounter issues, check the [troubleshooting guide](docs/troubleshooting.md).
|
||||
|
||||
## Acknowledgments
|
||||
|
||||
This project is based on [Google Gemini CLI](https://github.com/google-gemini/gemini-cli). We acknowledge and appreciate the excellent work of the Gemini CLI team. Our main contribution focuses on parser-level adaptations to better support Qwen-Coder models.
|
||||
|
||||
## License
|
||||
|
||||
[LICENSE](./LICENSE)
|
||||
|
|
|
@ -0,0 +1,63 @@
|
|||
# Gemini CLI Roadmap
|
||||
|
||||
The [Official Gemini CLI Roadmap](https://github.com/orgs/google-gemini/projects/11/)
|
||||
|
||||
Gemini CLI is an open-source AI agent that brings the power of Gemini directly into your terminal. It provides lightweight access to Gemini, giving you the most direct path from your prompt to our model.
|
||||
|
||||
This document outlines our approach to the Gemini CLI roadmap. Here, you'll find our guiding principles and a breakdown of the key areas we are
|
||||
focused on for development. Our roadmap is not a static list but a dynamic set of priorities that are tracked live in our GitHub Issues.
|
||||
|
||||
As an [Apache 2.0 open source project](https://github.com/google-gemini/gemini-cli?tab=Apache-2.0-1-ov-file#readme), we appreciate and welcome [public contributions](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md), and will give first priority to those contributions aligned with our roadmap. If you want to propose a new feature or change to our roadmap, please start by [opening an issue for discussion](https://github.com/google-gemini/gemini-cli/issues/new/choose).
|
||||
|
||||
## Disclaimer
|
||||
|
||||
This roadmap represents our current thinking and is for informational purposes only. It is not a commitment or a guarantee of future delivery. The development, release, and timing of any features are subject to change, and we may update the roadmap based on community discussions as well as when our priorities evolve.
|
||||
|
||||
## Guiding Principles
|
||||
|
||||
Our development is guided by the following principles:
|
||||
|
||||
- **Power & Simplicity:** Deliver access to state-of-the-art Gemini models with an intuitive and easy-to-use lightweight command-line interface.
|
||||
- **Extensibility:** An adaptable agent to help you with a variety of use cases and environments along with the ability to run these agents anywhere.
|
||||
- **Intelligent:** Gemini CLI should be reliably ranked among the best agentic tools as measured by benchmarks like SWE Bench, Terminal Bench, and CSAT.
|
||||
- **Free and Open Source:** Foster a thriving open source community where cost isn’t a barrier to personal use, and PRs get merged quickly. This means resolving and closing issues, pull requests, and discussion posts quickly.
|
||||
|
||||
## How the Roadmap Works
|
||||
|
||||
Our roadmap is managed directly through Github Issues. See our entry point Roadmap Issue [here](https://github.com/google-gemini/gemini-cli/issues/4191). This approach allows for transparency and gives you a direct way to learn more or get involved with any specific initiative. All our roadmap items will be tagged as Type:`Feature` and Label:`maintainer` for features we are actively working on, or Type:`Task` and Label:`maintainer` for a more detailed list of tasks.
|
||||
|
||||
Issues are organized to provide key information at a glance:
|
||||
|
||||
- **Target Quarter:** `Milestone` denotes the anticipated delivery timeline.
|
||||
- **Feature Area:** Labels such as `area/model` or `area/tooling` categorize the work.
|
||||
- **Issue Type:** _Workstream_ => _Epics_ => _Features_ => _Tasks|Bugs_
|
||||
|
||||
To see what we're working on, you can filter our issues by these dimensions. See all our items [here](https://github.com/orgs/google-gemini/projects/11/views/19)
|
||||
|
||||
## Focus Areas
|
||||
|
||||
To better organize our efforts, we categorize our work into several key feature areas. These labels are used on our GitHub Issues to help you filter and
|
||||
find initiatives that interest you.
|
||||
|
||||
- **Authentication:** Secure user access via API keys, Gemini Code Assist login etc.
|
||||
- **Model:** Support new Gemini models, multi-modality, local execution, and performance tuning.
|
||||
- **User Experience:** Improve the CLI's usability, performance, interactive features, and documentation.
|
||||
- **Tooling:** Built-in tools and the MCP ecosystem.
|
||||
- **Core:** Core functionality of the CLI
|
||||
- **Extensibility:** Bringing Gemini CLI to other surfaces e.g. GitHub.
|
||||
- **Contribution:** Improve the contribution process via test automation and CI/CD pipeline enhancements.
|
||||
- **Platform:** Manage installation, OS support, and the underlying CLI framework.
|
||||
- **Quality:** Focus on testing, reliability, performance, and overall product quality.
|
||||
- **Background Agents:** Enable long-running, autonomous tasks and proactive assistance.
|
||||
- **Security and Privacy:** For all things related to security and privacy
|
||||
|
||||
## How to Contribute
|
||||
|
||||
Gemini CLI is an open-source project, and we welcome contributions from the community! Whether you're a developer, a designer, or just an enthusiastic user you can find our [Community Guidelines here](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md) to learn how to get started. There are many ways to get involved:
|
||||
|
||||
- **Roadmap:** Please review and find areas in our [roadmap](https://github.com/google-gemini/gemini-cli/issues/4191) that you would like to contribute to. Contributions based on this will be easiest to integrate with.
|
||||
- **Report Bugs:** If you find an issue, please [create a bug report](https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml) with as much detail as possible. If you believe it is a critical breaking issue preventing direct CLI usage, please tag it as `priority/p0`.
|
||||
- **Suggest Features:** Have a great idea? We'd love to hear it! Open a [feature request](https://github.com/google-gemini/gemini-cli/issues/new?template=feature_request.yml).
|
||||
- **Contribute Code:** Check out our [CONTRIBUTING.md](https://github.com/google-gemini/gemini-cli/blob/main/CONTRIBUTING.md) file for guidelines on how to submit pull requests. We have a list of "good first issues" for new contributors.
|
||||
- **Write Documentation:** Help us improve our documentation, tutorials, and examples.
|
||||
We are excited about the future of Gemini CLI and look forward to building it with you!
|
|
@ -0,0 +1,42 @@
|
|||
# Uninstalling the CLI
|
||||
|
||||
Your uninstall method depends on how you ran the CLI. Follow the instructions for either npx or a global npm installation.
|
||||
|
||||
## Method 1: Using npx
|
||||
|
||||
npx runs packages from a temporary cache without a permanent installation. To "uninstall" the CLI, you must clear this cache, which will remove gemini-cli and any other packages previously executed with npx.
|
||||
|
||||
The npx cache is a directory named `_npx` inside your main npm cache folder. You can find your npm cache path by running `npm config get cache`.
|
||||
|
||||
**For macOS / Linux**
|
||||
|
||||
```bash
|
||||
# The path is typically ~/.npm/_npx
|
||||
rm -rf "$(npm config get cache)/_npx"
|
||||
```
|
||||
|
||||
**For Windows**
|
||||
|
||||
_Command Prompt_
|
||||
|
||||
```cmd
|
||||
:: The path is typically %LocalAppData%\npm-cache\_npx
|
||||
rmdir /s /q "%LocalAppData%\npm-cache\_npx"
|
||||
```
|
||||
|
||||
_PowerShell_
|
||||
|
||||
```powershell
|
||||
# The path is typically $env:LocalAppData\npm-cache\_npx
|
||||
Remove-Item -Path (Join-Path $env:LocalAppData "npm-cache\_npx") -Recurse -Force
|
||||
```
|
||||
|
||||
## Method 2: Using npm (Global Install)
|
||||
|
||||
If you installed the CLI globally (e.g., `npm install -g @google/gemini-cli`), use the `npm uninstall` command with the `-g` flag to remove it.
|
||||
|
||||
```bash
|
||||
npm uninstall -g @google/gemini-cli
|
||||
```
|
||||
|
||||
This command completely removes the package from your system.
|
|
@ -0,0 +1,54 @@
|
|||
# Gemini CLI Architecture Overview
|
||||
|
||||
This document provides a high-level overview of the Gemini CLI's architecture.
|
||||
|
||||
## Core components
|
||||
|
||||
The Gemini CLI is primarily composed of two main packages, along with a suite of tools that can be used by the system in the course of handling command-line input:
|
||||
|
||||
1. **CLI package (`packages/cli`):**
|
||||
- **Purpose:** This contains the user-facing portion of the Gemini CLI, such as handling the initial user input, presenting the final output, and managing the overall user experience.
|
||||
- **Key functions contained in the package:**
|
||||
- [Input processing](./cli/commands.md)
|
||||
- History management
|
||||
- Display rendering
|
||||
- [Theme and UI customization](./cli/themes.md)
|
||||
- [CLI configuration settings](./cli/configuration.md)
|
||||
|
||||
2. **Core package (`packages/core`):**
|
||||
- **Purpose:** This acts as the backend for the Gemini CLI. It receives requests sent from `packages/cli`, orchestrates interactions with the Gemini API, and manages the execution of available tools.
|
||||
- **Key functions contained in the package:**
|
||||
- API client for communicating with the Google Gemini API
|
||||
- Prompt construction and management
|
||||
- Tool registration and execution logic
|
||||
- State management for conversations or sessions
|
||||
- Server-side configuration
|
||||
|
||||
3. **Tools (`packages/core/src/tools/`):**
|
||||
- **Purpose:** These are individual modules that extend the capabilities of the Gemini model, allowing it to interact with the local environment (e.g., file system, shell commands, web fetching).
|
||||
- **Interaction:** `packages/core` invokes these tools based on requests from the Gemini model.
|
||||
|
||||
## Interaction Flow
|
||||
|
||||
A typical interaction with the Gemini CLI follows this flow:
|
||||
|
||||
1. **User input:** The user types a prompt or command into the terminal, which is managed by `packages/cli`.
|
||||
2. **Request to core:** `packages/cli` sends the user's input to `packages/core`.
|
||||
3. **Request processed:** The core package:
|
||||
- Constructs an appropriate prompt for the Gemini API, possibly including conversation history and available tool definitions.
|
||||
- Sends the prompt to the Gemini API.
|
||||
4. **Gemini API response:** The Gemini API processes the prompt and returns a response. This response might be a direct answer or a request to use one of the available tools.
|
||||
5. **Tool execution (if applicable):**
|
||||
- When the Gemini API requests a tool, the core package prepares to execute it.
|
||||
- If the requested tool can modify the file system or execute shell commands, the user is first given details of the tool and its arguments, and the user must approve the execution.
|
||||
- Read-only operations, such as reading files, might not require explicit user confirmation to proceed.
|
||||
- Once confirmed, or if confirmation is not required, the core package executes the relevant action within the relevant tool, and the result is sent back to the Gemini API by the core package.
|
||||
- The Gemini API processes the tool result and generates a final response.
|
||||
6. **Response to CLI:** The core package sends the final response back to the CLI package.
|
||||
7. **Display to user:** The CLI package formats and displays the response to the user in the terminal.
|
||||
|
||||
## Key Design Principles
|
||||
|
||||
- **Modularity:** Separating the CLI (frontend) from the Core (backend) allows for independent development and potential future extensions (e.g., different frontends for the same backend).
|
||||
- **Extensibility:** The tool system is designed to be extensible, allowing new capabilities to be added.
|
||||
- **User experience:** The CLI focuses on providing a rich and interactive terminal experience.
|
After Width: | Height: | Size: 119 KiB |
After Width: | Height: | Size: 350 KiB |
After Width: | Height: | Size: 714 KiB |
After Width: | Height: | Size: 126 KiB |
After Width: | Height: | Size: 127 KiB |
After Width: | Height: | Size: 128 KiB |
After Width: | Height: | Size: 126 KiB |
After Width: | Height: | Size: 128 KiB |
After Width: | Height: | Size: 125 KiB |
After Width: | Height: | Size: 127 KiB |
After Width: | Height: | Size: 128 KiB |
After Width: | Height: | Size: 126 KiB |
After Width: | Height: | Size: 128 KiB |
After Width: | Height: | Size: 126 KiB |
After Width: | Height: | Size: 125 KiB |
|
@ -0,0 +1,75 @@
|
|||
# Checkpointing
|
||||
|
||||
The Gemini CLI includes a Checkpointing feature that automatically saves a snapshot of your project's state before any file modifications are made by AI-powered tools. This allows you to safely experiment with and apply code changes, knowing you can instantly revert back to the state before the tool was run.
|
||||
|
||||
## How It Works
|
||||
|
||||
When you approve a tool that modifies the file system (like `write_file` or `replace`), the CLI automatically creates a "checkpoint." This checkpoint includes:
|
||||
|
||||
1. **A Git Snapshot:** A commit is made in a special, shadow Git repository located in your home directory (`~/.qwen/history/<project_hash>`). This snapshot captures the complete state of your project files at that moment. It does **not** interfere with your own project's Git repository.
|
||||
2. **Conversation History:** The entire conversation you've had with the agent up to that point is saved.
|
||||
3. **The Tool Call:** The specific tool call that was about to be executed is also stored.
|
||||
|
||||
If you want to undo the change or simply go back, you can use the `/restore` command. Restoring a checkpoint will:
|
||||
|
||||
- Revert all files in your project to the state captured in the snapshot.
|
||||
- Restore the conversation history in the CLI.
|
||||
- Re-propose the original tool call, allowing you to run it again, modify it, or simply ignore it.
|
||||
|
||||
All checkpoint data, including the Git snapshot and conversation history, is stored locally on your machine. The Git snapshot is stored in the shadow repository while the conversation history and tool calls are saved in a JSON file in your project's temporary directory, typically located at `~/.qwen/tmp/<project_hash>/checkpoints`.
|
||||
|
||||
## Enabling the Feature
|
||||
|
||||
The Checkpointing feature is disabled by default. To enable it, you can either use a command-line flag or edit your `settings.json` file.
|
||||
|
||||
### Using the Command-Line Flag
|
||||
|
||||
You can enable checkpointing for the current session by using the `--checkpointing` flag when starting the Gemini CLI:
|
||||
|
||||
```bash
|
||||
gemini --checkpointing
|
||||
```
|
||||
|
||||
### Using the `settings.json` File
|
||||
|
||||
To enable checkpointing by default for all sessions, you need to edit your `settings.json` file.
|
||||
|
||||
Add the following key to your `settings.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"checkpointing": {
|
||||
"enabled": true
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Using the `/restore` Command
|
||||
|
||||
Once enabled, checkpoints are created automatically. To manage them, you use the `/restore` command.
|
||||
|
||||
### List Available Checkpoints
|
||||
|
||||
To see a list of all saved checkpoints for the current project, simply run:
|
||||
|
||||
```
|
||||
/restore
|
||||
```
|
||||
|
||||
The CLI will display a list of available checkpoint files. These file names are typically composed of a timestamp, the name of the file being modified, and the name of the tool that was about to be run (e.g., `2025-06-22T10-00-00_000Z-my-file.txt-write_file`).
|
||||
|
||||
### Restore a Specific Checkpoint
|
||||
|
||||
To restore your project to a specific checkpoint, use the checkpoint file from the list:
|
||||
|
||||
```
|
||||
/restore <checkpoint_file>
|
||||
```
|
||||
|
||||
For example:
|
||||
|
||||
```
|
||||
/restore 2025-06-22T10-00-00_000Z-my-file.txt-write_file
|
||||
```
|
||||
|
||||
After running the command, your files and conversation will be immediately restored to the state they were in when the checkpoint was created, and the original tool prompt will reappear.
|
|
@ -0,0 +1,114 @@
|
|||
# Authentication Setup
|
||||
|
||||
The Qwen Code CLI supports multiple authentication methods. On initial startup you'll need to configure **one** of the following authentication methods:
|
||||
|
||||
1. **Login with Google (Gemini Code Assist):**
|
||||
   - Use this option to log in with your Google account.
|
||||
- During initial startup, Gemini CLI will direct you to a webpage for authentication. Once authenticated, your credentials will be cached locally so the web login can be skipped on subsequent runs.
|
||||
- Note that the web login must be done in a browser that can communicate with the machine Gemini CLI is being run from. (Specifically, the browser will be redirected to a localhost url that Gemini CLI will be listening on).
|
||||
- <a id="workspace-gca">Users may have to specify a GOOGLE_CLOUD_PROJECT if:</a>
|
||||
1. You have a Google Workspace account. Google Workspace is a paid service for businesses and organizations that provides a suite of productivity tools, including a custom email domain (e.g. your-name@your-company.com), enhanced security features, and administrative controls. These accounts are often managed by an employer or school.
|
||||
1. You have received a free Code Assist license through the [Google Developer Program](https://developers.google.com/program/plans-and-pricing) (including qualified Google Developer Experts)
|
||||
1. You have been assigned a license to a current Gemini Code Assist standard or enterprise subscription.
|
||||
1. You are using the product outside the [supported regions](https://developers.google.com/gemini-code-assist/resources/available-locations) for free individual usage.
|
||||
     1. You are a Google account holder under the age of 18.
|
||||
- If you fall into one of these categories, you must first configure a Google Cloud Project Id to use, [enable the Gemini for Cloud API](https://cloud.google.com/gemini/docs/discover/set-up-gemini#enable-api) and [configure access permissions](https://cloud.google.com/gemini/docs/discover/set-up-gemini#grant-iam).
|
||||
|
||||
You can temporarily set the environment variable in your current shell session using the following command:
|
||||
|
||||
```bash
|
||||
export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
|
||||
```
|
||||
- For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file:
|
||||
|
||||
```bash
|
||||
echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc
|
||||
source ~/.bashrc
|
||||
```
|
||||
|
||||
2. **<a id="gemini-api-key"></a>Gemini API key:**
|
||||
- Obtain your API key from Google AI Studio: [https://aistudio.google.com/app/apikey](https://aistudio.google.com/app/apikey)
|
||||
- Set the `GEMINI_API_KEY` environment variable. In the following methods, replace `YOUR_GEMINI_API_KEY` with the API key you obtained from Google AI Studio:
|
||||
- You can temporarily set the environment variable in your current shell session using the following command:
|
||||
```bash
|
||||
export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"
|
||||
```
|
||||
- For repeated use, you can add the environment variable to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following command adds the environment variable to a `~/.bashrc` file:
|
||||
```bash
|
||||
echo 'export GEMINI_API_KEY="YOUR_GEMINI_API_KEY"' >> ~/.bashrc
|
||||
source ~/.bashrc
|
||||
```
|
||||
|
||||
3. **Vertex AI:**
|
||||
- Obtain your Google Cloud API key: [Get an API Key](https://cloud.google.com/vertex-ai/generative-ai/docs/start/api-keys?usertype=newuser)
|
||||
- Set the `GOOGLE_API_KEY` environment variable. In the following methods, replace `YOUR_GOOGLE_API_KEY` with your Vertex AI API key:
|
||||
- You can temporarily set these environment variables in your current shell session using the following commands:
|
||||
```bash
|
||||
export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"
|
||||
```
|
||||
- For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file:
|
||||
```bash
|
||||
echo 'export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"' >> ~/.bashrc
|
||||
source ~/.bashrc
|
||||
```
|
||||
- To use Application Default Credentials (ADC), use the following command:
|
||||
- Ensure you have a Google Cloud project and have enabled the Vertex AI API.
|
||||
```bash
|
||||
gcloud auth application-default login
|
||||
```
|
||||
For more information, see [Set up Application Default Credentials for Google Cloud](https://cloud.google.com/docs/authentication/provide-credentials-adc).
|
||||
- Set the `GOOGLE_CLOUD_PROJECT` and `GOOGLE_CLOUD_LOCATION` environment variables. In the following methods, replace `YOUR_PROJECT_ID` and `YOUR_PROJECT_LOCATION` with the relevant values for your project:
|
||||
- You can temporarily set these environment variables in your current shell session using the following commands:
|
||||
```bash
|
||||
export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"
|
||||
export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION" # e.g., us-central1
|
||||
```
|
||||
- For repeated use, you can add the environment variables to your [.env file](#persisting-environment-variables-with-env-files) or your shell's configuration file (like `~/.bashrc`, `~/.zshrc`, or `~/.profile`). For example, the following commands add the environment variables to a `~/.bashrc` file:
|
||||
```bash
|
||||
echo 'export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"' >> ~/.bashrc
|
||||
echo 'export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"' >> ~/.bashrc
|
||||
source ~/.bashrc
|
||||
```
|
||||
4. **Cloud Shell:**
|
||||
- This option is only available when running in a Google Cloud Shell environment.
|
||||
- It automatically uses the credentials of the logged-in user in the Cloud Shell environment.
|
||||
- This is the default authentication method when running in Cloud Shell and no other method is configured.
|
||||
|
||||
### Persisting Environment Variables with `.env` Files
|
||||
|
||||
You can create a **`.qwen/.env`** file in your project directory or in your home directory. Creating a plain **`.env`** file also works, but `.qwen/.env` is recommended to keep Gemini variables isolated from other tools.
|
||||
|
||||
Gemini CLI automatically loads environment variables from the **first** `.env` file it finds, using the following search order:
|
||||
|
||||
1. Starting in the **current directory** and moving upward toward `/`, for each directory it checks:
|
||||
1. `.qwen/.env`
|
||||
2. `.env`
|
||||
2. If no file is found, it falls back to your **home directory**:
|
||||
- `~/.qwen/.env`
|
||||
- `~/.env`
|
||||
|
||||
> **Important:** The search stops at the **first** file encountered—variables are **not merged** across multiple files.
|
||||
|
||||
#### Examples
|
||||
|
||||
**Project-specific overrides** (take precedence when you are inside the project):
|
||||
|
||||
```bash
|
||||
mkdir -p .qwen
|
||||
echo 'GOOGLE_CLOUD_PROJECT="your-project-id"' >> .qwen/.env
|
||||
```
|
||||
|
||||
**User-wide settings** (available in every directory):
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.qwen
|
||||
cat >> ~/.qwen/.env <<'EOF'
|
||||
GOOGLE_CLOUD_PROJECT="your-project-id"
|
||||
GEMINI_API_KEY="your-gemini-api-key"
|
||||
EOF
|
||||
```
|
||||
|
||||
5. **OpenAI Authentication:**
|
||||
- Use OpenAI models instead of Google's Gemini models
|
||||
- For detailed setup instructions, see [OpenAI Authentication](./openai-auth.md)
|
||||
- Supports interactive setup, command line arguments, and environment variables
|
|
@ -0,0 +1,138 @@
|
|||
# CLI Commands
|
||||
|
||||
Gemini CLI supports several built-in commands to help you manage your session, customize the interface, and control its behavior. These commands are prefixed with a forward slash (`/`), an at symbol (`@`), or an exclamation mark (`!`).
|
||||
|
||||
## Slash commands (`/`)
|
||||
|
||||
Slash commands provide meta-level control over the CLI itself.
|
||||
|
||||
- **`/bug`**
|
||||
- **Description:** File an issue about Gemini CLI. By default, the issue is filed within the GitHub repository for Gemini CLI. The string you enter after `/bug` will become the headline for the bug being filed. The default `/bug` behavior can be modified using the `bugCommand` setting in your `.qwen/settings.json` files.
|
||||
|
||||
- **`/chat`**
|
||||
  - **Description:** Save and resume conversation history, letting you branch the conversation state interactively or resume a previous state in a later session.
|
||||
- **Sub-commands:**
|
||||
- **`save`**
|
||||
- **Description:** Saves the current conversation history. You must add a `<tag>` for identifying the conversation state.
|
||||
- **Usage:** `/chat save <tag>`
|
||||
- **`resume`**
|
||||
- **Description:** Resumes a conversation from a previous save.
|
||||
- **Usage:** `/chat resume <tag>`
|
||||
- **`list`**
|
||||
- **Description:** Lists available tags for chat state resumption.
|
||||
|
||||
- **`/clear`**
|
||||
- **Description:** Clear the terminal screen, including the visible session history and scrollback within the CLI. The underlying session data (for history recall) might be preserved depending on the exact implementation, but the visual display is cleared.
|
||||
- **Keyboard shortcut:** Press **Ctrl+L** at any time to perform a clear action.
|
||||
|
||||
- **`/compress`**
|
||||
  - **Description:** Replace the entire chat context with a summary. This saves on tokens used for future tasks while retaining a high-level summary of what has happened.
|
||||
|
||||
- **`/editor`**
|
||||
- **Description:** Open a dialog for selecting supported editors.
|
||||
|
||||
- **`/extensions`**
|
||||
- **Description:** Lists all active extensions in the current Gemini CLI session. See [Gemini CLI Extensions](../extension.md).
|
||||
|
||||
- **`/help`** (or **`/?`**)
|
||||
- **Description:** Display help information about the Gemini CLI, including available commands and their usage.
|
||||
|
||||
- **`/mcp`**
|
||||
- **Description:** List configured Model Context Protocol (MCP) servers, their connection status, server details, and available tools.
|
||||
- **Sub-commands:**
|
||||
- **`desc`** or **`descriptions`**:
|
||||
- **Description:** Show detailed descriptions for MCP servers and tools.
|
||||
- **`nodesc`** or **`nodescriptions`**:
|
||||
- **Description:** Hide tool descriptions, showing only the tool names.
|
||||
- **`schema`**:
|
||||
- **Description:** Show the full JSON schema for the tool's configured parameters.
|
||||
- **Keyboard Shortcut:** Press **Ctrl+T** at any time to toggle between showing and hiding tool descriptions.
|
||||
|
||||
- **`/memory`**
|
||||
- **Description:** Manage the AI's instructional context (hierarchical memory loaded from `GEMINI.md` files).
|
||||
- **Sub-commands:**
|
||||
- **`add`**:
|
||||
- **Description:** Adds the following text to the AI's memory. Usage: `/memory add <text to remember>`
|
||||
- **`show`**:
|
||||
- **Description:** Display the full, concatenated content of the current hierarchical memory that has been loaded from all `GEMINI.md` files. This lets you inspect the instructional context being provided to the Gemini model.
|
||||
- **`refresh`**:
|
||||
- **Description:** Reload the hierarchical instructional memory from all `GEMINI.md` files found in the configured locations (global, project/ancestors, and sub-directories). This command updates the model with the latest `GEMINI.md` content.
|
||||
- **Note:** For more details on how `GEMINI.md` files contribute to hierarchical memory, see the [CLI Configuration documentation](./configuration.md#4-geminimd-files-hierarchical-instructional-context).
|
||||
|
||||
- **`/restore`**
|
||||
- **Description:** Restores the project files to the state they were in just before a tool was executed. This is particularly useful for undoing file edits made by a tool. If run without a tool call ID, it will list available checkpoints to restore from.
|
||||
- **Usage:** `/restore [tool_call_id]`
|
||||
- **Note:** Only available if the CLI is invoked with the `--checkpointing` option or configured via [settings](./configuration.md). See [Checkpointing documentation](../checkpointing.md) for more details.
|
||||
|
||||
- **`/stats`**
|
||||
- **Description:** Display detailed statistics for the current Gemini CLI session, including token usage, cached token savings (when available), and session duration. Note: Cached token information is only displayed when cached tokens are being used, which occurs with API key authentication but not with OAuth authentication at this time.
|
||||
|
||||
- [**`/theme`**](./themes.md)
|
||||
- **Description:** Open a dialog that lets you change the visual theme of Gemini CLI.
|
||||
|
||||
- **`/auth`**
|
||||
- **Description:** Open a dialog that lets you change the authentication method.
|
||||
|
||||
- **`/about`**
|
||||
- **Description:** Show version info. Please share this information when filing issues.
|
||||
|
||||
- [**`/tools`**](../tools/index.md)
|
||||
- **Description:** Display a list of tools that are currently available within Gemini CLI.
|
||||
- **Sub-commands:**
|
||||
- **`desc`** or **`descriptions`**:
|
||||
- **Description:** Show detailed descriptions of each tool, including each tool's name with its full description as provided to the model.
|
||||
- **`nodesc`** or **`nodescriptions`**:
|
||||
- **Description:** Hide tool descriptions, showing only the tool names.
|
||||
|
||||
- **`/privacy`**
|
||||
- **Description:** Display the Privacy Notice and allow users to select whether they consent to the collection of their data for service improvement purposes.
|
||||
|
||||
- **`/quit`** (or **`/exit`**)
|
||||
- **Description:** Exit Gemini CLI.
|
||||
|
||||
## At commands (`@`)
|
||||
|
||||
At commands are used to include the content of files or directories as part of your prompt to Gemini. These commands include git-aware filtering.
|
||||
|
||||
- **`@<path_to_file_or_directory>`**
|
||||
- **Description:** Inject the content of the specified file or files into your current prompt. This is useful for asking questions about specific code, text, or collections of files.
|
||||
- **Examples:**
|
||||
- `@path/to/your/file.txt Explain this text.`
|
||||
- `@src/my_project/ Summarize the code in this directory.`
|
||||
- `What is this file about? @README.md`
|
||||
- **Details:**
|
||||
- If a path to a single file is provided, the content of that file is read.
|
||||
- If a path to a directory is provided, the command attempts to read the content of files within that directory and any subdirectories.
|
||||
- Spaces in paths should be escaped with a backslash (e.g., `@My\ Documents/file.txt`).
|
||||
- The command uses the `read_many_files` tool internally. The content is fetched and then inserted into your query before being sent to the Gemini model.
|
||||
- **Git-aware filtering:** By default, git-ignored files (like `node_modules/`, `dist/`, `.env`, `.git/`) are excluded. This behavior can be changed via the `fileFiltering` settings.
|
||||
- **File types:** The command is intended for text-based files. While it might attempt to read any file, binary files or very large files might be skipped or truncated by the underlying `read_many_files` tool to ensure performance and relevance. The tool indicates if files were skipped.
|
||||
- **Output:** The CLI will show a tool call message indicating that `read_many_files` was used, along with a message detailing the status and the path(s) that were processed.
|
||||
|
||||
- **`@` (Lone at symbol)**
|
||||
- **Description:** If you type a lone `@` symbol without a path, the query is passed as-is to the Gemini model. This might be useful if you are specifically talking _about_ the `@` symbol in your prompt.
|
||||
|
||||
### Error handling for `@` commands
|
||||
|
||||
- If the path specified after `@` is not found or is invalid, an error message will be displayed, and the query might not be sent to the Gemini model, or it will be sent without the file content.
|
||||
- If the `read_many_files` tool encounters an error (e.g., permission issues), this will also be reported.
|
||||
|
||||
## Shell mode & passthrough commands (`!`)
|
||||
|
||||
The `!` prefix lets you interact with your system's shell directly from within Gemini CLI.
|
||||
|
||||
- **`!<shell_command>`**
|
||||
- **Description:** Execute the given `<shell_command>` in your system's default shell. Any output or errors from the command are displayed in the terminal.
|
||||
- **Examples:**
|
||||
- `!ls -la` (executes `ls -la` and returns to Gemini CLI)
|
||||
- `!git status` (executes `git status` and returns to Gemini CLI)
|
||||
|
||||
- **`!` (Toggle shell mode)**
|
||||
- **Description:** Typing `!` on its own toggles shell mode.
|
||||
- **Entering shell mode:**
|
||||
- When active, shell mode uses a different coloring and a "Shell Mode Indicator".
|
||||
- While in shell mode, text you type is interpreted directly as a shell command.
|
||||
- **Exiting shell mode:**
|
||||
- When exited, the UI reverts to its standard appearance and normal Gemini CLI behavior resumes.
|
||||
|
||||
- **Caution for all `!` usage:** Commands you execute in shell mode have the same permissions and impact as if you ran them directly in your terminal.
|
|
@ -0,0 +1,470 @@
|
|||
# Gemini CLI Configuration
|
||||
|
||||
Gemini CLI offers several ways to configure its behavior, including environment variables, command-line arguments, and settings files. This document outlines the different configuration methods and available settings.
|
||||
|
||||
## Configuration layers
|
||||
|
||||
Configuration is applied in the following order of precedence (lower numbers are overridden by higher numbers):
|
||||
|
||||
1. **Default values:** Hardcoded defaults within the application.
|
||||
2. **User settings file:** Global settings for the current user.
|
||||
3. **Project settings file:** Project-specific settings.
|
||||
4. **System settings file:** System-wide settings.
|
||||
5. **Environment variables:** System-wide or session-specific variables, potentially loaded from `.env` files.
|
||||
6. **Command-line arguments:** Values passed when launching the CLI.
|
||||
|
||||
## Settings files
|
||||
|
||||
Gemini CLI uses `settings.json` files for persistent configuration. There are three locations for these files:
|
||||
|
||||
- **User settings file:**
|
||||
- **Location:** `~/.qwen/settings.json` (where `~` is your home directory).
|
||||
- **Scope:** Applies to all Gemini CLI sessions for the current user.
|
||||
- **Project settings file:**
|
||||
- **Location:** `.qwen/settings.json` within your project's root directory.
|
||||
- **Scope:** Applies only when running Gemini CLI from that specific project. Project settings override user settings.
|
||||
- **System settings file:**
|
||||
- **Location:** `/etc/gemini-cli/settings.json` (Linux), `C:\ProgramData\gemini-cli\settings.json` (Windows) or `/Library/Application Support/GeminiCli/settings.json` (macOS).
|
||||
- **Scope:** Applies to all Gemini CLI sessions on the system, for all users. System settings override user and project settings. May be useful for system administrators at enterprises to have controls over users' Gemini CLI setups.
|
||||
|
||||
**Note on environment variables in settings:** String values within your `settings.json` files can reference environment variables using either `$VAR_NAME` or `${VAR_NAME}` syntax. These variables will be automatically resolved when the settings are loaded. For example, if you have an environment variable `MY_API_TOKEN`, you could use it in `settings.json` like this: `"apiKey": "$MY_API_TOKEN"`.
|
||||
|
||||
### The `.gemini` directory in your project
|
||||
|
||||
In addition to a project settings file, a project's `.gemini` directory can contain other project-specific files related to Gemini CLI's operation, such as:
|
||||
|
||||
- [Custom sandbox profiles](#sandboxing) (e.g., `.qwen/sandbox-macos-custom.sb`, `.qwen/sandbox.Dockerfile`).
|
||||
|
||||
### Available settings in `settings.json`:
|
||||
|
||||
- **`contextFileName`** (string or array of strings):
|
||||
- **Description:** Specifies the filename for context files (e.g., `GEMINI.md`, `AGENTS.md`). Can be a single filename or a list of accepted filenames.
|
||||
- **Default:** `GEMINI.md`
|
||||
- **Example:** `"contextFileName": "AGENTS.md"`
|
||||
|
||||
- **`bugCommand`** (object):
|
||||
- **Description:** Overrides the default URL for the `/bug` command.
|
||||
- **Default:** `"urlTemplate": "https://github.com/google-gemini/gemini-cli/issues/new?template=bug_report.yml&title={title}&info={info}"`
|
||||
- **Properties:**
|
||||
- **`urlTemplate`** (string): A URL that can contain `{title}` and `{info}` placeholders.
|
||||
- **Example:**
|
||||
```json
|
||||
"bugCommand": {
|
||||
"urlTemplate": "https://bug.example.com/new?title={title}&info={info}"
|
||||
}
|
||||
```
|
||||
|
||||
- **`fileFiltering`** (object):
|
||||
- **Description:** Controls git-aware file filtering behavior for @ commands and file discovery tools.
|
||||
- **Default:** `"respectGitIgnore": true, "enableRecursiveFileSearch": true`
|
||||
- **Properties:**
|
||||
- **`respectGitIgnore`** (boolean): Whether to respect .gitignore patterns when discovering files. When set to `true`, git-ignored files (like `node_modules/`, `dist/`, `.env`) are automatically excluded from @ commands and file listing operations.
|
||||
- **`enableRecursiveFileSearch`** (boolean): Whether to enable searching recursively for filenames under the current tree when completing @ prefixes in the prompt.
|
||||
- **Example:**
|
||||
```json
|
||||
"fileFiltering": {
|
||||
"respectGitIgnore": true,
|
||||
"enableRecursiveFileSearch": false
|
||||
}
|
||||
```
|
||||
|
||||
- **`coreTools`** (array of strings):
|
||||
- **Description:** Allows you to specify a list of core tool names that should be made available to the model. This can be used to restrict the set of built-in tools. See [Built-in Tools](../core/tools-api.md#built-in-tools) for a list of core tools. You can also specify command-specific restrictions for tools that support it, like the `ShellTool`. For example, `"coreTools": ["ShellTool(ls -l)"]` will only allow the `ls -l` command to be executed.
|
||||
- **Default:** All tools available for use by the Gemini model.
|
||||
- **Example:** `"coreTools": ["ReadFileTool", "GlobTool", "ShellTool(ls)"]`.
|
||||
|
||||
- **`excludeTools`** (array of strings):
|
||||
- **Description:** Allows you to specify a list of core tool names that should be excluded from the model. A tool listed in both `excludeTools` and `coreTools` is excluded. You can also specify command-specific restrictions for tools that support it, like the `ShellTool`. For example, `"excludeTools": ["ShellTool(rm -rf)"]` will block the `rm -rf` command.
|
||||
- **Default**: No tools excluded.
|
||||
- **Example:** `"excludeTools": ["run_shell_command", "findFiles"]`.
|
||||
- **Security Note:** Command-specific restrictions in
|
||||
`excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands
|
||||
that can be executed.
|
||||
|
||||
- **`autoAccept`** (boolean):
|
||||
- **Description:** Controls whether the CLI automatically accepts and executes tool calls that are considered safe (e.g., read-only operations) without explicit user confirmation. If set to `true`, the CLI will bypass the confirmation prompt for tools deemed safe.
|
||||
- **Default:** `false`
|
||||
- **Example:** `"autoAccept": true`
|
||||
|
||||
- **`theme`** (string):
|
||||
- **Description:** Sets the visual [theme](./themes.md) for Gemini CLI.
|
||||
- **Default:** `"Default"`
|
||||
- **Example:** `"theme": "GitHub"`
|
||||
|
||||
- **`sandbox`** (boolean or string):
|
||||
- **Description:** Controls whether and how to use sandboxing for tool execution. If set to `true`, Gemini CLI uses a pre-built `gemini-cli-sandbox` Docker image. For more information, see [Sandboxing](#sandboxing).
|
||||
- **Default:** `false`
|
||||
- **Example:** `"sandbox": "docker"`
|
||||
|
||||
- **`toolDiscoveryCommand`** (string):
|
||||
- **Description:** Defines a custom shell command for discovering tools from your project. The shell command must return on `stdout` a JSON array of [function declarations](https://ai.google.dev/gemini-api/docs/function-calling#function-declarations). Tool wrappers are optional.
|
||||
- **Default:** Empty
|
||||
- **Example:** `"toolDiscoveryCommand": "bin/get_tools"`
|
||||
|
||||
- **`toolCallCommand`** (string):
|
||||
- **Description:** Defines a custom shell command for calling a specific tool that was discovered using `toolDiscoveryCommand`. The shell command must meet the following criteria:
|
||||
- It must take function `name` (exactly as in [function declaration](https://ai.google.dev/gemini-api/docs/function-calling#function-declarations)) as first command line argument.
|
||||
- It must read function arguments as JSON on `stdin`, analogous to [`functionCall.args`](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#functioncall).
|
||||
- It must return function output as JSON on `stdout`, analogous to [`functionResponse.response.content`](https://cloud.google.com/vertex-ai/generative-ai/docs/model-reference/inference#functionresponse).
|
||||
- **Default:** Empty
|
||||
- **Example:** `"toolCallCommand": "bin/call_tool"`
|
||||
|
||||
- **`mcpServers`** (object):
|
||||
- **Description:** Configures connections to one or more Model-Context Protocol (MCP) servers for discovering and using custom tools. Gemini CLI attempts to connect to each configured MCP server to discover available tools. If multiple MCP servers expose a tool with the same name, the tool names will be prefixed with the server alias you defined in the configuration (e.g., `serverAlias__actualToolName`) to avoid conflicts. Note that the system might strip certain schema properties from MCP tool definitions for compatibility.
|
||||
- **Default:** Empty
|
||||
- **Properties:**
|
||||
- **`<SERVER_NAME>`** (object): The server parameters for the named server.
|
||||
- `command` (string, required): The command to execute to start the MCP server.
|
||||
- `args` (array of strings, optional): Arguments to pass to the command.
|
||||
- `env` (object, optional): Environment variables to set for the server process.
|
||||
- `cwd` (string, optional): The working directory in which to start the server.
|
||||
- `timeout` (number, optional): Timeout in milliseconds for requests to this MCP server.
|
||||
- `trust` (boolean, optional): Trust this server and bypass all tool call confirmations.
|
||||
- **Example:**
|
||||
```json
|
||||
"mcpServers": {
|
||||
"myPythonServer": {
|
||||
"command": "python",
|
||||
"args": ["mcp_server.py", "--port", "8080"],
|
||||
"cwd": "./mcp_tools/python",
|
||||
"timeout": 5000
|
||||
},
|
||||
"myNodeServer": {
|
||||
"command": "node",
|
||||
"args": ["mcp_server.js"],
|
||||
"cwd": "./mcp_tools/node"
|
||||
},
|
||||
"myDockerServer": {
|
||||
"command": "docker",
|
||||
"args": ["run", "-i", "--rm", "-e", "API_KEY", "ghcr.io/foo/bar"],
|
||||
"env": {
|
||||
"API_KEY": "$MY_API_TOKEN"
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
- **`checkpointing`** (object):
|
||||
- **Description:** Configures the checkpointing feature, which allows you to save and restore conversation and file states. See the [Checkpointing documentation](../checkpointing.md) for more details.
|
||||
- **Default:** `{"enabled": false}`
|
||||
- **Properties:**
|
||||
- **`enabled`** (boolean): When `true`, the `/restore` command is available.
|
||||
|
||||
- **`preferredEditor`** (string):
|
||||
- **Description:** Specifies the preferred editor to use for viewing diffs.
|
||||
- **Default:** `vscode`
|
||||
- **Example:** `"preferredEditor": "vscode"`
|
||||
|
||||
- **`telemetry`** (object)
|
||||
- **Description:** Configures logging and metrics collection for Gemini CLI. For more information, see [Telemetry](../telemetry.md).
|
||||
- **Default:** `{"enabled": false, "target": "local", "otlpEndpoint": "http://localhost:4317", "logPrompts": true}`
|
||||
- **Properties:**
|
||||
- **`enabled`** (boolean): Whether or not telemetry is enabled.
|
||||
- **`target`** (string): The destination for collected telemetry. Supported values are `local` and `gcp`.
|
||||
- **`otlpEndpoint`** (string): The endpoint for the OTLP Exporter.
|
||||
- **`logPrompts`** (boolean): Whether or not to include the content of user prompts in the logs.
|
||||
- **Example:**
|
||||
```json
|
||||
"telemetry": {
|
||||
"enabled": true,
|
||||
"target": "local",
|
||||
"otlpEndpoint": "http://localhost:16686",
|
||||
"logPrompts": false
|
||||
}
|
||||
```
|
||||
- **`usageStatisticsEnabled`** (boolean):
|
||||
- **Description:** Enables or disables the collection of usage statistics. See [Usage Statistics](#usage-statistics) for more information.
|
||||
- **Default:** `true`
|
||||
- **Example:**
|
||||
```json
|
||||
"usageStatisticsEnabled": false
|
||||
```
|
||||
|
||||
- **`hideTips`** (boolean):
|
||||
- **Description:** Enables or disables helpful tips in the CLI interface.
|
||||
- **Default:** `false`
|
||||
- **Example:**
|
||||
|
||||
```json
|
||||
"hideTips": true
|
||||
```
|
||||
|
||||
- **`hideBanner`** (boolean):
|
||||
- **Description:** Enables or disables the startup banner (ASCII art logo) in the CLI interface.
|
||||
- **Default:** `false`
|
||||
- **Example:**
|
||||
|
||||
```json
|
||||
"hideBanner": true
|
||||
```
|
||||
|
||||
- **`maxSessionTurns`** (number):
|
||||
- **Description:** Sets the maximum number of turns for a session. If the session exceeds this limit, the CLI will stop processing and start a new chat.
|
||||
- **Default:** `-1` (unlimited)
|
||||
- **Example:**
|
||||
```json
|
||||
"maxSessionTurns": 10
|
||||
```
|
||||
|
||||
- **`enableOpenAILogging`** (boolean):
|
||||
- **Description:** Enables or disables logging of OpenAI API calls for debugging and analysis. When enabled, all requests and responses to the OpenAI API are logged to files in the `~/.qwen/logs/` directory.
|
||||
- **Default:** `false`
|
||||
- **Example:**
|
||||
|
||||
```json
|
||||
"enableOpenAILogging": true
|
||||
```
|
||||
|
||||
### Example `settings.json`:
|
||||
|
||||
```json
|
||||
{
|
||||
"theme": "GitHub",
|
||||
"sandbox": "docker",
|
||||
"toolDiscoveryCommand": "bin/get_tools",
|
||||
"toolCallCommand": "bin/call_tool",
|
||||
"mcpServers": {
|
||||
"mainServer": {
|
||||
"command": "bin/mcp_server.py"
|
||||
},
|
||||
"anotherServer": {
|
||||
"command": "node",
|
||||
"args": ["mcp_server.js", "--verbose"]
|
||||
}
|
||||
},
|
||||
"telemetry": {
|
||||
"enabled": true,
|
||||
"target": "local",
|
||||
"otlpEndpoint": "http://localhost:4317",
|
||||
"logPrompts": true
|
||||
},
|
||||
"usageStatisticsEnabled": true,
|
||||
"hideTips": false,
|
||||
"hideBanner": false,
|
||||
"maxSessionTurns": 10,
|
||||
"enableOpenAILogging": true
|
||||
}
|
||||
```
|
||||
|
||||
## Shell History
|
||||
|
||||
The CLI keeps a history of shell commands you run. To avoid conflicts between different projects, this history is stored in a project-specific directory within your user's home folder.
|
||||
|
||||
- **Location:** `~/.qwen/tmp/<project_hash>/shell_history`
|
||||
- `<project_hash>` is a unique identifier generated from your project's root path.
|
||||
- The history is stored in a file named `shell_history`.
|
||||
|
||||
## Environment Variables & `.env` Files
|
||||
|
||||
Environment variables are a common way to configure applications, especially for sensitive information like API keys or for settings that might change between environments.
|
||||
|
||||
The CLI automatically loads environment variables from an `.env` file. The loading order is:
|
||||
|
||||
1. `.env` file in the current working directory.
|
||||
2. If not found, it searches upwards in parent directories until it finds an `.env` file or reaches the project root (identified by a `.git` folder) or the home directory.
|
||||
3. If still not found, it looks for `~/.env` (in the user's home directory).
|
||||
|
||||
- **`GEMINI_API_KEY`** (Required):
|
||||
- Your API key for the Gemini API.
|
||||
- **Crucial for operation.** The CLI will not function without it.
|
||||
- Set this in your shell profile (e.g., `~/.bashrc`, `~/.zshrc`) or an `.env` file.
|
||||
- **`GEMINI_MODEL`**:
|
||||
- Specifies the default Gemini model to use.
|
||||
  - Overrides the hardcoded default.
|
||||
- Example: `export GEMINI_MODEL="gemini-2.5-flash"`
|
||||
- **`GOOGLE_API_KEY`**:
|
||||
- Your Google Cloud API key.
|
||||
- Required for using Vertex AI in express mode.
|
||||
- Ensure you have the necessary permissions.
|
||||
- Example: `export GOOGLE_API_KEY="YOUR_GOOGLE_API_KEY"`.
|
||||
- **`GOOGLE_CLOUD_PROJECT`**:
|
||||
- Your Google Cloud Project ID.
|
||||
- Required for using Code Assist or Vertex AI.
|
||||
- If using Vertex AI, ensure you have the necessary permissions in this project.
|
||||
- **Cloud Shell Note:** When running in a Cloud Shell environment, this variable defaults to a special project allocated for Cloud Shell users. If you have `GOOGLE_CLOUD_PROJECT` set in your global environment in Cloud Shell, it will be overridden by this default. To use a different project in Cloud Shell, you must define `GOOGLE_CLOUD_PROJECT` in a `.env` file.
|
||||
- Example: `export GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`.
|
||||
- **`GOOGLE_APPLICATION_CREDENTIALS`** (string):
|
||||
- **Description:** The path to your Google Application Credentials JSON file.
|
||||
- **Example:** `export GOOGLE_APPLICATION_CREDENTIALS="/path/to/your/credentials.json"`
|
||||
- **`OTLP_GOOGLE_CLOUD_PROJECT`**:
|
||||
- Your Google Cloud Project ID for Telemetry in Google Cloud
|
||||
- Example: `export OTLP_GOOGLE_CLOUD_PROJECT="YOUR_PROJECT_ID"`.
|
||||
- **`GOOGLE_CLOUD_LOCATION`**:
|
||||
- Your Google Cloud Project Location (e.g., us-central1).
|
||||
  - Required for using Vertex AI in non-express mode.
|
||||
- Example: `export GOOGLE_CLOUD_LOCATION="YOUR_PROJECT_LOCATION"`.
|
||||
- **`GEMINI_SANDBOX`**:
|
||||
- Alternative to the `sandbox` setting in `settings.json`.
|
||||
- Accepts `true`, `false`, `docker`, `podman`, or a custom command string.
|
||||
- **`SEATBELT_PROFILE`** (macOS specific):
|
||||
- Switches the Seatbelt (`sandbox-exec`) profile on macOS.
|
||||
- `permissive-open`: (Default) Restricts writes to the project folder (and a few other folders, see `packages/cli/src/utils/sandbox-macos-permissive-open.sb`) but allows other operations.
|
||||
- `strict`: Uses a strict profile that declines operations by default.
|
||||
- `<profile_name>`: Uses a custom profile. To define a custom profile, create a file named `sandbox-macos-<profile_name>.sb` in your project's `.qwen/` directory (e.g., `my-project/.qwen/sandbox-macos-custom.sb`).
|
||||
- **`DEBUG` or `DEBUG_MODE`** (often used by underlying libraries or the CLI itself):
|
||||
- Set to `true` or `1` to enable verbose debug logging, which can be helpful for troubleshooting.
|
||||
- **`NO_COLOR`**:
|
||||
- Set to any value to disable all color output in the CLI.
|
||||
- **`CLI_TITLE`**:
|
||||
- Set to a string to customize the title of the CLI.
|
||||
- **`CODE_ASSIST_ENDPOINT`**:
|
||||
- Specifies the endpoint for the code assist server.
|
||||
- This is useful for development and testing.
|
||||
|
||||
## Command-Line Arguments
|
||||
|
||||
Arguments passed directly when running the CLI can override other configurations for that specific session.
|
||||
|
||||
- **`--model <model_name>`** (**`-m <model_name>`**):
|
||||
- Specifies the Gemini model to use for this session.
|
||||
- Example: `npm start -- --model gemini-1.5-pro-latest`
|
||||
- **`--prompt <your_prompt>`** (**`-p <your_prompt>`**):
|
||||
- Used to pass a prompt directly to the command. This invokes Gemini CLI in a non-interactive mode.
|
||||
- **`--sandbox`** (**`-s`**):
|
||||
- Enables sandbox mode for this session.
|
||||
- **`--sandbox-image`**:
|
||||
- Sets the sandbox image URI.
|
||||
- **`--debug`** (**`-d`**):
|
||||
- Enables debug mode for this session, providing more verbose output.
|
||||
- **`--all-files`** (**`-a`**):
|
||||
- If set, recursively includes all files within the current directory as context for the prompt.
|
||||
- **`--help`** (or **`-h`**):
|
||||
- Displays help information about command-line arguments.
|
||||
- **`--show-memory-usage`**:
|
||||
- Displays the current memory usage.
|
||||
- **`--yolo`**:
|
||||
- Enables YOLO mode, which automatically approves all tool calls.
|
||||
- **`--telemetry`**:
|
||||
- Enables [telemetry](../telemetry.md).
|
||||
- **`--telemetry-target`**:
|
||||
- Sets the telemetry target. See [telemetry](../telemetry.md) for more information.
|
||||
- **`--telemetry-otlp-endpoint`**:
|
||||
- Sets the OTLP endpoint for telemetry. See [telemetry](../telemetry.md) for more information.
|
||||
- **`--telemetry-log-prompts`**:
|
||||
- Enables logging of prompts for telemetry. See [telemetry](../telemetry.md) for more information.
|
||||
- **`--checkpointing`**:
|
||||
- Enables [checkpointing](./commands.md#checkpointing-commands).
|
||||
- **`--extensions <extension_name ...>`** (**`-e <extension_name ...>`**):
|
||||
- Specifies a list of extensions to use for the session. If not provided, all available extensions are used.
|
||||
  - Use `gemini -e none` to disable all extensions.
|
||||
- Example: `gemini -e my-extension -e my-other-extension`
|
||||
- **`--list-extensions`** (**`-l`**):
|
||||
- Lists all available extensions and exits.
|
||||
- **`--version`**:
|
||||
- Displays the version of the CLI.
|
||||
- **`--openai-logging`**:
|
||||
- Enables logging of OpenAI API calls for debugging and analysis. This flag overrides the `enableOpenAILogging` setting in `settings.json`.
|
||||
|
||||
## Context Files (Hierarchical Instructional Context)
|
||||
|
||||
While not strictly configuration for the CLI's _behavior_, context files (defaulting to `GEMINI.md` but configurable via the `contextFileName` setting) are crucial for configuring the _instructional context_ (also referred to as "memory") provided to the Gemini model. This powerful feature allows you to give project-specific instructions, coding style guides, or any relevant background information to the AI, making its responses more tailored and accurate to your needs. The CLI includes UI elements, such as an indicator in the footer showing the number of loaded context files, to keep you informed about the active context.
|
||||
|
||||
- **Purpose:** These Markdown files contain instructions, guidelines, or context that you want the Gemini model to be aware of during your interactions. The system is designed to manage this instructional context hierarchically.
|
||||
|
||||
### Example Context File Content (e.g., `GEMINI.md`)
|
||||
|
||||
Here's a conceptual example of what a context file at the root of a TypeScript project might contain:
|
||||
|
||||
```markdown
|
||||
# Project: My Awesome TypeScript Library
|
||||
|
||||
## General Instructions:
|
||||
|
||||
- When generating new TypeScript code, please follow the existing coding style.
|
||||
- Ensure all new functions and classes have JSDoc comments.
|
||||
- Prefer functional programming paradigms where appropriate.
|
||||
- All code should be compatible with TypeScript 5.0 and Node.js 20+.
|
||||
|
||||
## Coding Style:
|
||||
|
||||
- Use 2 spaces for indentation.
|
||||
- Interface names should be prefixed with `I` (e.g., `IUserService`).
|
||||
- Private class members should be prefixed with an underscore (`_`).
|
||||
- Always use strict equality (`===` and `!==`).
|
||||
|
||||
## Specific Component: `src/api/client.ts`
|
||||
|
||||
- This file handles all outbound API requests.
|
||||
- When adding new API call functions, ensure they include robust error handling and logging.
|
||||
- Use the existing `fetchWithRetry` utility for all GET requests.
|
||||
|
||||
## Regarding Dependencies:
|
||||
|
||||
- Avoid introducing new external dependencies unless absolutely necessary.
|
||||
- If a new dependency is required, please state the reason.
|
||||
```
|
||||
|
||||
This example demonstrates how you can provide general project context, specific coding conventions, and even notes about particular files or components. The more relevant and precise your context files are, the better the AI can assist you. Project-specific context files are highly encouraged to establish conventions and context.
|
||||
|
||||
- **Hierarchical Loading and Precedence:** The CLI implements a sophisticated hierarchical memory system by loading context files (e.g., `GEMINI.md`) from several locations. Content from files lower in this list (more specific) typically overrides or supplements content from files higher up (more general). The exact concatenation order and final context can be inspected using the `/memory show` command. The typical loading order is:
|
||||
1. **Global Context File:**
|
||||
- Location: `~/.qwen/<contextFileName>` (e.g., `~/.qwen/GEMINI.md` in your user home directory).
|
||||
- Scope: Provides default instructions for all your projects.
|
||||
2. **Project Root & Ancestors Context Files:**
|
||||
- Location: The CLI searches for the configured context file in the current working directory and then in each parent directory up to either the project root (identified by a `.git` folder) or your home directory.
|
||||
- Scope: Provides context relevant to the entire project or a significant portion of it.
|
||||
3. **Sub-directory Context Files (Contextual/Local):**
|
||||
- Location: The CLI also scans for the configured context file in subdirectories _below_ the current working directory (respecting common ignore patterns like `node_modules`, `.git`, etc.).
|
||||
- Scope: Allows for highly specific instructions relevant to a particular component, module, or subsection of your project.
|
||||
- **Concatenation & UI Indication:** The contents of all found context files are concatenated (with separators indicating their origin and path) and provided as part of the system prompt to the Gemini model. The CLI footer displays the count of loaded context files, giving you a quick visual cue about the active instructional context.
|
||||
- **Commands for Memory Management:**
|
||||
- Use `/memory refresh` to force a re-scan and reload of all context files from all configured locations. This updates the AI's instructional context.
|
||||
- Use `/memory show` to display the combined instructional context currently loaded, allowing you to verify the hierarchy and content being used by the AI.
|
||||
- See the [Commands documentation](./commands.md#memory) for full details on the `/memory` command and its sub-commands (`show` and `refresh`).
|
||||
|
||||
By understanding and utilizing these configuration layers and the hierarchical nature of context files, you can effectively manage the AI's memory and tailor the Gemini CLI's responses to your specific needs and projects.
|
||||
|
||||
## Sandboxing
|
||||
|
||||
The Gemini CLI can execute potentially unsafe operations (like shell commands and file modifications) within a sandboxed environment to protect your system.
|
||||
|
||||
Sandboxing is disabled by default, but you can enable it in a few ways:
|
||||
|
||||
- Using `--sandbox` or `-s` flag.
|
||||
- Setting `GEMINI_SANDBOX` environment variable.
|
||||
- Sandbox is enabled in `--yolo` mode by default.
|
||||
|
||||
By default, it uses a pre-built `gemini-cli-sandbox` Docker image.
|
||||
|
||||
For project-specific sandboxing needs, you can create a custom Dockerfile at `.qwen/sandbox.Dockerfile` in your project's root directory. This Dockerfile can be based on the base sandbox image:
|
||||
|
||||
```dockerfile
|
||||
FROM gemini-cli-sandbox
|
||||
|
||||
# Add your custom dependencies or configurations here
|
||||
# For example:
|
||||
# RUN apt-get update && apt-get install -y some-package
|
||||
# COPY ./my-config /app/my-config
|
||||
```
|
||||
|
||||
When `.qwen/sandbox.Dockerfile` exists, you can use `BUILD_SANDBOX` environment variable when running Gemini CLI to automatically build the custom sandbox image:
|
||||
|
||||
```bash
|
||||
BUILD_SANDBOX=1 gemini -s
|
||||
```
|
||||
|
||||
## Usage Statistics
|
||||
|
||||
To help us improve the Gemini CLI, we collect anonymized usage statistics. This data helps us understand how the CLI is used, identify common issues, and prioritize new features.
|
||||
|
||||
**What we collect:**
|
||||
|
||||
- **Tool Calls:** We log the names of the tools that are called, whether they succeed or fail, and how long they take to execute. We do not collect the arguments passed to the tools or any data returned by them.
|
||||
- **API Requests:** We log the Gemini model used for each request, the duration of the request, and whether it was successful. We do not collect the content of the prompts or responses.
|
||||
- **Session Information:** We collect information about the configuration of the CLI, such as the enabled tools and the approval mode.
|
||||
|
||||
**What we DON'T collect:**
|
||||
|
||||
- **Personally Identifiable Information (PII):** We do not collect any personal information, such as your name, email address, or API keys.
|
||||
- **Prompt and Response Content:** We do not log the content of your prompts or the responses from the Gemini model.
|
||||
- **File Content:** We do not log the content of any files that are read or written by the CLI.
|
||||
|
||||
**How to opt out:**
|
||||
|
||||
You can opt out of usage statistics collection at any time by setting the `usageStatisticsEnabled` property to `false` in your `settings.json` file:
|
||||
|
||||
```json
|
||||
{
|
||||
"usageStatisticsEnabled": false
|
||||
}
|
||||
```
|
|
@ -0,0 +1,28 @@
|
|||
# Gemini CLI
|
||||
|
||||
Within Gemini CLI, `packages/cli` is the frontend for users to send and receive prompts with the Gemini AI model and its associated tools. For a general overview of Gemini CLI, see the [main documentation page](../index.md).
|
||||
|
||||
## Navigating this section
|
||||
|
||||
- **[Authentication](./authentication.md):** A guide to setting up authentication with Google's AI services.
|
||||
- **[Commands](./commands.md):** A reference for Gemini CLI commands (e.g., `/help`, `/tools`, `/theme`).
|
||||
- **[Configuration](./configuration.md):** A guide to tailoring Gemini CLI behavior using configuration files.
|
||||
- **[Token Caching](./token-caching.md):** Optimize API costs through token caching.
|
||||
- **[Themes](./themes.md)**: A guide to customizing the CLI's appearance with different themes.
|
||||
- **[Tutorials](tutorials.md)**: A tutorial showing how to use Gemini CLI to automate a development task.
|
||||
|
||||
## Non-interactive mode
|
||||
|
||||
Gemini CLI can be run in a non-interactive mode, which is useful for scripting and automation. In this mode, you pipe input to the CLI, it executes the command, and then it exits.
|
||||
|
||||
The following example pipes a command to Gemini CLI from your terminal:
|
||||
|
||||
```bash
|
||||
echo "What is fine tuning?" | gemini
|
||||
```
|
||||
|
||||
Gemini CLI executes the command and prints the output to your terminal. Note that you can achieve the same behavior by using the `--prompt` or `-p` flag. For example:
|
||||
|
||||
```bash
|
||||
gemini -p "What is fine tuning?"
|
||||
```
|
|
@ -0,0 +1,76 @@
|
|||
# OpenAI Authentication
|
||||
|
||||
Qwen Code CLI supports OpenAI authentication for users who want to use OpenAI models instead of Google's Gemini models.
|
||||
|
||||
## Authentication Methods
|
||||
|
||||
### 1. Interactive Authentication (Recommended)
|
||||
|
||||
When you first run the CLI and select OpenAI as your authentication method, you'll be prompted to enter:
|
||||
|
||||
- **API Key**: Your OpenAI API key from [https://platform.openai.com/api-keys](https://platform.openai.com/api-keys)
|
||||
- **Base URL**: The base URL for OpenAI API (defaults to `https://api.openai.com/v1`)
|
||||
- **Model**: The OpenAI model to use (defaults to `gpt-4o`)
|
||||
|
||||
The CLI will guide you through each field:
|
||||
|
||||
1. Enter your API key and press Enter
|
||||
2. Review/modify the base URL and press Enter
|
||||
3. Review/modify the model name and press Enter
|
||||
|
||||
**Note**: You can paste your API key directly - the CLI supports paste functionality and will display the full key for verification.
|
||||
|
||||
### 2. Command Line Arguments
|
||||
|
||||
You can also provide the OpenAI credentials via command line arguments:
|
||||
|
||||
```bash
|
||||
# Basic usage with API key
|
||||
qwen-code --openai-api-key "your-api-key-here"
|
||||
|
||||
# With custom base URL
|
||||
qwen-code --openai-api-key "your-api-key-here" --openai-base-url "https://your-custom-endpoint.com/v1"
|
||||
|
||||
# With custom model
|
||||
qwen-code --openai-api-key "your-api-key-here" --model "gpt-4-turbo"
|
||||
```
|
||||
|
||||
### 3. Environment Variables
|
||||
|
||||
Set the following environment variables in your shell or `.env` file:
|
||||
|
||||
```bash
|
||||
export OPENAI_API_KEY="your-api-key-here"
|
||||
export OPENAI_BASE_URL="https://api.openai.com/v1" # Optional, defaults to this value
|
||||
export OPENAI_MODEL="gpt-4o" # Optional, defaults to gpt-4o
|
||||
```
|
||||
|
||||
## Supported Models
|
||||
|
||||
The CLI supports all OpenAI models that are available through the OpenAI API, including:
|
||||
|
||||
- `gpt-4o` (default)
|
||||
- `gpt-4o-mini`
|
||||
- `gpt-4-turbo`
|
||||
- `gpt-4`
|
||||
- `gpt-3.5-turbo`
|
||||
- And other available models
|
||||
|
||||
## Custom Endpoints
|
||||
|
||||
You can use custom endpoints by setting the `OPENAI_BASE_URL` environment variable or using the `--openai-base-url` command line argument. This is useful for:
|
||||
|
||||
- Using Azure OpenAI
|
||||
- Using other OpenAI-compatible APIs
|
||||
- Using local OpenAI-compatible servers
|
||||
|
||||
## Switching Authentication Methods
|
||||
|
||||
To switch between authentication methods, use the `/auth` command in the CLI interface.
|
||||
|
||||
## Security Notes
|
||||
|
||||
- API keys are stored in memory during the session
|
||||
- For persistent storage, use environment variables or `.env` files
|
||||
- Never commit API keys to version control
|
||||
- The CLI displays API keys in plain text for verification - ensure your terminal is secure
|
|
@ -0,0 +1,85 @@
|
|||
# Themes
|
||||
|
||||
Gemini CLI supports a variety of themes to customize its color scheme and appearance. You can change the theme to suit your preferences via the `/theme` command or the `theme` configuration setting.
|
||||
|
||||
## Available Themes
|
||||
|
||||
Gemini CLI comes with a selection of pre-defined themes, which you can list using the `/theme` command within Gemini CLI:
|
||||
|
||||
- **Dark Themes:**
|
||||
- `ANSI`
|
||||
- `Atom One`
|
||||
- `Ayu`
|
||||
- `Default`
|
||||
- `Dracula`
|
||||
- `GitHub`
|
||||
- **Light Themes:**
|
||||
- `ANSI Light`
|
||||
- `Ayu Light`
|
||||
- `Default Light`
|
||||
- `GitHub Light`
|
||||
- `Google Code`
|
||||
- `Xcode`
|
||||
|
||||
### Changing Themes
|
||||
|
||||
1. Enter `/theme` into Gemini CLI.
|
||||
2. A dialog or selection prompt appears, listing the available themes.
|
||||
3. Using the arrow keys, select a theme. Some interfaces might offer a live preview or highlight as you select.
|
||||
4. Confirm your selection to apply the theme.
|
||||
|
||||
### Theme Persistence
|
||||
|
||||
Selected themes are saved in Gemini CLI's [configuration](./configuration.md) so your preference is remembered across sessions.
|
||||
|
||||
## Dark Themes
|
||||
|
||||
### ANSI
|
||||
|
||||
<img src="../assets/theme-ansi.png" alt="ANSI theme" width="600" />
|
||||
|
||||
### Atom One
|
||||
|
||||
<img src="../assets/theme-atom-one.png" alt="Atom One theme" width="600">
|
||||
|
||||
### Ayu
|
||||
|
||||
<img src="../assets/theme-ayu.png" alt="Ayu theme" width="600">
|
||||
|
||||
### Default
|
||||
|
||||
<img src="../assets/theme-default.png" alt="Default theme" width="600">
|
||||
|
||||
### Dracula
|
||||
|
||||
<img src="../assets/theme-dracula.png" alt="Dracula theme" width="600">
|
||||
|
||||
### GitHub
|
||||
|
||||
<img src="../assets/theme-github.png" alt="GitHub theme" width="600">
|
||||
|
||||
## Light Themes
|
||||
|
||||
### ANSI Light
|
||||
|
||||
<img src="../assets/theme-ansi-light.png" alt="ANSI Light theme" width="600">
|
||||
|
||||
### Ayu Light
|
||||
|
||||
<img src="../assets/theme-ayu-light.png" alt="Ayu Light theme" width="600">
|
||||
|
||||
### Default Light
|
||||
|
||||
<img src="../assets/theme-default-light.png" alt="Default Light theme" width="600">
|
||||
|
||||
### GitHub Light
|
||||
|
||||
<img src="../assets/theme-github-light.png" alt="GitHub Light theme" width="600">
|
||||
|
||||
### Google Code
|
||||
|
||||
<img src="../assets/theme-google-light.png" alt="Google Code theme" width="600">
|
||||
|
||||
### Xcode
|
||||
|
||||
<img src="../assets/theme-xcode-light.png" alt="Xcode Light theme" width="600">
|
|
@ -0,0 +1,14 @@
|
|||
# Token Caching and Cost Optimization
|
||||
|
||||
Gemini CLI automatically optimizes API costs through token caching when using API key authentication (Gemini API key or Vertex AI). This feature reuses previous system instructions and context to reduce the number of tokens processed in subsequent requests.
|
||||
|
||||
**Token caching is available for:**
|
||||
|
||||
- API key users (Gemini API key)
|
||||
- Vertex AI users (with project and location setup)
|
||||
|
||||
**Token caching is not available for:**
|
||||
|
||||
- OAuth users (Google Personal/Enterprise accounts) - the Code Assist API does not support cached content creation at this time
|
||||
|
||||
You can view your token usage and cached token savings using the `/stats` command. When cached tokens are available, they will be displayed in the stats output.
|
|
@ -0,0 +1,69 @@
|
|||
# Tutorials
|
||||
|
||||
This page contains tutorials for interacting with Gemini CLI.
|
||||
|
||||
## Setting up a Model Context Protocol (MCP) server
|
||||
|
||||
> [!CAUTION]
|
||||
> Before using a third-party MCP server, ensure you trust its source and understand the tools it provides. Your use of third-party servers is at your own risk.
|
||||
|
||||
This tutorial demonstrates how to set up a MCP server, using the [GitHub MCP server](https://github.com/github/github-mcp-server) as an example. The GitHub MCP server provides tools for interacting with GitHub repositories, such as creating issues and commenting on pull requests.
|
||||
|
||||
### Prerequisites
|
||||
|
||||
Before you begin, ensure you have the following installed and configured:
|
||||
|
||||
- **Docker:** Install and run [Docker].
|
||||
- **GitHub Personal Access Token (PAT):** Create a new [classic] or [fine-grained] PAT with the necessary scopes.
|
||||
|
||||
[Docker]: https://www.docker.com/
|
||||
[classic]: https://github.com/settings/tokens/new
|
||||
[fine-grained]: https://github.com/settings/personal-access-tokens/new
|
||||
|
||||
### Guide
|
||||
|
||||
#### Configure the MCP server in `settings.json`
|
||||
|
||||
In your project's root directory, create or open the [`.qwen/settings.json` file](./configuration.md). Within the file, add the `mcpServers` configuration block, which provides instructions for how to launch the GitHub MCP server.
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"github": {
|
||||
"command": "docker",
|
||||
"args": [
|
||||
"run",
|
||||
"-i",
|
||||
"--rm",
|
||||
"-e",
|
||||
"GITHUB_PERSONAL_ACCESS_TOKEN",
|
||||
"ghcr.io/github/github-mcp-server"
|
||||
],
|
||||
"env": {
|
||||
"GITHUB_PERSONAL_ACCESS_TOKEN": "${GITHUB_PERSONAL_ACCESS_TOKEN}"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Set your GitHub token
|
||||
|
||||
> [!CAUTION]
|
||||
> Using a broadly scoped personal access token that has access to personal and private repositories can lead to information from the private repository being leaked into the public repository. We recommend using a fine-grained access token that doesn't share access to both public and private repositories.
|
||||
|
||||
Use an environment variable to store your GitHub PAT:
|
||||
|
||||
```bash
|
||||
export GITHUB_PERSONAL_ACCESS_TOKEN="pat_YourActualGitHubTokenHere"
|
||||
```
|
||||
|
||||
Gemini CLI uses this value in the `mcpServers` configuration that you defined in the `settings.json` file.
|
||||
|
||||
#### Launch Gemini CLI and verify the connection
|
||||
|
||||
When you launch Gemini CLI, it automatically reads your configuration and launches the GitHub MCP server in the background. You can then use natural language prompts to ask Gemini CLI to perform GitHub actions. For example:
|
||||
|
||||
```bash
|
||||
"get all open issues assigned to me in the 'foo/bar' repo and prioritize them"
|
||||
```
|
|
@ -0,0 +1,55 @@
|
|||
# Gemini CLI Core
|
||||
|
||||
Gemini CLI's core package (`packages/core`) is the backend portion of Gemini CLI, handling communication with the Gemini API, managing tools, and processing requests sent from `packages/cli`. For a general overview of Gemini CLI, see the [main documentation page](../index.md).
|
||||
|
||||
## Navigating this section
|
||||
|
||||
- **[Core tools API](./tools-api.md):** Information on how tools are defined, registered, and used by the core.
|
||||
- **[Memory Import Processor](./memport.md):** Documentation for the modular GEMINI.md import feature using @file.md syntax.
|
||||
|
||||
## Role of the core
|
||||
|
||||
While the `packages/cli` portion of Gemini CLI provides the user interface, `packages/core` is responsible for:
|
||||
|
||||
- **Gemini API interaction:** Securely communicating with the Google Gemini API, sending user prompts, and receiving model responses.
|
||||
- **Prompt engineering:** Constructing effective prompts for the Gemini model, potentially incorporating conversation history, tool definitions, and instructional context from `GEMINI.md` files.
|
||||
- **Tool management & orchestration:**
|
||||
- Registering available tools (e.g., file system tools, shell command execution).
|
||||
- Interpreting tool use requests from the Gemini model.
|
||||
- Executing the requested tools with the provided arguments.
|
||||
- Returning tool execution results to the Gemini model for further processing.
|
||||
- **Session and state management:** Keeping track of the conversation state, including history and any relevant context required for coherent interactions.
|
||||
- **Configuration:** Managing core-specific configurations, such as API key access, model selection, and tool settings.
|
||||
|
||||
## Security considerations
|
||||
|
||||
The core plays a vital role in security:
|
||||
|
||||
- **API key management:** It handles the `GEMINI_API_KEY` and ensures it's used securely when communicating with the Gemini API.
|
||||
- **Tool execution:** When tools interact with the local system (e.g., `run_shell_command`), the core (and its underlying tool implementations) must do so with appropriate caution, often involving sandboxing mechanisms to prevent unintended modifications.
|
||||
|
||||
## Chat history compression
|
||||
|
||||
To ensure that long conversations don't exceed the token limits of the Gemini model, the core includes a chat history compression feature.
|
||||
|
||||
When a conversation approaches the token limit for the configured model, the core automatically compresses the conversation history before sending it to the model. This compression is designed to be lossless in terms of the information conveyed, but it reduces the overall number of tokens used.
|
||||
|
||||
You can find the token limits for each model in the [Google AI documentation](https://ai.google.dev/gemini-api/docs/models).
|
||||
|
||||
## Model fallback
|
||||
|
||||
Gemini CLI includes a model fallback mechanism to ensure that you can continue to use the CLI even if the default "pro" model is rate-limited.
|
||||
|
||||
If you are using the default "pro" model and the CLI detects that you are being rate-limited, it automatically switches to the "flash" model for the current session. This allows you to continue working without interruption.
|
||||
|
||||
## File discovery service
|
||||
|
||||
The file discovery service is responsible for finding files in the project that are relevant to the current context. It is used by the `@` command and other tools that need to access files.
|
||||
|
||||
## Memory discovery service
|
||||
|
||||
The memory discovery service is responsible for finding and loading the `GEMINI.md` files that provide context to the model. It searches for these files in a hierarchical manner, starting from the current working directory and moving up to the project root and the user's home directory. It also searches in subdirectories.
|
||||
|
||||
This allows you to have global, project-level, and component-level context files, which are all combined to provide the model with the most relevant information.
|
||||
|
||||
You can use the [`/memory` command](../cli/commands.md) to `show`, `add`, and `refresh` the content of loaded `GEMINI.md` files.
|
|
@ -0,0 +1,175 @@
|
|||
# Memory Import Processor
|
||||
|
||||
The Memory Import Processor is a feature that allows you to modularize your GEMINI.md files by importing content from other markdown files using the `@file.md` syntax.
|
||||
|
||||
## Overview
|
||||
|
||||
This feature enables you to break down large GEMINI.md files into smaller, more manageable components that can be reused across different contexts. The import processor supports both relative and absolute paths, with built-in safety features to prevent circular imports and ensure file access security.
|
||||
|
||||
## Important Limitations
|
||||
|
||||
**This feature only supports `.md` (markdown) files.** Attempting to import files with other extensions (like `.txt`, `.json`, etc.) will result in a warning and the import will fail.
|
||||
|
||||
## Syntax
|
||||
|
||||
Use the `@` symbol followed by the path to the markdown file you want to import:
|
||||
|
||||
```markdown
|
||||
# Main GEMINI.md file
|
||||
|
||||
This is the main content.
|
||||
|
||||
@./components/instructions.md
|
||||
|
||||
More content here.
|
||||
|
||||
@./shared/configuration.md
|
||||
```
|
||||
|
||||
## Supported Path Formats
|
||||
|
||||
### Relative Paths
|
||||
|
||||
- `@./file.md` - Import from the same directory
|
||||
- `@../file.md` - Import from parent directory
|
||||
- `@./components/file.md` - Import from subdirectory
|
||||
|
||||
### Absolute Paths
|
||||
|
||||
- `@/absolute/path/to/file.md` - Import using absolute path
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Import
|
||||
|
||||
```markdown
|
||||
# My GEMINI.md
|
||||
|
||||
Welcome to my project!
|
||||
|
||||
@./getting-started.md
|
||||
|
||||
## Features
|
||||
|
||||
@./features/overview.md
|
||||
```
|
||||
|
||||
### Nested Imports
|
||||
|
||||
The imported files can themselves contain imports, creating a nested structure:
|
||||
|
||||
```markdown
|
||||
# main.md
|
||||
|
||||
@./header.md
|
||||
@./content.md
|
||||
@./footer.md
|
||||
```
|
||||
|
||||
```markdown
|
||||
# header.md
|
||||
|
||||
# Project Header
|
||||
|
||||
@./shared/title.md
|
||||
```
|
||||
|
||||
## Safety Features
|
||||
|
||||
### Circular Import Detection
|
||||
|
||||
The processor automatically detects and prevents circular imports:
|
||||
|
||||
```markdown
|
||||
# file-a.md
|
||||
|
||||
@./file-b.md
|
||||
|
||||
# file-b.md
|
||||
|
||||
@./file-a.md <!-- This will be detected and prevented -->
|
||||
```
|
||||
|
||||
### File Access Security
|
||||
|
||||
The `validateImportPath` function ensures that imports are only allowed from specified directories, preventing access to sensitive files outside the allowed scope.
|
||||
|
||||
### Maximum Import Depth
|
||||
|
||||
To prevent infinite recursion, there's a configurable maximum import depth (default: 10 levels).
|
||||
|
||||
## Error Handling
|
||||
|
||||
### Non-MD File Attempts
|
||||
|
||||
If you try to import a non-markdown file, you'll see a warning:
|
||||
|
||||
```markdown
|
||||
@./instructions.txt <!-- This will show a warning and fail -->
|
||||
```
|
||||
|
||||
Console output:
|
||||
|
||||
```
|
||||
[WARN] [ImportProcessor] Import processor only supports .md files. Attempting to import non-md file: ./instructions.txt. This will fail.
|
||||
```
|
||||
|
||||
### Missing Files
|
||||
|
||||
If a referenced file doesn't exist, the import will fail gracefully with an error comment in the output.
|
||||
|
||||
### File Access Errors
|
||||
|
||||
Permission issues or other file system errors are handled gracefully with appropriate error messages.
|
||||
|
||||
## API Reference
|
||||
|
||||
### `processImports(content, basePath, debugMode?, importState?)`
|
||||
|
||||
Processes import statements in GEMINI.md content.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- `content` (string): The content to process for imports
|
||||
- `basePath` (string): The directory path where the current file is located
|
||||
- `debugMode` (boolean, optional): Whether to enable debug logging (default: false)
|
||||
- `importState` (ImportState, optional): State tracking for circular import prevention
|
||||
|
||||
**Returns:** `Promise<string>` - Processed content with imports resolved
|
||||
|
||||
### `validateImportPath(importPath, basePath, allowedDirectories)`
|
||||
|
||||
Validates import paths to ensure they are safe and within allowed directories.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- `importPath` (string): The import path to validate
|
||||
- `basePath` (string): The base directory for resolving relative paths
|
||||
- `allowedDirectories` (string[]): Array of allowed directory paths
|
||||
|
||||
**Returns:** boolean - Whether the import path is valid
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Use descriptive file names** for imported components
|
||||
2. **Keep imports shallow** - avoid deeply nested import chains
|
||||
3. **Document your structure** - maintain a clear hierarchy of imported files
|
||||
4. **Test your imports** - ensure all referenced files exist and are accessible
|
||||
5. **Use relative paths** when possible for better portability
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **Import not working**: Check that the file exists and has a `.md` extension
|
||||
2. **Circular import warnings**: Review your import structure for circular references
|
||||
3. **Permission errors**: Ensure the files are readable and within allowed directories
|
||||
4. **Path resolution issues**: Use absolute paths if relative paths aren't resolving correctly
|
||||
|
||||
### Debug Mode
|
||||
|
||||
Enable debug mode to see detailed logging of the import process:
|
||||
|
||||
```typescript
|
||||
const result = await processImports(content, basePath, true);
|
||||
```
|
|
@ -0,0 +1,73 @@
|
|||
# Gemini CLI Core: Tools API
|
||||
|
||||
The Gemini CLI core (`packages/core`) features a robust system for defining, registering, and executing tools. These tools extend the capabilities of the Gemini model, allowing it to interact with the local environment, fetch web content, and perform various actions beyond simple text generation.
|
||||
|
||||
## Core Concepts
|
||||
|
||||
- **Tool (`tools.ts`):** An interface and base class (`BaseTool`) that defines the contract for all tools. Each tool must have:
|
||||
- `name`: A unique internal name (used in API calls to Gemini).
|
||||
- `displayName`: A user-friendly name.
|
||||
- `description`: A clear explanation of what the tool does, which is provided to the Gemini model.
|
||||
- `parameterSchema`: A JSON schema defining the parameters the tool accepts. This is crucial for the Gemini model to understand how to call the tool correctly.
|
||||
- `validateToolParams()`: A method to validate incoming parameters.
|
||||
- `getDescription()`: A method to provide a human-readable description of what the tool will do with specific parameters before execution.
|
||||
- `shouldConfirmExecute()`: A method to determine if user confirmation is required before execution (e.g., for potentially destructive operations).
|
||||
- `execute()`: The core method that performs the tool's action and returns a `ToolResult`.
|
||||
|
||||
- **`ToolResult` (`tools.ts`):** An interface defining the structure of a tool's execution outcome:
|
||||
- `llmContent`: The factual string content to be included in the history sent back to the LLM for context.
|
||||
- `returnDisplay`: A user-friendly string (often Markdown) or a special object (like `FileDiff`) for display in the CLI.
|
||||
|
||||
- **Tool Registry (`tool-registry.ts`):** A class (`ToolRegistry`) responsible for:
|
||||
- **Registering Tools:** Holding a collection of all available built-in tools (e.g., `ReadFileTool`, `ShellTool`).
|
||||
- **Discovering Tools:** It can also discover tools dynamically:
|
||||
- **Command-based Discovery:** If `toolDiscoveryCommand` is configured in settings, this command is executed. It's expected to output JSON describing custom tools, which are then registered as `DiscoveredTool` instances.
|
||||
- **MCP-based Discovery:** If `mcpServerCommand` is configured, the registry can connect to a Model Context Protocol (MCP) server to list and register tools (`DiscoveredMCPTool`).
|
||||
- **Providing Schemas:** Exposing the `FunctionDeclaration` schemas of all registered tools to the Gemini model, so it knows what tools are available and how to use them.
|
||||
- **Retrieving Tools:** Allowing the core to get a specific tool by name for execution.
|
||||
|
||||
## Built-in Tools
|
||||
|
||||
The core comes with a suite of pre-defined tools, typically found in `packages/core/src/tools/`. These include:
|
||||
|
||||
- **File System Tools:**
|
||||
- `LSTool` (`ls.ts`): Lists directory contents.
|
||||
- `ReadFileTool` (`read-file.ts`): Reads the content of a single file. It takes an `absolute_path` parameter, which must be an absolute path.
|
||||
- `WriteFileTool` (`write-file.ts`): Writes content to a file.
|
||||
- `GrepTool` (`grep.ts`): Searches for patterns in files.
|
||||
- `GlobTool` (`glob.ts`): Finds files matching glob patterns.
|
||||
- `EditTool` (`edit.ts`): Performs in-place modifications to files (often requiring confirmation).
|
||||
- `ReadManyFilesTool` (`read-many-files.ts`): Reads and concatenates content from multiple files or glob patterns (used by the `@` command in CLI).
|
||||
- **Execution Tools:**
|
||||
- `ShellTool` (`shell.ts`): Executes arbitrary shell commands (requires careful sandboxing and user confirmation).
|
||||
- **Web Tools:**
|
||||
- `WebFetchTool` (`web-fetch.ts`): Fetches content from a URL.
|
||||
- `WebSearchTool` (`web-search.ts`): Performs a web search.
|
||||
- **Memory Tools:**
|
||||
- `MemoryTool` (`memoryTool.ts`): Interacts with the AI's memory.
|
||||
|
||||
Each of these tools extends `BaseTool` and implements the required methods for its specific functionality.
|
||||
|
||||
## Tool Execution Flow
|
||||
|
||||
1. **Model Request:** The Gemini model, based on the user's prompt and the provided tool schemas, decides to use a tool and returns a `FunctionCall` part in its response, specifying the tool name and arguments.
|
||||
2. **Core Receives Request:** The core parses this `FunctionCall`.
|
||||
3. **Tool Retrieval:** It looks up the requested tool in the `ToolRegistry`.
|
||||
4. **Parameter Validation:** The tool's `validateToolParams()` method is called.
|
||||
5. **Confirmation (if needed):**
|
||||
- The tool's `shouldConfirmExecute()` method is called.
|
||||
- If it returns details for confirmation, the core communicates this back to the CLI, which prompts the user.
|
||||
- The user's decision (e.g., proceed, cancel) is sent back to the core.
|
||||
6. **Execution:** If validated and confirmed (or if no confirmation is needed), the core calls the tool's `execute()` method with the provided arguments and an `AbortSignal` (for potential cancellation).
|
||||
7. **Result Processing:** The `ToolResult` from `execute()` is received by the core.
|
||||
8. **Response to Model:** The `llmContent` from the `ToolResult` is packaged as a `FunctionResponse` and sent back to the Gemini model so it can continue generating a user-facing response.
|
||||
9. **Display to User:** The `returnDisplay` from the `ToolResult` is sent to the CLI to show the user what the tool did.
|
||||
|
||||
## Extending with Custom Tools
|
||||
|
||||
While direct programmatic registration of new tools isn't explicitly detailed as a primary workflow for typical end-users, the architecture supports extension through:
|
||||
|
||||
- **Command-based Discovery:** Advanced users or project administrators can define a `toolDiscoveryCommand` in `settings.json`. This command, when run by the Gemini CLI core, should output a JSON array of `FunctionDeclaration` objects. The core will then make these available as `DiscoveredTool` instances. The corresponding `toolCallCommand` would then be responsible for actually executing these custom tools.
|
||||
- **MCP Server(s):** For more complex scenarios, one or more MCP servers can be set up and configured via the `mcpServers` setting in `settings.json`. The Gemini CLI core can then discover and use tools exposed by these servers. As mentioned, if you have multiple MCP servers, the tool names will be prefixed with the server name from your configuration (e.g., `serverAlias__actualToolName`).
|
||||
|
||||
This tool system provides a flexible and powerful way to augment the Gemini model's capabilities, making the Gemini CLI a versatile assistant for a wide range of tasks.
|
|
@ -0,0 +1,117 @@
|
|||
# Gemini CLI Execution and Deployment
|
||||
|
||||
This document describes how to run Gemini CLI and explains the deployment architecture that Gemini CLI uses.
|
||||
|
||||
## Running Gemini CLI
|
||||
|
||||
There are several ways to run Gemini CLI. The option you choose depends on how you intend to use Gemini CLI.
|
||||
|
||||
---
|
||||
|
||||
### 1. Standard installation (Recommended for typical users)
|
||||
|
||||
This is the recommended way for end-users to install Gemini CLI. It involves downloading the Gemini CLI package from the NPM registry.
|
||||
|
||||
- **Global install:**
|
||||
|
||||
```bash
|
||||
npm install -g @google/gemini-cli
|
||||
```
|
||||
|
||||
Then, run the CLI from anywhere:
|
||||
|
||||
```bash
|
||||
gemini
|
||||
```
|
||||
|
||||
- **NPX execution:**
|
||||
|
||||
```bash
|
||||
# Execute the latest version from NPM without a global install
|
||||
npx @google/gemini-cli
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 2. Running in a sandbox (Docker/Podman)
|
||||
|
||||
For security and isolation, Gemini CLI can be run inside a container. This is the default way that the CLI executes tools that might have side effects.
|
||||
|
||||
- **Directly from the Registry:**
|
||||
You can run the published sandbox image directly. This is useful for environments where you only have Docker and want to run the CLI.
|
||||
```bash
|
||||
# Run the published sandbox image
|
||||
docker run --rm -it us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.1.1
|
||||
```
|
||||
- **Using the `--sandbox` flag:**
|
||||
If you have Gemini CLI installed locally (using the standard installation described above), you can instruct it to run inside the sandbox container.
|
||||
```bash
|
||||
gemini --sandbox -y -p "your prompt here"
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 3. Running from source (Recommended for Gemini CLI contributors)
|
||||
|
||||
Contributors to the project will want to run the CLI directly from the source code.
|
||||
|
||||
- **Development Mode:**
|
||||
This method provides hot-reloading and is useful for active development.
|
||||
```bash
|
||||
# From the root of the repository
|
||||
npm run start
|
||||
```
|
||||
- **Production-like mode (Linked package):**
|
||||
This method simulates a global installation by linking your local package. It's useful for testing a local build in a production workflow.
|
||||
|
||||
```bash
|
||||
# Link the local cli package to your global node_modules
|
||||
npm link packages/cli
|
||||
|
||||
# Now you can run your local version using the `gemini` command
|
||||
gemini
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### 4. Running the latest Gemini CLI commit from GitHub
|
||||
|
||||
You can run the most recently committed version of Gemini CLI directly from the GitHub repository. This is useful for testing features still in development.
|
||||
|
||||
```bash
|
||||
# Execute the CLI directly from the main branch on GitHub
|
||||
npx https://github.com/google-gemini/gemini-cli
|
||||
```
|
||||
|
||||
## Deployment architecture
|
||||
|
||||
The execution methods described above are made possible by the following architectural components and processes:
|
||||
|
||||
**NPM packages**
|
||||
|
||||
Gemini CLI project is a monorepo that publishes two core packages to the NPM registry:
|
||||
|
||||
- `@google/gemini-cli-core`: The backend, handling logic and tool execution.
|
||||
- `@google/gemini-cli`: The user-facing frontend.
|
||||
|
||||
These packages are used when performing the standard installation and when running Gemini CLI from the source.
|
||||
|
||||
**Build and packaging processes**
|
||||
|
||||
There are two distinct build processes used, depending on the distribution channel:
|
||||
|
||||
- **NPM publication:** For publishing to the NPM registry, the TypeScript source code in `@google/gemini-cli-core` and `@google/gemini-cli` is transpiled into standard JavaScript using the TypeScript Compiler (`tsc`). The resulting `dist/` directory is what gets published in the NPM package. This is a standard approach for TypeScript libraries.
|
||||
|
||||
- **GitHub `npx` execution:** When running the latest version of Gemini CLI directly from GitHub, a different process is triggered by the `prepare` script in `package.json`. This script uses `esbuild` to bundle the entire application and its dependencies into a single, self-contained JavaScript file. This bundle is created on-the-fly on the user's machine and is not checked into the repository.
|
||||
|
||||
**Docker sandbox image**
|
||||
|
||||
The Docker-based execution method is supported by the `gemini-cli-sandbox` container image. This image is published to a container registry and contains a pre-installed, global version of Gemini CLI.
|
||||
|
||||
## Release process
|
||||
|
||||
The release process is automated through GitHub Actions. The release workflow performs the following actions:
|
||||
|
||||
1. Build the NPM packages using `tsc`.
|
||||
2. Publish the NPM packages to the artifact registry.
|
||||
3. Create GitHub releases with bundled assets.
|
|
@ -0,0 +1,81 @@
|
|||
# Example Proxy Script
|
||||
|
||||
The following is an example of a proxy script that can be used with the `GEMINI_SANDBOX_PROXY_COMMAND` environment variable. This script only allows `HTTPS` connections to `example.com:443` and declines all other requests.
|
||||
|
||||
```javascript
|
||||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
// Example proxy server that listens on :::8877 and only allows HTTPS connections to example.com.
|
||||
// Set `GEMINI_SANDBOX_PROXY_COMMAND=scripts/example-proxy.js` to run proxy alongside sandbox
|
||||
// Test via `curl https://example.com` inside sandbox (in shell mode or via shell tool)
|
||||
|
||||
import http from 'http';
|
||||
import net from 'net';
|
||||
import { URL } from 'url';
|
||||
import console from 'console';
|
||||
|
||||
const PROXY_PORT = 8877;
|
||||
const ALLOWED_DOMAINS = ['example.com', 'googleapis.com'];
|
||||
const ALLOWED_PORT = '443';
|
||||
|
||||
const server = http.createServer((req, res) => {
|
||||
// Deny all requests other than CONNECT for HTTPS
|
||||
console.log(
|
||||
`[PROXY] Denying non-CONNECT request for: ${req.method} ${req.url}`,
|
||||
);
|
||||
res.writeHead(405, { 'Content-Type': 'text/plain' });
|
||||
res.end('Method Not Allowed');
|
||||
});
|
||||
|
||||
server.on('connect', (req, clientSocket, head) => {
|
||||
// req.url will be in the format "hostname:port" for a CONNECT request.
|
||||
const { port, hostname } = new URL(`http://${req.url}`);
|
||||
|
||||
console.log(`[PROXY] Intercepted CONNECT request for: ${hostname}:${port}`);
|
||||
|
||||
if (
|
||||
ALLOWED_DOMAINS.some(
|
||||
(domain) => hostname == domain || hostname.endsWith(`.${domain}`),
|
||||
) &&
|
||||
port === ALLOWED_PORT
|
||||
) {
|
||||
console.log(`[PROXY] Allowing connection to ${hostname}:${port}`);
|
||||
|
||||
// Establish a TCP connection to the original destination.
|
||||
const serverSocket = net.connect(port, hostname, () => {
|
||||
clientSocket.write('HTTP/1.1 200 Connection Established\r\n\r\n');
|
||||
// Create a tunnel by piping data between the client and the destination server.
|
||||
serverSocket.write(head);
|
||||
serverSocket.pipe(clientSocket);
|
||||
clientSocket.pipe(serverSocket);
|
||||
});
|
||||
|
||||
serverSocket.on('error', (err) => {
|
||||
console.error(`[PROXY] Error connecting to destination: ${err.message}`);
|
||||
clientSocket.end(`HTTP/1.1 502 Bad Gateway\r\n\r\n`);
|
||||
});
|
||||
} else {
|
||||
console.log(`[PROXY] Denying connection to ${hostname}:${port}`);
|
||||
clientSocket.end('HTTP/1.1 403 Forbidden\r\n\r\n');
|
||||
}
|
||||
|
||||
clientSocket.on('error', (err) => {
|
||||
// This can happen if the client hangs up.
|
||||
console.error(`[PROXY] Client socket error: ${err.message}`);
|
||||
});
|
||||
});
|
||||
|
||||
server.listen(PROXY_PORT, () => {
|
||||
const address = server.address();
|
||||
console.log(`[PROXY] Proxy listening on ${address.address}:${address.port}`);
|
||||
console.log(
|
||||
`[PROXY] Allowing HTTPS connections to domains: ${ALLOWED_DOMAINS.join(', ')}`,
|
||||
);
|
||||
});
|
||||
```
|
|
@ -0,0 +1,42 @@
|
|||
# Gemini CLI Extensions
|
||||
|
||||
Gemini CLI supports extensions that can be used to configure and extend its functionality.
|
||||
|
||||
## How it works
|
||||
|
||||
On startup, Gemini CLI looks for extensions in two locations:
|
||||
|
||||
1. `<workspace>/.qwen/extensions`
|
||||
2. `<home>/.qwen/extensions`
|
||||
|
||||
Gemini CLI loads all extensions from both locations. If an extension with the same name exists in both locations, the extension in the workspace directory takes precedence.
|
||||
|
||||
Within each location, individual extensions exist as a directory that contains a `gemini-extension.json` file. For example:
|
||||
|
||||
`<workspace>/.qwen/extensions/my-extension/gemini-extension.json`
|
||||
|
||||
### `gemini-extension.json`
|
||||
|
||||
The `gemini-extension.json` file contains the configuration for the extension. The file has the following structure:
|
||||
|
||||
```json
|
||||
{
|
||||
"name": "my-extension",
|
||||
"version": "1.0.0",
|
||||
"mcpServers": {
|
||||
"my-server": {
|
||||
"command": "node my-server.js"
|
||||
}
|
||||
},
|
||||
"contextFileName": "GEMINI.md",
|
||||
"excludeTools": ["run_shell_command"]
|
||||
}
|
||||
```
|
||||
|
||||
- `name`: The name of the extension. This is used to uniquely identify the extension. This should match the name of your extension directory.
|
||||
- `version`: The version of the extension.
|
||||
- `mcpServers`: A map of MCP servers to configure. The key is the name of the server, and the value is the server configuration. These servers will be loaded on startup just like MCP servers configured in a [`settings.json` file](./cli/configuration.md). If both an extension and a `settings.json` file configure an MCP server with the same name, the server defined in the `settings.json` file takes precedence.
|
||||
- `contextFileName`: The name of the file that contains the context for the extension. This will be used to load the context from the workspace. If this property is not used but a `GEMINI.md` file is present in your extension directory, then that file will be loaded.
|
||||
- `excludeTools`: An array of tool names to exclude from the model. You can also specify command-specific restrictions for tools that support it, like the `run_shell_command` tool. For example, `"excludeTools": ["run_shell_command(rm -rf)"]` will block the `rm -rf` command.
|
||||
|
||||
When Gemini CLI starts, it loads all the extensions and merges their configurations. If there are any conflicts, the workspace configuration takes precedence.
|
|
@ -0,0 +1,38 @@
|
|||
# Welcome to Gemini CLI documentation
|
||||
|
||||
This documentation provides a comprehensive guide to installing, using, and developing Gemini CLI. This tool lets you interact with Gemini models through a command-line interface.
|
||||
|
||||
## Overview
|
||||
|
||||
Gemini CLI brings the capabilities of Gemini models to your terminal in an interactive Read-Eval-Print Loop (REPL) environment. Gemini CLI consists of a client-side application (`packages/cli`) that communicates with a local server (`packages/core`), which in turn manages requests to the Gemini API and its AI models. Gemini CLI also contains a variety of tools for tasks such as performing file system operations, running shells, and web fetching, which are managed by `packages/core`.
|
||||
|
||||
## Navigating the documentation
|
||||
|
||||
This documentation is organized into the following sections:
|
||||
|
||||
- **[Execution and Deployment](./deployment.md):** Information for running Gemini CLI.
|
||||
- **[Architecture Overview](./architecture.md):** Understand the high-level design of Gemini CLI, including its components and how they interact.
|
||||
- **CLI Usage:** Documentation for `packages/cli`.
|
||||
- **[CLI Introduction](./cli/index.md):** Overview of the command-line interface.
|
||||
- **[Commands](./cli/commands.md):** Description of available CLI commands.
|
||||
- **[Configuration](./cli/configuration.md):** Information on configuring the CLI.
|
||||
- **[Checkpointing](./checkpointing.md):** Documentation for the checkpointing feature.
|
||||
- **[Extensions](./extension.md):** How to extend the CLI with new functionality.
|
||||
- **[Telemetry](./telemetry.md):** Overview of telemetry in the CLI.
|
||||
- **Core Details:** Documentation for `packages/core`.
|
||||
- **[Core Introduction](./core/index.md):** Overview of the core component.
|
||||
- **[Tools API](./core/tools-api.md):** Information on how the core manages and exposes tools.
|
||||
- **Tools:**
|
||||
- **[Tools Overview](./tools/index.md):** Overview of the available tools.
|
||||
- **[File System Tools](./tools/file-system.md):** Documentation for the `read_file` and `write_file` tools.
|
||||
- **[Multi-File Read Tool](./tools/multi-file.md):** Documentation for the `read_many_files` tool.
|
||||
- **[Shell Tool](./tools/shell.md):** Documentation for the `run_shell_command` tool.
|
||||
- **[Web Fetch Tool](./tools/web-fetch.md):** Documentation for the `web_fetch` tool.
|
||||
- **[Web Search Tool](./tools/web-search.md):** Documentation for the `google_web_search` tool.
|
||||
- **[Memory Tool](./tools/memory.md):** Documentation for the `save_memory` tool.
|
||||
- **[Contributing & Development Guide](../CONTRIBUTING.md):** Information for contributors and developers, including setup, building, testing, and coding conventions.
|
||||
- **[NPM Workspaces and Publishing](./npm.md):** Details on how the project's packages are managed and published.
|
||||
- **[Troubleshooting Guide](./troubleshooting.md):** Find solutions to common problems and FAQs.
|
||||
- **[Terms of Service and Privacy Notice](./tos-privacy.md):** Information on the terms of service and privacy notices applicable to your use of Gemini CLI.
|
||||
|
||||
We hope this documentation helps you make the most of the Gemini CLI!
|
|
@ -0,0 +1,141 @@
|
|||
# Integration Tests
|
||||
|
||||
This document provides information about the integration testing framework used in this project.
|
||||
|
||||
## Overview
|
||||
|
||||
The integration tests are designed to validate the end-to-end functionality of the Gemini CLI. They execute the built binary in a controlled environment and verify that it behaves as expected when interacting with the file system.
|
||||
|
||||
These tests are located in the `integration-tests` directory and are run using a custom test runner.
|
||||
|
||||
## Running the tests
|
||||
|
||||
The integration tests are not run as part of the default `npm run test` command. They must be run explicitly using the `npm run test:integration:all` script.
|
||||
|
||||
The integration tests can also be run using the following shortcut:
|
||||
|
||||
```bash
|
||||
npm run test:e2e
|
||||
```
|
||||
|
||||
## Running a specific set of tests
|
||||
|
||||
To run a subset of test files, you can use `npm run <integration test command> <file_name1> ...` where `<integration test command>` is either `test:e2e` or `test:integration*`, and `<file_name>` is any of the `.test.js` files in the `integration-tests/` directory. For example, the following command runs `list_directory.test.js` and `write_file.test.js`:
|
||||
|
||||
```bash
|
||||
npm run test:e2e list_directory write_file
|
||||
```
|
||||
|
||||
### Running a single test by name
|
||||
|
||||
To run a single test by its name, use the `--test-name-pattern` flag:
|
||||
|
||||
```bash
|
||||
npm run test:e2e -- --test-name-pattern "reads a file"
|
||||
```
|
||||
|
||||
### Running all tests
|
||||
|
||||
To run the entire suite of integration tests, use the following command:
|
||||
|
||||
```bash
|
||||
npm run test:integration:all
|
||||
```
|
||||
|
||||
### Sandbox matrix
|
||||
|
||||
The `all` command will run tests for `no sandboxing`, `docker` and `podman`.
|
||||
Each individual type can be run using the following commands:
|
||||
|
||||
```bash
|
||||
npm run test:integration:sandbox:none
|
||||
```
|
||||
|
||||
```bash
|
||||
npm run test:integration:sandbox:docker
|
||||
```
|
||||
|
||||
```bash
|
||||
npm run test:integration:sandbox:podman
|
||||
```
|
||||
|
||||
## Diagnostics
|
||||
|
||||
The integration test runner provides several options for diagnostics to help track down test failures.
|
||||
|
||||
### Keeping test output
|
||||
|
||||
You can preserve the temporary files created during a test run for inspection. This is useful for debugging issues with file system operations.
|
||||
|
||||
To keep the test output, you can either use the `--keep-output` flag or set the `KEEP_OUTPUT` environment variable to `true`.
|
||||
|
||||
```bash
|
||||
# Using the flag
|
||||
npm run test:integration:sandbox:none -- --keep-output
|
||||
|
||||
# Using the environment variable
|
||||
KEEP_OUTPUT=true npm run test:integration:sandbox:none
|
||||
```
|
||||
|
||||
When output is kept, the test runner will print the path to the unique directory for the test run.
|
||||
|
||||
### Verbose output
|
||||
|
||||
For more detailed debugging, the `--verbose` flag streams the real-time output from the `gemini` command to the console.
|
||||
|
||||
```bash
|
||||
npm run test:integration:sandbox:none -- --verbose
|
||||
```
|
||||
|
||||
When using `--verbose` and `--keep-output` in the same command, the output is streamed to the console and also saved to a log file within the test's temporary directory.
|
||||
|
||||
The verbose output is formatted to clearly identify the source of the logs:
|
||||
|
||||
```
|
||||
--- TEST: <file-name-without-js>:<test-name> ---
|
||||
... output from the gemini command ...
|
||||
--- END TEST: <file-name-without-js>:<test-name> ---
|
||||
```
|
||||
|
||||
## Linting and formatting
|
||||
|
||||
To ensure code quality and consistency, the integration test files are linted as part of the main build process. You can also manually run the linter and auto-fixer.
|
||||
|
||||
### Running the linter
|
||||
|
||||
To check for linting errors, run the following command:
|
||||
|
||||
```bash
|
||||
npm run lint
|
||||
```
|
||||
|
||||
You can include the `--fix` flag in the command to automatically fix any fixable linting errors:
|
||||
|
||||
```bash
|
||||
npm run lint -- --fix
|
||||
```
|
||||
|
||||
## Directory structure
|
||||
|
||||
The integration tests create a unique directory for each test run inside the `.integration-tests` directory. Within this directory, a subdirectory is created for each test file, and within that, a subdirectory is created for each individual test case.
|
||||
|
||||
This structure makes it easy to locate the artifacts for a specific test run, file, or case.
|
||||
|
||||
```
|
||||
.integration-tests/
|
||||
└── <run-id>/
|
||||
└── <test-file-name>.test.js/
|
||||
└── <test-case-name>/
|
||||
├── output.log
|
||||
└── ...other test artifacts...
|
||||
```
|
||||
|
||||
## Continuous integration
|
||||
|
||||
To ensure the integration tests are always run, a GitHub Actions workflow is defined in `.github/workflows/e2e.yml`. This workflow automatically runs the integration tests on every pull request and push to the `main` branch.
|
||||
|
||||
The workflow runs the tests in different sandboxing environments to ensure Gemini CLI is tested across each:
|
||||
|
||||
- `sandbox:none`: Runs the tests without any sandboxing.
|
||||
- `sandbox:docker`: Runs the tests in a Docker container.
|
||||
- `sandbox:podman`: Runs the tests in a Podman container.
|
|
@ -0,0 +1,280 @@
|
|||
# Package Overview
|
||||
|
||||
This monorepo contains two main packages: `@google/gemini-cli` and `@google/gemini-cli-core`.
|
||||
|
||||
## `@google/gemini-cli`
|
||||
|
||||
This is the main package for the Gemini CLI. It is responsible for the user interface, command parsing, and all other user-facing functionality.
|
||||
|
||||
When this package is published, it is bundled into a single executable file. This bundle includes all of the package's dependencies, including `@google/gemini-cli-core`. This means that whether a user installs the package with `npm install -g @google/gemini-cli` or runs it directly with `npx @google/gemini-cli`, they are using this single, self-contained executable.
|
||||
|
||||
## `@google/gemini-cli-core`
|
||||
|
||||
This package contains the core logic for interacting with the Gemini API. It is responsible for making API requests, handling authentication, and managing the local cache.
|
||||
|
||||
This package is not bundled. When it is published, it is published as a standard Node.js package with its own dependencies. This allows it to be used as a standalone package in other projects, if needed. All transpiled js code in the `dist` folder is included in the package.
|
||||
|
||||
# Release Process
|
||||
|
||||
This project follows a structured release process to ensure that all packages are versioned and published correctly. The process is designed to be as automated as possible.
|
||||
|
||||
## How To Release
|
||||
|
||||
Releases are managed through the [release.yml](https://github.com/google-gemini/gemini-cli/actions/workflows/release.yml) GitHub Actions workflow. To perform a manual release for a patch or hotfix:
|
||||
|
||||
1. Navigate to the **Actions** tab of the repository.
|
||||
2. Select the **Release** workflow from the list.
|
||||
3. Click the **Run workflow** dropdown button.
|
||||
4. Fill in the required inputs:
|
||||
- **Version**: The exact version to release (e.g., `v0.2.1`).
|
||||
- **Ref**: The branch or commit SHA to release from (defaults to `main`).
|
||||
- **Dry Run**: Leave as `true` to test the workflow without publishing, or set to `false` to perform a live release.
|
||||
5. Click **Run workflow**.
|
||||
|
||||
## Nightly Releases
|
||||
|
||||
In addition to manual releases, this project has an automated nightly release process to provide the latest "bleeding edge" version for testing and development.
|
||||
|
||||
### Process
|
||||
|
||||
Every night at midnight UTC, the [Release workflow](https://github.com/google-gemini/gemini-cli/actions/workflows/release.yml) runs automatically on a schedule. It performs the following steps:
|
||||
|
||||
1. Checks out the latest code from the `main` branch.
|
||||
2. Installs all dependencies.
|
||||
3. Runs the full suite of `preflight` checks and integration tests.
|
||||
4. If all tests succeed, it calculates the next nightly version number (e.g., `v0.2.1-nightly.20230101`).
|
||||
5. It then builds and publishes the packages to npm with the `nightly` dist-tag.
|
||||
6. Finally, it creates a GitHub Release for the nightly version.
|
||||
|
||||
### Failure Handling
|
||||
|
||||
If any step in the nightly workflow fails, it will automatically create a new issue in the repository with the labels `bug` and `nightly-failure`. The issue will contain a link to the failed workflow run for easy debugging.
|
||||
|
||||
### How to Use the Nightly Build
|
||||
|
||||
To install the latest nightly build, use the `@nightly` tag:
|
||||
|
||||
```bash
|
||||
npm install -g @google/gemini-cli@nightly
|
||||
```
|
||||
|
||||
We also run a Google Cloud Build called [release-docker.yml](../.gcp/release-docker.yaml), which publishes the sandbox Docker image to match your release. This will also be moved to GitHub and combined with the main release file once service account permissions are sorted out.
|
||||
|
||||
### After the Release
|
||||
|
||||
After the workflow has successfully completed, you can monitor its progress in the [GitHub Actions tab](https://github.com/google-gemini/gemini-cli/actions/workflows/release.yml). Once complete, you should:
|
||||
|
||||
1. Go to the [pull requests page](https://github.com/google-gemini/gemini-cli/pulls) of the repository.
|
||||
2. Create a new pull request from the `release/vX.Y.Z` branch to `main`.
|
||||
3. Review the pull request (it should only contain version updates in `package.json` files) and merge it. This keeps the version in `main` up-to-date.
|
||||
|
||||
## Release Validation
|
||||
|
||||
After pushing a new release, smoke testing should be performed to ensure that the packages are working as expected. This can be done by installing the packages locally and running a set of tests to ensure that they are functioning correctly.
|
||||
|
||||
- `npx -y @google/gemini-cli@latest --version` to validate the push worked as expected if you were not doing an RC or dev tag
|
||||
- `npx -y @google/gemini-cli@<release tag> --version` to validate the tag pushed appropriately
|
||||
- _This is destructive locally_ `npm uninstall @google/gemini-cli && npm uninstall -g @google/gemini-cli && npm cache clean --force && npm install @google/gemini-cli@<version>`
|
||||
- Smoke testing — a basic run-through exercising a few LLM commands and tools — is recommended to ensure that the packages are working as expected. We'll codify this more in the future.
|
||||
|
||||
## When to merge the version change, or not?
|
||||
|
||||
The above pattern for creating patch or hotfix releases from current or older commits leaves the repository in the following state:
|
||||
|
||||
1. The Tag (`vX.Y.Z-patch.1`): This tag correctly points to the original commit on main
|
||||
that contains the stable code you intended to release. This is crucial. Anyone checking
|
||||
out this tag gets the exact code that was published.
|
||||
2. The Branch (`release-vX.Y.Z-patch.1`): This branch contains one new commit on top of the
|
||||
tagged commit. That new commit only contains the version number change in package.json
|
||||
(and other related files like package-lock.json).
|
||||
|
||||
This separation is good. It keeps your main branch history clean of release-specific
|
||||
version bumps until you decide to merge them.
|
||||
|
||||
This is the critical decision, and it depends entirely on the nature of the release.
|
||||
|
||||
### Merge Back for Stable Patches and Hotfixes
|
||||
|
||||
You almost always want to merge the `release-<tag>` branch back into `main` for any
|
||||
stable patch or hotfix release.
|
||||
|
||||
- Why? The primary reason is to update the version in main's package.json. If you release
|
||||
v1.2.1 from an older commit but never merge the version bump back, your main branch's
|
||||
package.json will still say "version": "1.2.0". The next developer who starts work for
|
||||
the next feature release (v1.3.0) will be branching from a codebase that has an
|
||||
incorrect, older version number. This leads to confusion and requires manual version
|
||||
bumping later.
|
||||
- The Process: After the release-v1.2.1 branch is created and the package is successfully
|
||||
published, you should open a pull request to merge release-v1.2.1 into main. This PR
|
||||
will contain just one commit: "chore: bump version to v1.2.1". It's a clean, simple
|
||||
integration that keeps your main branch in sync with the latest released version.
|
||||
|
||||
### Do NOT Merge Back for Pre-Releases (RC, Beta, Dev)
|
||||
|
||||
You typically do not merge release branches for pre-releases back into `main`.
|
||||
|
||||
- Why? Pre-release versions (e.g., v1.3.0-rc.1, v1.3.0-rc.2) are, by definition, not
|
||||
stable and are temporary. You don't want to pollute your main branch's history with a
|
||||
series of version bumps for release candidates. The package.json in main should reflect
|
||||
the latest stable release version, not an RC.
|
||||
- The Process: The release-v1.3.0-rc.1 branch is created, the npm publish --tag rc happens,
|
||||
and then... the branch has served its purpose. You can simply delete it. The code for
|
||||
the RC is already on main (or a feature branch), so no functional code is lost. The
|
||||
release branch was just a temporary vehicle for the version number.
|
||||
|
||||
## Local Testing and Validation: Changes to the Packaging and Publishing Process
|
||||
|
||||
If you need to test the release process without actually publishing to NPM or creating a public GitHub release, you can trigger the workflow manually from the GitHub UI.
|
||||
|
||||
1. Go to the [Actions tab](https://github.com/google-gemini/gemini-cli/actions/workflows/release.yml) of the repository.
|
||||
2. Click on the "Run workflow" dropdown.
|
||||
3. Leave the `dry_run` option checked (`true`).
|
||||
4. Click the "Run workflow" button.
|
||||
|
||||
This will run the entire release process but will skip the `npm publish` and `gh release create` steps. You can inspect the workflow logs to ensure everything is working as expected.
|
||||
|
||||
It is crucial to test any changes to the packaging and publishing process locally before committing them. This ensures that the packages will be published correctly and that they will work as expected when installed by a user.
|
||||
|
||||
To validate your changes, you can perform a dry run of the publishing process. This will simulate the publishing process without actually publishing the packages to the npm registry.
|
||||
|
||||
```bash
|
||||
npm_package_version=9.9.9 SANDBOX_IMAGE_REGISTRY="registry" SANDBOX_IMAGE_NAME="thename" npm run publish:npm --dry-run
|
||||
```
|
||||
|
||||
This command will do the following:
|
||||
|
||||
1. Build all the packages.
|
||||
2. Run all the prepublish scripts.
|
||||
3. Create the package tarballs that would be published to npm.
|
||||
4. Print a summary of the packages that would be published.
|
||||
|
||||
You can then inspect the generated tarballs to ensure that they contain the correct files and that the `package.json` files have been updated correctly. The tarballs will be created in the root of each package's directory (e.g., `packages/cli/google-gemini-cli-0.1.6.tgz`).
|
||||
|
||||
By performing a dry run, you can be confident that your changes to the packaging process are correct and that the packages will be published successfully.
|
||||
|
||||
## Release Deep Dive
|
||||
|
||||
The main goal of the release process is to take the source code from the packages/ directory, build it, and assemble a
|
||||
clean, self-contained package in a temporary `bundle` directory at the root of the project. This `bundle` directory is what
|
||||
actually gets published to NPM.
|
||||
|
||||
Here are the key stages:
|
||||
|
||||
Stage 1: Pre-Release Sanity Checks and Versioning
|
||||
|
||||
- What happens: Before any files are moved, the process ensures the project is in a good state. This involves running tests,
|
||||
linting, and type-checking (npm run preflight). The version number in the root package.json and packages/cli/package.json
|
||||
is updated to the new release version.
|
||||
- Why: This guarantees that only high-quality, working code is released. Versioning is the first step to signify a new
|
||||
release.
|
||||
|
||||
Stage 2: Building the Source Code
|
||||
|
||||
- What happens: The TypeScript source code in packages/core/src and packages/cli/src is compiled into JavaScript.
|
||||
- File movement:
|
||||
- `packages/core/src/**/*.ts` -> compiled to -> `packages/core/dist/`
|
||||
- `packages/cli/src/**/*.ts` -> compiled to -> `packages/cli/dist/`
|
||||
- Why: The TypeScript code written during development needs to be converted into plain JavaScript that can be run by
|
||||
Node.js. The core package is built first as the cli package depends on it.
|
||||
|
||||
Stage 3: Assembling the Final Publishable Package
|
||||
|
||||
This is the most critical stage where files are moved and transformed into their final state for publishing. A temporary
|
||||
`bundle` folder is created at the project root to house the final package contents.
|
||||
|
||||
1. The `package.json` is Transformed:
|
||||
- What happens: The package.json from packages/cli/ is read, modified, and written into the root `bundle`/ directory.
|
||||
- File movement: packages/cli/package.json -> (in-memory transformation) -> `bundle`/package.json
|
||||
- Why: The final package.json must be different from the one used in development. Key changes include:
|
||||
- Removing devDependencies.
|
||||
- Removing the workspace-specific `"dependencies": { "@gemini-cli/core": "workspace:*" }` entry and ensuring the core code is
|
||||
bundled directly into the final JavaScript file.
|
||||
- Ensuring the bin, main, and files fields point to the correct locations within the final package structure.
|
||||
|
||||
2. The JavaScript Bundle is Created:
|
||||
- What happens: The built JavaScript from both packages/core/dist and packages/cli/dist are bundled into a single,
|
||||
executable JavaScript file.
|
||||
- File movement: packages/cli/dist/index.js + packages/core/dist/index.js -> (bundled by esbuild) -> `bundle`/gemini.js (or a
|
||||
similar name).
|
||||
- Why: This creates a single, optimized file that contains all the necessary application code. It simplifies the package
|
||||
by removing the need for the core package to be a separate dependency on NPM, as its code is now included directly.
|
||||
|
||||
3. Static and Supporting Files are Copied:
|
||||
- What happens: Essential files that are not part of the source code but are required for the package to work correctly
|
||||
or be well-described are copied into the `bundle` directory.
|
||||
- File movement:
|
||||
- README.md -> `bundle`/README.md
|
||||
- LICENSE -> `bundle`/LICENSE
|
||||
- packages/cli/src/utils/\*.sb (sandbox profiles) -> `bundle`/
|
||||
- Why:
|
||||
- The README.md and LICENSE are standard files that should be included in any NPM package.
|
||||
- The sandbox profiles (.sb files) are critical runtime assets required for the CLI's sandboxing feature to
|
||||
function. They must be located next to the final executable.
|
||||
|
||||
Stage 4: Publishing to NPM
|
||||
|
||||
- What happens: The npm publish command is run from inside the root `bundle` directory.
|
||||
- Why: By running npm publish from within the `bundle` directory, only the files we carefully assembled in Stage 3 are uploaded
|
||||
to the NPM registry. This prevents any source code, test files, or development configurations from being accidentally
|
||||
published, resulting in a clean and minimal package for users.
|
||||
|
||||
Summary of File Flow
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
subgraph "Source Files"
|
||||
A["packages/core/src/*.ts<br/>packages/cli/src/*.ts"]
|
||||
B["packages/cli/package.json"]
|
||||
C["README.md<br/>LICENSE<br/>packages/cli/src/utils/*.sb"]
|
||||
end
|
||||
|
||||
subgraph "Process"
|
||||
D(Build)
|
||||
E(Transform)
|
||||
F(Assemble)
|
||||
G(Publish)
|
||||
end
|
||||
|
||||
subgraph "Artifacts"
|
||||
H["Bundled JS"]
|
||||
I["Final package.json"]
|
||||
J["bundle/"]
|
||||
end
|
||||
|
||||
subgraph "Destination"
|
||||
K["NPM Registry"]
|
||||
end
|
||||
|
||||
A --> D --> H
|
||||
B --> E --> I
|
||||
C --> F
|
||||
H --> F
|
||||
I --> F
|
||||
F --> J
|
||||
J --> G --> K
|
||||
```
|
||||
|
||||
This process ensures that the final published artifact is a purpose-built, clean, and efficient representation of the
|
||||
project, rather than a direct copy of the development workspace.
|
||||
|
||||
## NPM Workspaces
|
||||
|
||||
This project uses [NPM Workspaces](https://docs.npmjs.com/cli/v10/using-npm/workspaces) to manage the packages within this monorepo. This simplifies development by allowing us to manage dependencies and run scripts across multiple packages from the root of the project.
|
||||
|
||||
### How it Works
|
||||
|
||||
The root `package.json` file defines the workspaces for this project:
|
||||
|
||||
```json
|
||||
{
|
||||
"workspaces": ["packages/*"]
|
||||
}
|
||||
```
|
||||
|
||||
This tells NPM that any folder inside the `packages` directory is a separate package that should be managed as part of the workspace.
|
||||
|
||||
### Benefits of Workspaces
|
||||
|
||||
- **Simplified Dependency Management**: Running `npm install` from the root of the project will install all dependencies for all packages in the workspace and link them together. This means you don't need to run `npm install` in each package's directory.
|
||||
- **Automatic Linking**: Packages within the workspace can depend on each other. When you run `npm install`, NPM will automatically create symlinks between the packages. This means that when you make changes to one package, the changes are immediately available to other packages that depend on it.
|
||||
- **Simplified Script Execution**: You can run scripts in any package from the root of the project using the `--workspace` flag. For example, to run the `build` script in the `cli` package, you can run `npm run build --workspace @google/gemini-cli`.
|
|
@ -0,0 +1,70 @@
|
|||
# Gemini CLI: Quotas and Pricing
|
||||
|
||||
Your Gemini CLI quotas and pricing depend on the type of account you use to authenticate with Google. Additionally, both quotas and pricing may be calculated differently based on the model version, requests, and tokens used. A summary of model usage is available through the `/stats` command and presented on exit at the end of a session. See [privacy and terms](./tos-privacy.md) for details on Privacy policy and Terms of Service. Note: published prices are list price; additional negotiated commercial discounting may apply.
|
||||
|
||||
This article outlines the specific quotas and pricing applicable to the Gemini CLI when using different authentication methods.
|
||||
|
||||
## 1. Log in with Google (Gemini Code Assist Free Tier)
|
||||
|
||||
For users who authenticate by using their Google account to access Gemini Code Assist for individuals:
|
||||
|
||||
- **Quota:**
|
||||
- 60 requests per minute
|
||||
- 1000 requests per day
|
||||
- Token usage is not applicable
|
||||
- **Cost:** Free
|
||||
- **Details:** [Gemini Code Assist Quotas](https://developers.google.com/gemini-code-assist/resources/quotas#quotas-for-agent-mode-gemini-cli)
|
||||
- **Notes:** A specific quota for different models is not specified; model fallback may occur to preserve shared experience quality.
|
||||
|
||||
## 2. Gemini API Key (Unpaid)
|
||||
|
||||
If you are using a Gemini API key for the free tier:
|
||||
|
||||
- **Quota:**
|
||||
- Flash model only
|
||||
- 10 requests per minute
|
||||
- 250 requests per day
|
||||
- **Cost:** Free
|
||||
- **Details:** [Gemini API Rate Limits](https://ai.google.dev/gemini-api/docs/rate-limits)
|
||||
|
||||
## 3. Gemini API Key (Paid)
|
||||
|
||||
If you are using a Gemini API key with a paid plan:
|
||||
|
||||
- **Quota:** Varies by pricing tier.
|
||||
- **Cost:** Varies by pricing tier and model/token usage.
|
||||
- **Details:** [Gemini API Rate Limits](https://ai.google.dev/gemini-api/docs/rate-limits), [Gemini API Pricing](https://ai.google.dev/gemini-api/docs/pricing)
|
||||
|
||||
## 4. Login with Google (for Workspace or Licensed Code Assist users)
|
||||
|
||||
For users of Standard or Enterprise editions of Gemini Code Assist, quotas and pricing are based on a fixed price subscription with assigned license seats:
|
||||
|
||||
- **Standard Tier:**
|
||||
- **Quota:** 120 requests per minute, 1500 per day
|
||||
- **Enterprise Tier:**
|
||||
- **Quota:** 120 requests per minute, 2000 per day
|
||||
- **Cost:** Fixed price included with your Gemini for Google Workspace or Gemini Code Assist subscription.
|
||||
- **Details:** [Gemini Code Assist Quotas](https://developers.google.com/gemini-code-assist/resources/quotas#quotas-for-agent-mode-gemini-cli), [Gemini Code Assist Pricing](https://cloud.google.com/products/gemini/pricing)
|
||||
- **Notes:**
|
||||
- Specific quota for different models is not specified; model fallback may occur to preserve shared experience quality.
|
||||
- Members of the Google Developer Program may have Gemini Code Assist licenses through their membership.
|
||||
|
||||
## 5. Vertex AI (Express Mode)
|
||||
|
||||
If you are using Vertex AI in Express Mode:
|
||||
|
||||
- **Quota:** Quotas are variable and specific to your account. See the source for more details.
|
||||
- **Cost:** After your Express Mode usage is consumed and you enable billing for your project, cost is based on standard [Vertex AI Pricing](https://cloud.google.com/vertex-ai/pricing).
|
||||
- **Details:** [Vertex AI Express Mode Quotas](https://cloud.google.com/vertex-ai/generative-ai/docs/start/express-mode/overview#quotas)
|
||||
|
||||
## 6. Vertex AI (Regular Mode)
|
||||
|
||||
If you are using the standard Vertex AI service:
|
||||
|
||||
- **Quota:** Governed by a dynamic shared quota system or pre-purchased provisioned throughput.
|
||||
- **Cost:** Based on model and token usage. See [Vertex AI Pricing](https://cloud.google.com/vertex-ai/pricing).
|
||||
- **Details:** [Vertex AI Dynamic Shared Quota](https://cloud.google.com/vertex-ai/generative-ai/docs/resources/dynamic-shared-quota)
|
||||
|
||||
## 7. Google One and Ultra plans, Gemini for Workspace plans
|
||||
|
||||
These plans currently apply only to the Gemini web-based experiences provided by Google (for example, the Gemini web app or the Flow video editor). These plans do not apply to the API usage which powers the Gemini CLI. Supporting these plans is under active consideration for future support.
|
|
@ -0,0 +1,135 @@
|
|||
# Sandboxing in the Gemini CLI
|
||||
|
||||
This document provides a guide to sandboxing in the Gemini CLI, including prerequisites, quickstart, and configuration.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
Before using sandboxing, you need to install and set up the Gemini CLI:
|
||||
|
||||
```bash
|
||||
npm install -g @google/gemini-cli
|
||||
```
|
||||
|
||||
To verify the installation:
|
||||
|
||||
```bash
|
||||
gemini --version
|
||||
```
|
||||
|
||||
## Overview of sandboxing
|
||||
|
||||
Sandboxing isolates potentially dangerous operations (such as shell commands or file modifications) from your host system, providing a security barrier between AI operations and your environment.
|
||||
|
||||
The benefits of sandboxing include:
|
||||
|
||||
- **Security**: Prevent accidental system damage or data loss.
|
||||
- **Isolation**: Limit file system access to project directory.
|
||||
- **Consistency**: Ensure reproducible environments across different systems.
|
||||
- **Safety**: Reduce risk when working with untrusted code or experimental commands.
|
||||
|
||||
## Sandboxing methods
|
||||
|
||||
Your ideal method of sandboxing may differ depending on your platform and your preferred container solution.
|
||||
|
||||
### 1. macOS Seatbelt (macOS only)
|
||||
|
||||
Lightweight, built-in sandboxing using `sandbox-exec`.
|
||||
|
||||
**Default profile**: `permissive-open` - restricts writes outside project directory but allows most other operations.
|
||||
|
||||
### 2. Container-based (Docker/Podman)
|
||||
|
||||
Cross-platform sandboxing with complete process isolation.
|
||||
|
||||
**Note**: Requires building the sandbox image locally or using a published image from your organization's registry.
|
||||
|
||||
## Quickstart
|
||||
|
||||
```bash
|
||||
# Enable sandboxing with command flag
|
||||
gemini -s -p "analyze the code structure"
|
||||
|
||||
# Use environment variable
|
||||
export GEMINI_SANDBOX=true
|
||||
gemini -p "run the test suite"
|
||||
|
||||
# Configure in settings.json
|
||||
{
|
||||
"sandbox": "docker"
|
||||
}
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Enable sandboxing (in order of precedence)
|
||||
|
||||
1. **Command flag**: `-s` or `--sandbox`
|
||||
2. **Environment variable**: `GEMINI_SANDBOX=true|docker|podman|sandbox-exec`
|
||||
3. **Settings file**: `"sandbox": true` in `settings.json`
|
||||
|
||||
### macOS Seatbelt profiles
|
||||
|
||||
Built-in profiles (set via `SEATBELT_PROFILE` env var):
|
||||
|
||||
- `permissive-open` (default): Write restrictions, network allowed
|
||||
- `permissive-closed`: Write restrictions, no network
|
||||
- `permissive-proxied`: Write restrictions, network via proxy
|
||||
- `restrictive-open`: Strict restrictions, network allowed
|
||||
- `restrictive-closed`: Maximum restrictions
|
||||
|
||||
## Linux UID/GID handling
|
||||
|
||||
The sandbox automatically handles user permissions on Linux. Override these permissions with:
|
||||
|
||||
```bash
|
||||
export SANDBOX_SET_UID_GID=true # Force host UID/GID
|
||||
export SANDBOX_SET_UID_GID=false # Disable UID/GID mapping
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common issues
|
||||
|
||||
**"Operation not permitted"**
|
||||
|
||||
- Operation requires access outside sandbox.
|
||||
- Try more permissive profile or add mount points.
|
||||
|
||||
**Missing commands**
|
||||
|
||||
- Add to custom Dockerfile.
|
||||
- Install via `sandbox.bashrc`.
|
||||
|
||||
**Network issues**
|
||||
|
||||
- Check sandbox profile allows network.
|
||||
- Verify proxy configuration.
|
||||
|
||||
### Debug mode
|
||||
|
||||
```bash
|
||||
DEBUG=1 gemini -s -p "debug command"
|
||||
```
|
||||
|
||||
### Inspect sandbox
|
||||
|
||||
```bash
|
||||
# Check environment
|
||||
gemini -s -p "run shell command: env | grep SANDBOX"
|
||||
|
||||
# List mounts
|
||||
gemini -s -p "run shell command: mount | grep workspace"
|
||||
```
|
||||
|
||||
## Security notes
|
||||
|
||||
- Sandboxing reduces but doesn't eliminate all risks.
|
||||
- Use the most restrictive profile that allows your work.
|
||||
- Container overhead is minimal after first build.
|
||||
- GUI applications may not work in sandboxes.
|
||||
|
||||
## Related documentation
|
||||
|
||||
- [Configuration](./cli/configuration.md): Full configuration options.
|
||||
- [Commands](./cli/commands.md): Available commands.
|
||||
- [Troubleshooting](./troubleshooting.md): General troubleshooting.
|
|
@ -0,0 +1,238 @@
|
|||
# Gemini CLI Observability Guide
|
||||
|
||||
Telemetry provides data about Gemini CLI's performance, health, and usage. By enabling it, you can monitor operations, debug issues, and optimize tool usage through traces, metrics, and structured logs.
|
||||
|
||||
Gemini CLI's telemetry system is built on the **[OpenTelemetry] (OTEL)** standard, allowing you to send data to any compatible backend.
|
||||
|
||||
[OpenTelemetry]: https://opentelemetry.io/
|
||||
|
||||
## Enabling telemetry
|
||||
|
||||
You can enable telemetry in multiple ways. Configuration is primarily managed via the [`.qwen/settings.json` file](./cli/configuration.md) and environment variables, but CLI flags can override these settings for a specific session.
|
||||
|
||||
### Order of precedence
|
||||
|
||||
The following lists the precedence for applying telemetry settings, with items listed higher having greater precedence:
|
||||
|
||||
1. **CLI flags (for `gemini` command):**
|
||||
- `--telemetry` / `--no-telemetry`: Overrides `telemetry.enabled`.
|
||||
- `--telemetry-target <local|gcp>`: Overrides `telemetry.target`.
|
||||
- `--telemetry-otlp-endpoint <URL>`: Overrides `telemetry.otlpEndpoint`.
|
||||
- `--telemetry-log-prompts` / `--no-telemetry-log-prompts`: Overrides `telemetry.logPrompts`.
|
||||
|
||||
1. **Environment variables:**
|
||||
- `OTEL_EXPORTER_OTLP_ENDPOINT`: Overrides `telemetry.otlpEndpoint`.
|
||||
|
||||
1. **Workspace settings file (`.qwen/settings.json`):** Values from the `telemetry` object in this project-specific file.
|
||||
|
||||
1. **User settings file (`~/.qwen/settings.json`):** Values from the `telemetry` object in this global user file.
|
||||
|
||||
1. **Defaults:** applied if not set by any of the above.
|
||||
- `telemetry.enabled`: `false`
|
||||
- `telemetry.target`: `local`
|
||||
- `telemetry.otlpEndpoint`: `http://localhost:4317`
|
||||
- `telemetry.logPrompts`: `true`
|
||||
|
||||
**For the `npm run telemetry -- --target=<gcp|local>` script:**
|
||||
The `--target` argument to this script _only_ overrides the `telemetry.target` for the duration and purpose of that script (i.e., choosing which collector to start). It does not permanently change your `settings.json`. The script will first look at `settings.json` for a `telemetry.target` to use as its default.
|
||||
|
||||
### Example settings
|
||||
|
||||
The following code can be added to your workspace (`.qwen/settings.json`) or user (`~/.qwen/settings.json`) settings to enable telemetry and send the output to Google Cloud:
|
||||
|
||||
```json
|
||||
{
|
||||
"telemetry": {
|
||||
"enabled": true,
|
||||
"target": "gcp"
|
||||
},
|
||||
"sandbox": false
|
||||
}
|
||||
```
|
||||
|
||||
## Running an OTEL Collector
|
||||
|
||||
An OTEL Collector is a service that receives, processes, and exports telemetry data.
|
||||
The CLI sends data using the OTLP/gRPC protocol.
|
||||
|
||||
Learn more about OTEL exporter standard configuration in [documentation][otel-config-docs].
|
||||
|
||||
[otel-config-docs]: https://opentelemetry.io/docs/languages/sdk-configuration/otlp-exporter/
|
||||
|
||||
### Local
|
||||
|
||||
Use the `npm run telemetry -- --target=local` command to automate the process of setting up a local telemetry pipeline, including configuring the necessary settings in your `.qwen/settings.json` file. The underlying script installs `otelcol-contrib` (the OpenTelemetry Collector) and `jaeger` (The Jaeger UI for viewing traces). To use it:
|
||||
|
||||
1. **Run the command**:
|
||||
Execute the command from the root of the repository:
|
||||
|
||||
```bash
|
||||
npm run telemetry -- --target=local
|
||||
```
|
||||
|
||||
The script will:
|
||||
- Download Jaeger and OTEL if needed.
|
||||
- Start a local Jaeger instance.
|
||||
- Start an OTEL collector configured to receive data from Gemini CLI.
|
||||
- Automatically enable telemetry in your workspace settings.
|
||||
- On exit, disable telemetry.
|
||||
|
||||
1. **View traces**:
|
||||
Open your web browser and navigate to **http://localhost:16686** to access the Jaeger UI. Here you can inspect detailed traces of Gemini CLI operations.
|
||||
|
||||
1. **Inspect logs and metrics**:
|
||||
The script redirects the OTEL collector output (which includes logs and metrics) to `~/.qwen/tmp/<projectHash>/otel/collector.log`. The script will provide links to view and a command to tail your telemetry data (traces, metrics, logs) locally.
|
||||
|
||||
1. **Stop the services**:
|
||||
Press `Ctrl+C` in the terminal where the script is running to stop the OTEL Collector and Jaeger services.
|
||||
|
||||
### Google Cloud
|
||||
|
||||
Use the `npm run telemetry -- --target=gcp` command to automate setting up a local OpenTelemetry collector that forwards data to your Google Cloud project, including configuring the necessary settings in your `.qwen/settings.json` file. The underlying script installs `otelcol-contrib`. To use it:
|
||||
|
||||
1. **Prerequisites**:
|
||||
- Have a Google Cloud project ID.
|
||||
- Export the `OTLP_GOOGLE_CLOUD_PROJECT` environment variable to make your project ID available to the OTEL collector.
|
||||
```bash
|
||||
export OTLP_GOOGLE_CLOUD_PROJECT="your-project-id"
|
||||
```
|
||||
- Authenticate with Google Cloud (e.g., run `gcloud auth application-default login` or ensure `GOOGLE_APPLICATION_CREDENTIALS` is set).
|
||||
- Ensure your Google Cloud account/service account has the necessary IAM roles: "Cloud Trace Agent", "Monitoring Metric Writer", and "Logs Writer".
|
||||
|
||||
1. **Run the command**:
|
||||
Execute the command from the root of the repository:
|
||||
|
||||
```bash
|
||||
npm run telemetry -- --target=gcp
|
||||
```
|
||||
|
||||
The script will:
|
||||
- Download the `otelcol-contrib` binary if needed.
|
||||
- Start an OTEL collector configured to receive data from Gemini CLI and export it to your specified Google Cloud project.
|
||||
- Automatically enable telemetry and disable sandbox mode in your workspace settings (`.qwen/settings.json`).
|
||||
- Provide direct links to view traces, metrics, and logs in your Google Cloud Console.
|
||||
- On exit (Ctrl+C), it will attempt to restore your original telemetry and sandbox settings.
|
||||
|
||||
1. **Run Gemini CLI:**
|
||||
In a separate terminal, run your Gemini CLI commands. This generates telemetry data that the collector captures.
|
||||
|
||||
1. **View telemetry in Google Cloud**:
|
||||
Use the links provided by the script to navigate to the Google Cloud Console and view your traces, metrics, and logs.
|
||||
|
||||
1. **Inspect local collector logs**:
|
||||
The script redirects the local OTEL collector output to `~/.qwen/tmp/<projectHash>/otel/collector-gcp.log`. The script provides links to view and a command to tail your collector logs locally.
|
||||
|
||||
1. **Stop the service**:
|
||||
Press `Ctrl+C` in the terminal where the script is running to stop the OTEL Collector.
|
||||
|
||||
## Logs and metric reference
|
||||
|
||||
The following section describes the structure of logs and metrics generated for Gemini CLI.
|
||||
|
||||
- A `sessionId` is included as a common attribute on all logs and metrics.
|
||||
|
||||
### Logs
|
||||
|
||||
Logs are timestamped records of specific events. The following events are logged for Gemini CLI:
|
||||
|
||||
- `gemini_cli.config`: This event occurs once at startup with the CLI's configuration.
|
||||
- **Attributes**:
|
||||
- `model` (string)
|
||||
- `embedding_model` (string)
|
||||
- `sandbox_enabled` (boolean)
|
||||
- `core_tools_enabled` (string)
|
||||
- `approval_mode` (string)
|
||||
- `api_key_enabled` (boolean)
|
||||
- `vertex_ai_enabled` (boolean)
|
||||
- `code_assist_enabled` (boolean)
|
||||
- `log_prompts_enabled` (boolean)
|
||||
- `file_filtering_respect_git_ignore` (boolean)
|
||||
- `debug_mode` (boolean)
|
||||
- `mcp_servers` (string)
|
||||
|
||||
- `gemini_cli.user_prompt`: This event occurs when a user submits a prompt.
|
||||
- **Attributes**:
|
||||
- `prompt_length`
|
||||
- `prompt` (this attribute is excluded if `log_prompts_enabled` is configured to be `false`)
|
||||
- `auth_type`
|
||||
|
||||
- `gemini_cli.tool_call`: This event occurs for each function call.
|
||||
- **Attributes**:
|
||||
- `function_name`
|
||||
- `function_args`
|
||||
- `duration_ms`
|
||||
- `success` (boolean)
|
||||
- `decision` (string: "accept", "reject", or "modify", if applicable)
|
||||
- `error` (if applicable)
|
||||
- `error_type` (if applicable)
|
||||
|
||||
- `gemini_cli.api_request`: This event occurs when making a request to Gemini API.
|
||||
- **Attributes**:
|
||||
- `model`
|
||||
- `request_text` (if applicable)
|
||||
|
||||
- `gemini_cli.api_error`: This event occurs if the API request fails.
|
||||
- **Attributes**:
|
||||
- `model`
|
||||
- `error`
|
||||
- `error_type`
|
||||
- `status_code`
|
||||
- `duration_ms`
|
||||
- `auth_type`
|
||||
|
||||
- `gemini_cli.api_response`: This event occurs upon receiving a response from Gemini API.
|
||||
- **Attributes**:
|
||||
- `model`
|
||||
- `status_code`
|
||||
- `duration_ms`
|
||||
- `error` (optional)
|
||||
- `input_token_count`
|
||||
- `output_token_count`
|
||||
- `cached_content_token_count`
|
||||
- `thoughts_token_count`
|
||||
- `tool_token_count`
|
||||
- `response_text` (if applicable)
|
||||
- `auth_type`
|
||||
|
||||
- `gemini_cli.flash_fallback`: This event occurs when Gemini CLI switches to flash as fallback.
|
||||
- **Attributes**:
|
||||
- `auth_type`
|
||||
|
||||
### Metrics
|
||||
|
||||
Metrics are numerical measurements of behavior over time. The following metrics are collected for Gemini CLI:
|
||||
|
||||
- `gemini_cli.session.count` (Counter, Int): Incremented once per CLI startup.
|
||||
|
||||
- `gemini_cli.tool.call.count` (Counter, Int): Counts tool calls.
|
||||
- **Attributes**:
|
||||
- `function_name`
|
||||
- `success` (boolean)
|
||||
- `decision` (string: "accept", "reject", or "modify", if applicable)
|
||||
|
||||
- `gemini_cli.tool.call.latency` (Histogram, ms): Measures tool call latency.
|
||||
- **Attributes**:
|
||||
- `function_name`
|
||||
- `decision` (string: "accept", "reject", or "modify", if applicable)
|
||||
|
||||
- `gemini_cli.api.request.count` (Counter, Int): Counts all API requests.
|
||||
- **Attributes**:
|
||||
- `model`
|
||||
- `status_code`
|
||||
- `error_type` (if applicable)
|
||||
|
||||
- `gemini_cli.api.request.latency` (Histogram, ms): Measures API request latency.
|
||||
- **Attributes**:
|
||||
- `model`
|
||||
|
||||
- `gemini_cli.token.usage` (Counter, Int): Counts the number of tokens used.
|
||||
- **Attributes**:
|
||||
- `model`
|
||||
- `type` (string: "input", "output", "thought", "cache", or "tool")
|
||||
|
||||
- `gemini_cli.file.operation.count` (Counter, Int): Counts file operations.
|
||||
- **Attributes**:
|
||||
- `operation` (string: "create", "read", "update"): The type of file operation.
|
||||
- `lines` (Int, if applicable): Number of lines in the file.
|
||||
- `mimetype` (string, if applicable): Mimetype of the file.
|
||||
- `extension` (string, if applicable): File extension of the file.
|
|
@ -0,0 +1,143 @@
|
|||
# Gemini CLI file system tools
|
||||
|
||||
The Gemini CLI provides a comprehensive suite of tools for interacting with the local file system. These tools allow the Gemini model to read from, write to, list, search, and modify files and directories, all under your control and typically with confirmation for sensitive operations.
|
||||
|
||||
**Note:** All file system tools operate within a `rootDirectory` (usually the current working directory where you launched the CLI) for security. Paths that you provide to these tools are generally expected to be absolute or are resolved relative to this root directory.
|
||||
|
||||
## 1. `list_directory` (ReadFolder)
|
||||
|
||||
`list_directory` lists the names of files and subdirectories directly within a specified directory path. It can optionally ignore entries matching provided glob patterns.
|
||||
|
||||
- **Tool name:** `list_directory`
|
||||
- **Display name:** ReadFolder
|
||||
- **File:** `ls.ts`
|
||||
- **Parameters:**
|
||||
- `path` (string, required): The absolute path to the directory to list.
|
||||
- `ignore` (array of strings, optional): A list of glob patterns to exclude from the listing (e.g., `["*.log", ".git"]`).
|
||||
- `respect_git_ignore` (boolean, optional): Whether to respect `.gitignore` patterns when listing files. Defaults to `true`.
|
||||
- **Behavior:**
|
||||
- Returns a list of file and directory names.
|
||||
- Indicates whether each entry is a directory.
|
||||
- Sorts entries with directories first, then alphabetically.
|
||||
- **Output (`llmContent`):** A string like: `Directory listing for /path/to/your/folder:\n[DIR] subfolder1\nfile1.txt\nfile2.png`
|
||||
- **Confirmation:** No.
|
||||
|
||||
## 2. `read_file` (ReadFile)
|
||||
|
||||
`read_file` reads and returns the content of a specified file. This tool handles text, images (PNG, JPG, GIF, WEBP, SVG, BMP), and PDF files. For text files, it can read specific line ranges. Other binary file types are generally skipped.
|
||||
|
||||
- **Tool name:** `read_file`
|
||||
- **Display name:** ReadFile
|
||||
- **File:** `read-file.ts`
|
||||
- **Parameters:**
|
||||
- `path` (string, required): The absolute path to the file to read.
|
||||
- `offset` (number, optional): For text files, the 0-based line number to start reading from. Requires `limit` to be set.
|
||||
- `limit` (number, optional): For text files, the maximum number of lines to read. If omitted, reads a default maximum (e.g., 2000 lines) or the entire file if feasible.
|
||||
- **Behavior:**
|
||||
- For text files: Returns the content. If `offset` and `limit` are used, returns only that slice of lines. Indicates if content was truncated due to line limits or line length limits.
|
||||
- For image and PDF files: Returns the file content as a base64-encoded data structure suitable for model consumption.
|
||||
- For other binary files: Attempts to identify and skip them, returning a message indicating it's a generic binary file.
|
||||
- **Output (`llmContent`):**
|
||||
- For text files: The file content, potentially prefixed with a truncation message (e.g., `[File content truncated: showing lines 1-100 of 500 total lines...]\nActual file content...`).
|
||||
- For image/PDF files: An object containing `inlineData` with `mimeType` and base64 `data` (e.g., `{ inlineData: { mimeType: 'image/png', data: 'base64encodedstring' } }`).
|
||||
- For other binary files: A message like `Cannot display content of binary file: /path/to/data.bin`.
|
||||
- **Confirmation:** No.
|
||||
|
||||
## 3. `write_file` (WriteFile)
|
||||
|
||||
`write_file` writes content to a specified file. If the file exists, it will be overwritten. If the file doesn't exist, it (and any necessary parent directories) will be created.
|
||||
|
||||
- **Tool name:** `write_file`
|
||||
- **Display name:** WriteFile
|
||||
- **File:** `write-file.ts`
|
||||
- **Parameters:**
|
||||
- `file_path` (string, required): The absolute path to the file to write to.
|
||||
- `content` (string, required): The content to write into the file.
|
||||
- **Behavior:**
|
||||
- Writes the provided `content` to the `file_path`.
|
||||
- Creates parent directories if they don't exist.
|
||||
- **Output (`llmContent`):** A success message, e.g., `Successfully overwrote file: /path/to/your/file.txt` or `Successfully created and wrote to new file: /path/to/new/file.txt`.
|
||||
- **Confirmation:** Yes. Shows a diff of changes and asks for user approval before writing.
|
||||
|
||||
## 4. `glob` (FindFiles)
|
||||
|
||||
`glob` finds files matching specific glob patterns (e.g., `src/**/*.ts`, `*.md`), returning absolute paths sorted by modification time (newest first).
|
||||
|
||||
- **Tool name:** `glob`
|
||||
- **Display name:** FindFiles
|
||||
- **File:** `glob.ts`
|
||||
- **Parameters:**
|
||||
- `pattern` (string, required): The glob pattern to match against (e.g., `"*.py"`, `"src/**/*.js"`).
|
||||
- `path` (string, optional): The absolute path to the directory to search within. If omitted, searches the tool's root directory.
|
||||
- `case_sensitive` (boolean, optional): Whether the search should be case-sensitive. Defaults to `false`.
|
||||
- `respect_git_ignore` (boolean, optional): Whether to respect `.gitignore` patterns when finding files. Defaults to `true`.
|
||||
- **Behavior:**
|
||||
- Searches for files matching the glob pattern within the specified directory.
|
||||
- Returns a list of absolute paths, sorted with the most recently modified files first.
|
||||
- Ignores common nuisance directories like `node_modules` and `.git` by default.
|
||||
- **Output (`llmContent`):** A message like: `Found 5 file(s) matching "*.ts" within src, sorted by modification time (newest first):\nsrc/file1.ts\nsrc/subdir/file2.ts...`
|
||||
- **Confirmation:** No.
|
||||
|
||||
## 5. `search_file_content` (SearchText)
|
||||
|
||||
`search_file_content` searches for a regular expression pattern within the content of files in a specified directory. Can filter files by a glob pattern. Returns the lines containing matches, along with their file paths and line numbers.
|
||||
|
||||
- **Tool name:** `search_file_content`
|
||||
- **Display name:** SearchText
|
||||
- **File:** `grep.ts`
|
||||
- **Parameters:**
|
||||
- `pattern` (string, required): The regular expression (regex) to search for (e.g., `"function\s+myFunction"`).
|
||||
- `path` (string, optional): The absolute path to the directory to search within. Defaults to the current working directory.
|
||||
- `include` (string, optional): A glob pattern to filter which files are searched (e.g., `"*.js"`, `"src/**/*.{ts,tsx}"`). If omitted, searches most files (respecting common ignores).
|
||||
- **Behavior:**
|
||||
- Uses `git grep` if available in a Git repository for speed, otherwise falls back to system `grep` or a JavaScript-based search.
|
||||
- Returns a list of matching lines, each prefixed with its file path (relative to the search directory) and line number.
|
||||
- **Output (`llmContent`):** A formatted string of matches, e.g.:
|
||||
```
|
||||
Found 3 matches for pattern "myFunction" in path "." (filter: "*.ts"):
|
||||
---
|
||||
File: src/utils.ts
|
||||
L15: export function myFunction() {
|
||||
L22: myFunction.call();
|
||||
---
|
||||
File: src/index.ts
|
||||
L5: import { myFunction } from './utils';
|
||||
---
|
||||
```
|
||||
- **Confirmation:** No.
|
||||
|
||||
## 6. `replace` (Edit)
|
||||
|
||||
`replace` replaces text within a file. By default, replaces a single occurrence, but can replace multiple occurrences when `expected_replacements` is specified. This tool is designed for precise, targeted changes and requires significant context around the `old_string` to ensure it modifies the correct location.
|
||||
|
||||
- **Tool name:** `replace`
|
||||
- **Display name:** Edit
|
||||
- **File:** `edit.ts`
|
||||
- **Parameters:**
|
||||
- `file_path` (string, required): The absolute path to the file to modify.
|
||||
- `old_string` (string, required): The exact literal text to replace.
|
||||
|
||||
**CRITICAL:** This string must uniquely identify the single instance to change. It should include at least 3 lines of context _before_ and _after_ the target text, matching whitespace and indentation precisely. If `old_string` is empty, the tool attempts to create a new file at `file_path` with `new_string` as content.
|
||||
|
||||
- `new_string` (string, required): The exact literal text to replace `old_string` with.
|
||||
- `expected_replacements` (number, optional): The number of occurrences to replace. Defaults to `1`.
|
||||
|
||||
- **Behavior:**
|
||||
- If `old_string` is empty and `file_path` does not exist, creates a new file with `new_string` as content.
|
||||
- If `old_string` is provided, it reads the `file_path` and attempts to find exactly one occurrence of `old_string`.
|
||||
- If one occurrence is found, it replaces it with `new_string`.
|
||||
- **Enhanced Reliability (Multi-Stage Edit Correction):** To significantly improve the success rate of edits, especially when the model-provided `old_string` might not be perfectly precise, the tool incorporates a multi-stage edit correction mechanism.
|
||||
- If the initial `old_string` isn't found or matches multiple locations, the tool can leverage the Gemini model to iteratively refine `old_string` (and potentially `new_string`).
|
||||
- This self-correction process attempts to identify the unique segment the model intended to modify, making the `replace` operation more robust even with slightly imperfect initial context.
|
||||
- **Failure conditions:** Despite the correction mechanism, the tool will fail if:
|
||||
- `file_path` is not absolute or is outside the root directory.
|
||||
- `old_string` is not empty, but the `file_path` does not exist.
|
||||
- `old_string` is empty, but the `file_path` already exists.
|
||||
- `old_string` is not found in the file after attempts to correct it.
|
||||
- `old_string` is found multiple times, and the self-correction mechanism cannot resolve it to a single, unambiguous match.
|
||||
- **Output (`llmContent`):**
|
||||
- On success: `Successfully modified file: /path/to/file.txt (1 replacements).` or `Created new file: /path/to/new_file.txt with provided content.`
|
||||
- On failure: An error message explaining the reason (e.g., `Failed to edit, 0 occurrences found...`, `Failed to edit, expected 1 occurrences but found 2...`).
|
||||
- **Confirmation:** Yes. Shows a diff of the proposed changes and asks for user approval before writing to the file.
|
||||
|
||||
These file system tools provide a foundation for the Gemini CLI to understand and interact with your local project context.
|
|
@ -0,0 +1,56 @@
|
|||
# Gemini CLI tools
|
||||
|
||||
The Gemini CLI includes built-in tools that the Gemini model uses to interact with your local environment, access information, and perform actions. These tools enhance the CLI's capabilities, enabling it to go beyond text generation and assist with a wide range of tasks.
|
||||
|
||||
## Overview of Gemini CLI tools
|
||||
|
||||
In the context of the Gemini CLI, tools are specific functions or modules that the Gemini model can request to be executed. For example, if you ask Gemini to "Summarize the contents of `my_document.txt`," the model will likely identify the need to read that file and will request the execution of the `read_file` tool.
|
||||
|
||||
The core component (`packages/core`) manages these tools, presents their definitions (schemas) to the Gemini model, executes them when requested, and returns the results to the model for further processing into a user-facing response.
|
||||
|
||||
These tools provide the following capabilities:
|
||||
|
||||
- **Access local information:** Tools allow Gemini to access your local file system, read file contents, list directories, etc.
|
||||
- **Execute commands:** With tools like `run_shell_command`, Gemini can run shell commands (with appropriate safety measures and user confirmation).
|
||||
- **Interact with the web:** Tools can fetch content from URLs.
|
||||
- **Take actions:** Tools can modify files, write new files, or perform other actions on your system (again, typically with safeguards).
|
||||
- **Ground responses:** By using tools to fetch real-time or specific local data, Gemini's responses can be more accurate, relevant, and grounded in your actual context.
|
||||
|
||||
## How to use Gemini CLI tools
|
||||
|
||||
To use Gemini CLI tools, provide a prompt to the Gemini CLI. The process works as follows:
|
||||
|
||||
1. You provide a prompt to the Gemini CLI.
|
||||
2. The CLI sends the prompt to the core.
|
||||
3. The core, along with your prompt and conversation history, sends a list of available tools and their descriptions/schemas to the Gemini API.
|
||||
4. The Gemini model analyzes your request. If it determines that a tool is needed, its response will include a request to execute a specific tool with certain parameters.
|
||||
5. The core receives this tool request, validates it, and (often after user confirmation for sensitive operations) executes the tool.
|
||||
6. The output from the tool is sent back to the Gemini model.
|
||||
7. The Gemini model uses the tool's output to formulate its final answer, which is then sent back through the core to the CLI and displayed to you.
|
||||
|
||||
You will typically see messages in the CLI indicating when a tool is being called and whether it succeeded or failed.
|
||||
|
||||
## Security and confirmation
|
||||
|
||||
Many tools, especially those that can modify your file system or execute commands (`write_file`, `edit`, `run_shell_command`), are designed with safety in mind. The Gemini CLI will typically:
|
||||
|
||||
- **Require confirmation:** Prompt you before executing potentially sensitive operations, showing you what action is about to be taken.
|
||||
- **Utilize sandboxing:** All tools are subject to restrictions enforced by sandboxing (see [Sandboxing in the Gemini CLI](../sandbox.md)). This means that when operating in a sandbox, any tools (including MCP servers) you wish to use must be available _inside_ the sandbox environment. For example, to run an MCP server through `npx`, the `npx` executable must be installed within the sandbox's Docker image or be available in the `sandbox-exec` environment.
|
||||
|
||||
It's important to always review confirmation prompts carefully before allowing a tool to proceed.
|
||||
|
||||
## Learn more about Gemini CLI's tools
|
||||
|
||||
Gemini CLI's built-in tools can be broadly categorized as follows:
|
||||
|
||||
- **[File System Tools](./file-system.md):** For interacting with files and directories (reading, writing, listing, searching, etc.).
|
||||
- **[Shell Tool](./shell.md) (`run_shell_command`):** For executing shell commands.
|
||||
- **[Web Fetch Tool](./web-fetch.md) (`web_fetch`):** For retrieving content from URLs.
|
||||
- **[Web Search Tool](./web-search.md) (`web_search`):** For searching the web.
|
||||
- **[Multi-File Read Tool](./multi-file.md) (`read_many_files`):** A specialized tool for reading content from multiple files or directories, often used by the `@` command.
|
||||
- **[Memory Tool](./memory.md) (`save_memory`):** For saving and recalling information across sessions.
|
||||
|
||||
Additionally, these tools incorporate:
|
||||
|
||||
- **[MCP servers](./mcp-server.md)**: MCP servers act as a bridge between the Gemini model and your local environment or other services like APIs.
|
||||
- **[Sandboxing](../sandbox.md)**: Sandboxing isolates the model and its changes from your environment to reduce potential risk.
|
|
@ -0,0 +1,447 @@
|
|||
# MCP servers with the Gemini CLI
|
||||
|
||||
This document provides a guide to configuring and using Model Context Protocol (MCP) servers with the Gemini CLI.
|
||||
|
||||
## What is an MCP server?
|
||||
|
||||
An MCP server is an application that exposes tools and resources to the Gemini CLI through the Model Context Protocol, allowing it to interact with external systems and data sources. MCP servers act as a bridge between the Gemini model and your local environment or other services like APIs.
|
||||
|
||||
An MCP server enables the Gemini CLI to:
|
||||
|
||||
- **Discover tools:** List available tools, their descriptions, and parameters through standardized schema definitions.
|
||||
- **Execute tools:** Call specific tools with defined arguments and receive structured responses.
|
||||
- **Access resources:** Read data from specific resources (though the Gemini CLI primarily focuses on tool execution).
|
||||
|
||||
With an MCP server, you can extend the Gemini CLI's capabilities to perform actions beyond its built-in features, such as interacting with databases, APIs, custom scripts, or specialized workflows.
|
||||
|
||||
## Core Integration Architecture
|
||||
|
||||
The Gemini CLI integrates with MCP servers through a sophisticated discovery and execution system built into the core package (`packages/core/src/tools/`):
|
||||
|
||||
### Discovery Layer (`mcp-client.ts`)
|
||||
|
||||
The discovery process is orchestrated by `discoverMcpTools()`, which:
|
||||
|
||||
1. **Iterates through configured servers** from your `settings.json` `mcpServers` configuration
|
||||
2. **Establishes connections** using appropriate transport mechanisms (Stdio, SSE, or Streamable HTTP)
|
||||
3. **Fetches tool definitions** from each server using the MCP protocol
|
||||
4. **Sanitizes and validates** tool schemas for compatibility with the Gemini API
|
||||
5. **Registers tools** in the global tool registry with conflict resolution
|
||||
|
||||
### Execution Layer (`mcp-tool.ts`)
|
||||
|
||||
Each discovered MCP tool is wrapped in a `DiscoveredMCPTool` instance that:
|
||||
|
||||
- **Handles confirmation logic** based on server trust settings and user preferences
|
||||
- **Manages tool execution** by calling the MCP server with proper parameters
|
||||
- **Processes responses** for both the LLM context and user display
|
||||
- **Maintains connection state** and handles timeouts
|
||||
|
||||
### Transport Mechanisms
|
||||
|
||||
The Gemini CLI supports three MCP transport types:
|
||||
|
||||
- **Stdio Transport:** Spawns a subprocess and communicates via stdin/stdout
|
||||
- **SSE Transport:** Connects to Server-Sent Events endpoints
|
||||
- **Streamable HTTP Transport:** Uses HTTP streaming for communication
|
||||
|
||||
## How to set up your MCP server
|
||||
|
||||
The Gemini CLI uses the `mcpServers` configuration in your `settings.json` file to locate and connect to MCP servers. This configuration supports multiple servers with different transport mechanisms.
|
||||
|
||||
### Configure the MCP server in settings.json
|
||||
|
||||
You can configure MCP servers at the global level in the `~/.qwen/settings.json` file, or at the project level by creating or opening the `.qwen/settings.json` file in your project's root directory. Within the file, add the `mcpServers` configuration block.
|
||||
|
||||
### Configuration Structure
|
||||
|
||||
Add an `mcpServers` object to your `settings.json` file:
|
||||
|
||||
```json
|
||||
{ ...file contains other config objects
|
||||
"mcpServers": {
|
||||
"serverName": {
|
||||
"command": "path/to/server",
|
||||
"args": ["--arg1", "value1"],
|
||||
"env": {
|
||||
"API_KEY": "$MY_API_TOKEN"
|
||||
},
|
||||
"cwd": "./server-directory",
|
||||
"timeout": 30000,
|
||||
"trust": false
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### Configuration Properties
|
||||
|
||||
Each server configuration supports the following properties:
|
||||
|
||||
#### Required (one of the following)
|
||||
|
||||
- **`command`** (string): Path to the executable for Stdio transport
|
||||
- **`url`** (string): SSE endpoint URL (e.g., `"http://localhost:8080/sse"`)
|
||||
- **`httpUrl`** (string): HTTP streaming endpoint URL
|
||||
|
||||
#### Optional
|
||||
|
||||
- **`args`** (string[]): Command-line arguments for Stdio transport
|
||||
- **`headers`** (object): Custom HTTP headers when using `url` or `httpUrl`
|
||||
- **`env`** (object): Environment variables for the server process. Values can reference environment variables using `$VAR_NAME` or `${VAR_NAME}` syntax
|
||||
- **`cwd`** (string): Working directory for Stdio transport
|
||||
- **`timeout`** (number): Request timeout in milliseconds (default: 600,000ms = 10 minutes)
|
||||
- **`trust`** (boolean): When `true`, bypasses all tool call confirmations for this server (default: `false`)
|
||||
|
||||
### Example Configurations
|
||||
|
||||
#### Python MCP Server (Stdio)
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"pythonTools": {
|
||||
"command": "python",
|
||||
"args": ["-m", "my_mcp_server", "--port", "8080"],
|
||||
"cwd": "./mcp-servers/python",
|
||||
"env": {
|
||||
"DATABASE_URL": "$DB_CONNECTION_STRING",
|
||||
"API_KEY": "${EXTERNAL_API_KEY}"
|
||||
},
|
||||
"timeout": 15000
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Node.js MCP Server (Stdio)
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"nodeServer": {
|
||||
"command": "node",
|
||||
"args": ["dist/server.js", "--verbose"],
|
||||
"cwd": "./mcp-servers/node",
|
||||
"trust": true
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Docker-based MCP Server
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"dockerizedServer": {
|
||||
"command": "docker",
|
||||
"args": [
|
||||
"run",
|
||||
"-i",
|
||||
"--rm",
|
||||
"-e",
|
||||
"API_KEY",
|
||||
"-v",
|
||||
"${PWD}:/workspace",
|
||||
"my-mcp-server:latest"
|
||||
],
|
||||
"env": {
|
||||
"API_KEY": "$EXTERNAL_SERVICE_TOKEN"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### HTTP-based MCP Server
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"httpServer": {
|
||||
"httpUrl": "http://localhost:3000/mcp",
|
||||
"timeout": 5000
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### HTTP-based MCP Server with Custom Headers
|
||||
|
||||
```json
|
||||
{
|
||||
"mcpServers": {
|
||||
"httpServerWithAuth": {
|
||||
"httpUrl": "http://localhost:3000/mcp",
|
||||
"headers": {
|
||||
"Authorization": "Bearer your-api-token",
|
||||
"X-Custom-Header": "custom-value",
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
"timeout": 5000
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Discovery Process Deep Dive
|
||||
|
||||
When the Gemini CLI starts, it performs MCP server discovery through the following detailed process:
|
||||
|
||||
### 1. Server Iteration and Connection
|
||||
|
||||
For each configured server in `mcpServers`:
|
||||
|
||||
1. **Status tracking begins:** Server status is set to `CONNECTING`
|
||||
2. **Transport selection:** Based on configuration properties:
|
||||
- `httpUrl` → `StreamableHTTPClientTransport`
|
||||
- `url` → `SSEClientTransport`
|
||||
- `command` → `StdioClientTransport`
|
||||
3. **Connection establishment:** The MCP client attempts to connect with the configured timeout
|
||||
4. **Error handling:** Connection failures are logged and the server status is set to `DISCONNECTED`
|
||||
|
||||
### 2. Tool Discovery
|
||||
|
||||
Upon successful connection:
|
||||
|
||||
1. **Tool listing:** The client calls the MCP server's tool listing endpoint
|
||||
2. **Schema validation:** Each tool's function declaration is validated
|
||||
3. **Name sanitization:** Tool names are cleaned to meet Gemini API requirements:
|
||||
- Invalid characters (non-alphanumeric, underscore, dot, hyphen) are replaced with underscores
|
||||
- Names longer than 63 characters are truncated with middle replacement (`___`)
|
||||
|
||||
### 3. Conflict Resolution
|
||||
|
||||
When multiple servers expose tools with the same name:
|
||||
|
||||
1. **First registration wins:** The first server to register a tool name gets the unprefixed name
|
||||
2. **Automatic prefixing:** Subsequent servers get prefixed names: `serverName__toolName`
|
||||
3. **Registry tracking:** The tool registry maintains mappings between server names and their tools
|
||||
|
||||
### 4. Schema Processing
|
||||
|
||||
Tool parameter schemas undergo sanitization for Gemini API compatibility:
|
||||
|
||||
- **`$schema` properties** are removed
|
||||
- **`additionalProperties`** are stripped
|
||||
- **`anyOf` with `default`** have their default values removed (Vertex AI compatibility)
|
||||
- **Recursive processing** applies to nested schemas
|
||||
|
||||
### 5. Connection Management
|
||||
|
||||
After discovery:
|
||||
|
||||
- **Persistent connections:** Servers that successfully register tools maintain their connections
|
||||
- **Cleanup:** Servers that provide no usable tools have their connections closed
|
||||
- **Status updates:** Final server statuses are set to `CONNECTED` or `DISCONNECTED`
|
||||
|
||||
## Tool Execution Flow
|
||||
|
||||
When the Gemini model decides to use an MCP tool, the following execution flow occurs:
|
||||
|
||||
### 1. Tool Invocation
|
||||
|
||||
The model generates a `FunctionCall` with:
|
||||
|
||||
- **Tool name:** The registered name (potentially prefixed)
|
||||
- **Arguments:** JSON object matching the tool's parameter schema
|
||||
|
||||
### 2. Confirmation Process
|
||||
|
||||
Each `DiscoveredMCPTool` implements sophisticated confirmation logic:
|
||||
|
||||
#### Trust-based Bypass
|
||||
|
||||
```typescript
|
||||
if (this.trust) {
|
||||
return false; // No confirmation needed
|
||||
}
|
||||
```
|
||||
|
||||
#### Dynamic Allow-listing
|
||||
|
||||
The system maintains internal allow-lists for:
|
||||
|
||||
- **Server-level:** `serverName` → All tools from this server are trusted
|
||||
- **Tool-level:** `serverName.toolName` → This specific tool is trusted
|
||||
|
||||
#### User Choice Handling
|
||||
|
||||
When confirmation is required, users can choose:
|
||||
|
||||
- **Proceed once:** Execute this time only
|
||||
- **Always allow this tool:** Add to tool-level allow-list
|
||||
- **Always allow this server:** Add to server-level allow-list
|
||||
- **Cancel:** Abort execution
|
||||
|
||||
### 3. Execution
|
||||
|
||||
Upon confirmation (or trust bypass):
|
||||
|
||||
1. **Parameter preparation:** Arguments are validated against the tool's schema
|
||||
2. **MCP call:** The underlying `CallableTool` invokes the server with:
|
||||
|
||||
```typescript
|
||||
const functionCalls = [
|
||||
{
|
||||
name: this.serverToolName, // Original server tool name
|
||||
args: params,
|
||||
},
|
||||
];
|
||||
```
|
||||
|
||||
3. **Response processing:** Results are formatted for both LLM context and user display
|
||||
|
||||
### 4. Response Handling
|
||||
|
||||
The execution result contains:
|
||||
|
||||
- **`llmContent`:** Raw response parts for the language model's context
|
||||
- **`returnDisplay`:** Formatted output for user display (often JSON in markdown code blocks)
|
||||
|
||||
## How to interact with your MCP server
|
||||
|
||||
### Using the `/mcp` Command
|
||||
|
||||
The `/mcp` command provides comprehensive information about your MCP server setup:
|
||||
|
||||
```bash
|
||||
/mcp
|
||||
```
|
||||
|
||||
This displays:
|
||||
|
||||
- **Server list:** All configured MCP servers
|
||||
- **Connection status:** `CONNECTED`, `CONNECTING`, or `DISCONNECTED`
|
||||
- **Server details:** Configuration summary (excluding sensitive data)
|
||||
- **Available tools:** List of tools from each server with descriptions
|
||||
- **Discovery state:** Overall discovery process status
|
||||
|
||||
### Example `/mcp` Output
|
||||
|
||||
```
|
||||
MCP Servers Status:
|
||||
|
||||
📡 pythonTools (CONNECTED)
|
||||
Command: python -m my_mcp_server --port 8080
|
||||
Working Directory: ./mcp-servers/python
|
||||
Timeout: 15000ms
|
||||
Tools: calculate_sum, file_analyzer, data_processor
|
||||
|
||||
🔌 nodeServer (DISCONNECTED)
|
||||
Command: node dist/server.js --verbose
|
||||
Error: Connection refused
|
||||
|
||||
🐳 dockerizedServer (CONNECTED)
|
||||
Command: docker run -i --rm -e API_KEY my-mcp-server:latest
|
||||
Tools: docker__deploy, docker__status
|
||||
|
||||
Discovery State: COMPLETED
|
||||
```
|
||||
|
||||
### Tool Usage
|
||||
|
||||
Once discovered, MCP tools are available to the Gemini model like built-in tools. The model will automatically:
|
||||
|
||||
1. **Select appropriate tools** based on your requests
|
||||
2. **Present confirmation dialogs** (unless the server is trusted)
|
||||
3. **Execute tools** with proper parameters
|
||||
4. **Display results** in a user-friendly format
|
||||
|
||||
## Status Monitoring and Troubleshooting
|
||||
|
||||
### Connection States
|
||||
|
||||
The MCP integration tracks several states:
|
||||
|
||||
#### Server Status (`MCPServerStatus`)
|
||||
|
||||
- **`DISCONNECTED`:** Server is not connected or has errors
|
||||
- **`CONNECTING`:** Connection attempt in progress
|
||||
- **`CONNECTED`:** Server is connected and ready
|
||||
|
||||
#### Discovery State (`MCPDiscoveryState`)
|
||||
|
||||
- **`NOT_STARTED`:** Discovery hasn't begun
|
||||
- **`IN_PROGRESS`:** Currently discovering servers
|
||||
- **`COMPLETED`:** Discovery finished (with or without errors)
|
||||
|
||||
### Common Issues and Solutions
|
||||
|
||||
#### Server Won't Connect
|
||||
|
||||
**Symptoms:** Server shows `DISCONNECTED` status
|
||||
|
||||
**Troubleshooting:**
|
||||
|
||||
1. **Check configuration:** Verify `command`, `args`, and `cwd` are correct
|
||||
2. **Test manually:** Run the server command directly to ensure it works
|
||||
3. **Check dependencies:** Ensure all required packages are installed
|
||||
4. **Review logs:** Look for error messages in the CLI output
|
||||
5. **Verify permissions:** Ensure the CLI can execute the server command
|
||||
|
||||
#### No Tools Discovered
|
||||
|
||||
**Symptoms:** Server connects but no tools are available
|
||||
|
||||
**Troubleshooting:**
|
||||
|
||||
1. **Verify tool registration:** Ensure your server actually registers tools
|
||||
2. **Check MCP protocol:** Confirm your server implements the MCP tool listing correctly
|
||||
3. **Review server logs:** Check stderr output for server-side errors
|
||||
4. **Test tool listing:** Manually test your server's tool discovery endpoint
|
||||
|
||||
#### Tools Not Executing
|
||||
|
||||
**Symptoms:** Tools are discovered but fail during execution
|
||||
|
||||
**Troubleshooting:**
|
||||
|
||||
1. **Parameter validation:** Ensure your tool accepts the expected parameters
|
||||
2. **Schema compatibility:** Verify your input schemas are valid JSON Schema
|
||||
3. **Error handling:** Check if your tool is throwing unhandled exceptions
|
||||
4. **Timeout issues:** Consider increasing the `timeout` setting
|
||||
|
||||
#### Sandbox Compatibility
|
||||
|
||||
**Symptoms:** MCP servers fail when sandboxing is enabled
|
||||
|
||||
**Solutions:**
|
||||
|
||||
1. **Docker-based servers:** Use Docker containers that include all dependencies
|
||||
2. **Path accessibility:** Ensure server executables are available in the sandbox
|
||||
3. **Network access:** Configure sandbox to allow necessary network connections
|
||||
4. **Environment variables:** Verify required environment variables are passed through
|
||||
|
||||
### Debugging Tips
|
||||
|
||||
1. **Enable debug mode:** Run the CLI with `--debug` for verbose output
|
||||
2. **Check stderr:** MCP server stderr is captured and logged (INFO messages filtered)
|
||||
3. **Test isolation:** Test your MCP server independently before integrating
|
||||
4. **Incremental setup:** Start with simple tools before adding complex functionality
|
||||
5. **Use `/mcp` frequently:** Monitor server status during development
|
||||
|
||||
## Important Notes
|
||||
|
||||
### Security Considerations
|
||||
|
||||
- **Trust settings:** The `trust` option bypasses all confirmation dialogs. Use cautiously and only for servers you completely control
|
||||
- **Access tokens:** Be security-aware when configuring environment variables containing API keys or tokens
|
||||
- **Sandbox compatibility:** When using sandboxing, ensure MCP servers are available within the sandbox environment
|
||||
- **Private data:** Using broadly scoped personal access tokens can lead to information leakage between repositories
|
||||
|
||||
### Performance and Resource Management
|
||||
|
||||
- **Connection persistence:** The CLI maintains persistent connections to servers that successfully register tools
|
||||
- **Automatic cleanup:** Connections to servers providing no tools are automatically closed
|
||||
- **Timeout management:** Configure appropriate timeouts based on your server's response characteristics
|
||||
- **Resource monitoring:** MCP servers run as separate processes and consume system resources
|
||||
|
||||
### Schema Compatibility
|
||||
|
||||
- **Property stripping:** The system automatically removes certain schema properties (`$schema`, `additionalProperties`) for Gemini API compatibility
|
||||
- **Name sanitization:** Tool names are automatically sanitized to meet API requirements
|
||||
- **Conflict resolution:** Tool name conflicts between servers are resolved through automatic prefixing
|
||||
|
||||
This comprehensive integration makes MCP servers a powerful way to extend the Gemini CLI's capabilities while maintaining security, reliability, and ease of use.
|
|
@ -0,0 +1,44 @@
|
|||
# Memory Tool (`save_memory`)
|
||||
|
||||
This document describes the `save_memory` tool for the Gemini CLI.
|
||||
|
||||
## Description
|
||||
|
||||
Use `save_memory` to save and recall information across your Gemini CLI sessions. With `save_memory`, you can direct the CLI to remember key details across sessions, providing personalized and directed assistance.
|
||||
|
||||
### Arguments
|
||||
|
||||
`save_memory` takes one argument:
|
||||
|
||||
- `fact` (string, required): The specific fact or piece of information to remember. This should be a clear, self-contained statement written in natural language.
|
||||
|
||||
## How to use `save_memory` with the Gemini CLI
|
||||
|
||||
The tool appends the provided `fact` to a special `GEMINI.md` file located in the user's home directory (`~/.qwen/GEMINI.md`). This file can be configured to have a different name.
|
||||
|
||||
Once added, the facts are stored under a `## Gemini Added Memories` section. This file is loaded as context in subsequent sessions, allowing the CLI to recall the saved information.
|
||||
|
||||
Usage:
|
||||
|
||||
```
|
||||
save_memory(fact="Your fact here.")
|
||||
```
|
||||
|
||||
### `save_memory` examples
|
||||
|
||||
Remember a user preference:
|
||||
|
||||
```
|
||||
save_memory(fact="My preferred programming language is Python.")
|
||||
```
|
||||
|
||||
Store a project-specific detail:
|
||||
|
||||
```
|
||||
save_memory(fact="The project I'm currently working on is called 'gemini-cli'.")
|
||||
```
|
||||
|
||||
## Important notes
|
||||
|
||||
- **General usage:** This tool should be used for concise, important facts. It is not intended for storing large amounts of data or conversational history.
|
||||
- **Memory file:** The memory file is a plain text Markdown file, so you can view and edit it manually if needed.
|
|
@ -0,0 +1,66 @@
|
|||
# Multi File Read Tool (`read_many_files`)
|
||||
|
||||
This document describes the `read_many_files` tool for the Gemini CLI.
|
||||
|
||||
## Description
|
||||
|
||||
Use `read_many_files` to read content from multiple files specified by paths or glob patterns. The behavior of this tool depends on the provided files:
|
||||
|
||||
- For text files, this tool concatenates their content into a single string.
|
||||
- For image (e.g., PNG, JPEG), PDF, audio (MP3, WAV), and video (MP4, MOV) files, it reads and returns them as base64-encoded data, provided they are explicitly requested by name or extension.
|
||||
|
||||
`read_many_files` can be used to perform tasks such as getting an overview of a codebase, finding where specific functionality is implemented, reviewing documentation, or gathering context from multiple configuration files.
|
||||
|
||||
### Arguments
|
||||
|
||||
`read_many_files` takes the following arguments:
|
||||
|
||||
- `paths` (list[string], required): An array of glob patterns or paths relative to the tool's target directory (e.g., `["src/**/*.ts"]`, `["README.md", "docs/", "assets/logo.png"]`).
|
||||
- `exclude` (list[string], optional): Glob patterns for files/directories to exclude (e.g., `["**/*.log", "temp/"]`). These are added to default excludes if `useDefaultExcludes` is true.
|
||||
- `include` (list[string], optional): Additional glob patterns to include. These are merged with `paths` (e.g., `["*.test.ts"]` to specifically add test files if they were broadly excluded, or `["images/*.jpg"]` to include specific image types).
|
||||
- `recursive` (boolean, optional): Whether to search recursively. This is primarily controlled by `**` in glob patterns. Defaults to `true`.
|
||||
- `useDefaultExcludes` (boolean, optional): Whether to apply a list of default exclusion patterns (e.g., `node_modules`, `.git`, non image/pdf binary files). Defaults to `true`.
|
||||
- `respect_git_ignore` (boolean, optional): Whether to respect `.gitignore` patterns when finding files. Defaults to `true`.
|
||||
|
||||
## How to use `read_many_files` with the Gemini CLI
|
||||
|
||||
`read_many_files` searches for files matching the provided `paths` and `include` patterns, while respecting `exclude` patterns and default excludes (if enabled).
|
||||
|
||||
- For text files: it reads the content of each matched file (attempting to skip binary files not explicitly requested as image/PDF) and concatenates it into a single string, with a separator `--- {filePath} ---` between the content of each file. Uses UTF-8 encoding by default.
|
||||
- For image and PDF files: if explicitly requested by name or extension (e.g., `paths: ["logo.png"]` or `include: ["*.pdf"]`), the tool reads the file and returns its content as a base64 encoded string.
|
||||
- The tool attempts to detect and skip other binary files (those not matching common image/PDF types or not explicitly requested) by checking for null bytes in their initial content.
|
||||
|
||||
Usage:
|
||||
|
||||
```
|
||||
read_many_files(paths=["Your files or paths here."], include=["Additional files to include."], exclude=["Files to exclude."], recursive=False, useDefaultExcludes=False, respect_git_ignore=True)
|
||||
```
|
||||
|
||||
## `read_many_files` examples
|
||||
|
||||
Read all TypeScript files in the `src` directory:
|
||||
|
||||
```
|
||||
read_many_files(paths=["src/**/*.ts"])
|
||||
```
|
||||
|
||||
Read the main README, all Markdown files in the `docs` directory, and a specific logo image, excluding a specific file:
|
||||
|
||||
```
|
||||
read_many_files(paths=["README.md", "docs/**/*.md", "assets/logo.png"], exclude=["docs/OLD_README.md"])
|
||||
```
|
||||
|
||||
Read all JavaScript files but explicitly including test files and all JPEGs in an `images` folder:
|
||||
|
||||
```
|
||||
read_many_files(paths=["**/*.js"], include=["**/*.test.js", "images/**/*.jpg"], useDefaultExcludes=False)
|
||||
```
|
||||
|
||||
## Important notes
|
||||
|
||||
- **Binary file handling:**
|
||||
- **Image/PDF/Audio/Video files:** The tool can read common image types (PNG, JPEG, etc.), PDF, audio (mp3, wav), and video (mp4, mov) files, returning them as base64 encoded data. These files _must_ be explicitly targeted by the `paths` or `include` patterns (e.g., by specifying the exact filename like `video.mp4` or a pattern like `*.mov`).
|
||||
- **Other binary files:** The tool attempts to detect and skip other types of binary files by examining their initial content for null bytes. The tool excludes these files from its output.
|
||||
- **Performance:** Reading a very large number of files or very large individual files can be resource-intensive.
|
||||
- **Path specificity:** Ensure paths and glob patterns are correctly specified relative to the tool's target directory. For image/PDF files, ensure the patterns are specific enough to include them.
|
||||
- **Default excludes:** Be aware of the default exclusion patterns (like `node_modules`, `.git`) and use `useDefaultExcludes=False` if you need to override them, but do so cautiously.
|
|
@ -0,0 +1,138 @@
|
|||
# Shell Tool (`run_shell_command`)
|
||||
|
||||
This document describes the `run_shell_command` tool for the Gemini CLI.
|
||||
|
||||
## Description
|
||||
|
||||
Use `run_shell_command` to interact with the underlying system, run scripts, or perform command-line operations. `run_shell_command` executes a given shell command. On Windows, the command will be executed with `cmd.exe /c`. On other platforms, the command will be executed with `bash -c`.
|
||||
|
||||
### Arguments
|
||||
|
||||
`run_shell_command` takes the following arguments:
|
||||
|
||||
- `command` (string, required): The exact shell command to execute.
|
||||
- `description` (string, optional): A brief description of the command's purpose, which will be shown to the user.
|
||||
- `directory` (string, optional): The directory (relative to the project root) in which to execute the command. If not provided, the command runs in the project root.
|
||||
|
||||
## How to use `run_shell_command` with the Gemini CLI
|
||||
|
||||
When using `run_shell_command`, the command is executed as a subprocess. `run_shell_command` can start background processes using `&`. The tool returns detailed information about the execution, including:
|
||||
|
||||
- `Command`: The command that was executed.
|
||||
- `Directory`: The directory where the command was run.
|
||||
- `Stdout`: Output from the standard output stream.
|
||||
- `Stderr`: Output from the standard error stream.
|
||||
- `Error`: Any error message reported by the subprocess.
|
||||
- `Exit Code`: The exit code of the command.
|
||||
- `Signal`: The signal number if the command was terminated by a signal.
|
||||
- `Background PIDs`: A list of PIDs for any background processes started.
|
||||
|
||||
Usage:
|
||||
|
||||
```
|
||||
run_shell_command(command="Your commands.", description="Your description of the command.", directory="Your execution directory.")
|
||||
```
|
||||
|
||||
## `run_shell_command` examples
|
||||
|
||||
List files in the current directory:
|
||||
|
||||
```
|
||||
run_shell_command(command="ls -la")
|
||||
```
|
||||
|
||||
Run a script in a specific directory:
|
||||
|
||||
```
|
||||
run_shell_command(command="./my_script.sh", directory="scripts", description="Run my custom script")
|
||||
```
|
||||
|
||||
Start a background server:
|
||||
|
||||
```
|
||||
run_shell_command(command="npm run dev &", description="Start development server in background")
|
||||
```
|
||||
|
||||
## Important notes
|
||||
|
||||
- **Security:** Be cautious when executing commands, especially those constructed from user input, to prevent security vulnerabilities.
|
||||
- **Interactive commands:** Avoid commands that require interactive user input, as this can cause the tool to hang. Use non-interactive flags if available (e.g., `npm init -y`).
|
||||
- **Error handling:** Check the `Stderr`, `Error`, and `Exit Code` fields to determine if a command executed successfully.
|
||||
- **Background processes:** When a command is run in the background with `&`, the tool will return immediately and the process will continue to run in the background. The `Background PIDs` field will contain the process ID of the background process.
|
||||
|
||||
## Command Restrictions
|
||||
|
||||
You can restrict the commands that can be executed by the `run_shell_command` tool by using the `coreTools` and `excludeTools` settings in your configuration file.
|
||||
|
||||
- `coreTools`: To restrict `run_shell_command` to a specific set of commands, add entries to the `coreTools` list in the format `run_shell_command(<command>)`. For example, `"coreTools": ["run_shell_command(git)"]` will only allow `git` commands. Including the generic `run_shell_command` acts as a wildcard, allowing any command not explicitly blocked.
|
||||
- `excludeTools`: To block specific commands, add entries to the `excludeTools` list in the format `run_shell_command(<command>)`. For example, `"excludeTools": ["run_shell_command(rm)"]` will block `rm` commands.
|
||||
|
||||
The validation logic is designed to be secure and flexible:
|
||||
|
||||
1. **Command Chaining Disabled**: The tool automatically splits commands chained with `&&`, `||`, or `;` and validates each part separately. If any part of the chain is disallowed, the entire command is blocked.
|
||||
2. **Prefix Matching**: The tool uses prefix matching. For example, if you allow `git`, you can run `git status` or `git log`.
|
||||
3. **Blocklist Precedence**: The `excludeTools` list is always checked first. If a command matches a blocked prefix, it will be denied, even if it also matches an allowed prefix in `coreTools`.
|
||||
|
||||
### Command Restriction Examples
|
||||
|
||||
**Allow only specific command prefixes**
|
||||
|
||||
To allow only `git` and `npm` commands, and block all others:
|
||||
|
||||
```json
|
||||
{
|
||||
"coreTools": ["run_shell_command(git)", "run_shell_command(npm)"]
|
||||
}
|
||||
```
|
||||
|
||||
- `git status`: Allowed
|
||||
- `npm install`: Allowed
|
||||
- `ls -l`: Blocked
|
||||
|
||||
**Block specific command prefixes**
|
||||
|
||||
To block `rm` and allow all other commands:
|
||||
|
||||
```json
|
||||
{
|
||||
"coreTools": ["run_shell_command"],
|
||||
"excludeTools": ["run_shell_command(rm)"]
|
||||
}
|
||||
```
|
||||
|
||||
- `rm -rf /`: Blocked
|
||||
- `git status`: Allowed
|
||||
- `npm install`: Allowed
|
||||
|
||||
**Blocklist takes precedence**
|
||||
|
||||
If a command prefix is in both `coreTools` and `excludeTools`, it will be blocked.
|
||||
|
||||
```json
|
||||
{
|
||||
"coreTools": ["run_shell_command(git)"],
|
||||
"excludeTools": ["run_shell_command(git push)"]
|
||||
}
|
||||
```
|
||||
|
||||
- `git push origin main`: Blocked
|
||||
- `git status`: Allowed
|
||||
|
||||
**Block all shell commands**
|
||||
|
||||
To block all shell commands, add the `run_shell_command` wildcard to `excludeTools`:
|
||||
|
||||
```json
|
||||
{
|
||||
"excludeTools": ["run_shell_command"]
|
||||
}
|
||||
```
|
||||
|
||||
- `ls -l`: Blocked
|
||||
- `any other command`: Blocked
|
||||
|
||||
## Security Note for `excludeTools`
|
||||
|
||||
Command-specific restrictions in `excludeTools` for `run_shell_command` are based on simple string matching and can be easily bypassed. This feature is **not a security mechanism** and should not be relied upon to safely execute untrusted code. It is recommended to use `coreTools` to explicitly select commands that can be executed.
|
|
@ -0,0 +1,44 @@
|
|||
# Web Fetch Tool (`web_fetch`)
|
||||
|
||||
This document describes the `web_fetch` tool for the Gemini CLI.
|
||||
|
||||
## Description
|
||||
|
||||
Use `web_fetch` to summarize, compare, or extract information from web pages. The `web_fetch` tool processes content from one or more URLs (up to 20) embedded in a prompt. `web_fetch` takes a natural language prompt and returns a generated response.
|
||||
|
||||
### Arguments
|
||||
|
||||
`web_fetch` takes one argument:
|
||||
|
||||
- `prompt` (string, required): A comprehensive prompt that includes the URL(s) (up to 20) to fetch and specific instructions on how to process their content. For example: `"Summarize https://example.com/article and extract key points from https://another.com/data"`. The prompt must contain at least one URL starting with `http://` or `https://`.
|
||||
|
||||
## How to use `web_fetch` with the Gemini CLI
|
||||
|
||||
To use `web_fetch` with the Gemini CLI, provide a natural language prompt that contains URLs. The tool will ask for confirmation before fetching any URLs. Once confirmed, the tool will process URLs through Gemini API's `urlContext`.
|
||||
|
||||
If the Gemini API cannot access the URL, the tool will fall back to fetching content directly from the local machine. The tool will format the response, including source attribution and citations where possible. The tool will then provide the response to the user.
|
||||
|
||||
Usage:
|
||||
|
||||
```
|
||||
web_fetch(prompt="Your prompt, including a URL such as https://google.com.")
|
||||
```
|
||||
|
||||
## `web_fetch` examples
|
||||
|
||||
Summarize a single article:
|
||||
|
||||
```
|
||||
web_fetch(prompt="Can you summarize the main points of https://example.com/news/latest")
|
||||
```
|
||||
|
||||
Compare two articles:
|
||||
|
||||
```
|
||||
web_fetch(prompt="What are the differences in the conclusions of these two papers: https://arxiv.org/abs/2401.0001 and https://arxiv.org/abs/2401.0002?")
|
||||
```
|
||||
|
||||
## Important notes
|
||||
|
||||
- **URL processing:** `web_fetch` relies on the Gemini API's ability to access and process the given URLs.
|
||||
- **Output quality:** The quality of the output will depend on the clarity of the instructions in the prompt.
|
|
@ -0,0 +1,36 @@
|
|||
# Web Search Tool (`google_web_search`)
|
||||
|
||||
This document describes the `google_web_search` tool.
|
||||
|
||||
## Description
|
||||
|
||||
Use `google_web_search` to perform a web search using Google Search via the Gemini API. The `google_web_search` tool returns a summary of web results with sources.
|
||||
|
||||
### Arguments
|
||||
|
||||
`google_web_search` takes one argument:
|
||||
|
||||
- `query` (string, required): The search query.
|
||||
|
||||
## How to use `google_web_search` with the Gemini CLI
|
||||
|
||||
The `google_web_search` tool sends a query to the Gemini API, which then performs a web search. `google_web_search` will return a generated response based on the search results, including citations and sources.
|
||||
|
||||
Usage:
|
||||
|
||||
```
|
||||
google_web_search(query="Your query goes here.")
|
||||
```
|
||||
|
||||
## `google_web_search` examples
|
||||
|
||||
Get information on a topic:
|
||||
|
||||
```
|
||||
google_web_search(query="latest advancements in AI-powered code generation")
|
||||
```
|
||||
|
||||
## Important notes
|
||||
|
||||
- **Response returned:** The `google_web_search` tool returns a processed summary, not a raw list of search results.
|
||||
- **Citations:** The response includes citations to the sources used to generate the summary.
|
|
@ -0,0 +1,87 @@
|
|||
# Gemini CLI: Terms of Service and Privacy Notice
|
||||
|
||||
Gemini CLI is an open-source tool that lets you interact with Google's powerful language models directly from your command-line interface. The Terms of Service and Privacy Notices that apply to your usage of the Gemini CLI depend on the type of account you use to authenticate with Google.
|
||||
|
||||
This article outlines the specific terms and privacy policies applicable for different account types and authentication methods. Note: See [quotas and pricing](./quota-and-pricing.md) for the quota and pricing details that apply to your usage of the Gemini CLI.
|
||||
|
||||
## How to determine your authentication method
|
||||
|
||||
Your authentication method refers to the method you use to log into and access the Gemini CLI. There are four ways to authenticate:
|
||||
|
||||
- Logging in with your Google account to Gemini Code Assist for Individuals
|
||||
- Logging in with your Google account to Gemini Code Assist for Workspace, Standard, or Enterprise Users
|
||||
- Using an API key with the Gemini Developer API
|
||||
- Using an API key with Vertex AI GenAI API
|
||||
|
||||
For each of these four methods of authentication, different Terms of Service and Privacy Notices may apply.
|
||||
|
||||
| Authentication | Account | Terms of Service | Privacy Notice |
|
||||
| :---------------------------- | :------------------ | :------------------------------------------------------------------------------------------------------ | :----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
|
||||
| Gemini Code Assist via Google | Individual | [Google Terms of Service](https://policies.google.com/terms?hl=en-US) | [Gemini Code Assist Privacy Notice for Individuals](https://developers.google.com/gemini-code-assist/resources/privacy-notice-gemini-code-assist-individuals) |
|
||||
| Gemini Code Assist via Google | Standard/Enterprise | [Google Cloud Platform Terms of Service](https://cloud.google.com/terms) | [Gemini Code Assist Privacy Notice for Standard and Enterprise](https://cloud.google.com/gemini/docs/codeassist/security-privacy-compliance#standard_and_enterprise_data_protection_and_privacy) |
|
||||
| Gemini Developer API | Unpaid | [Gemini API Terms of Service - Unpaid Services](https://ai.google.dev/gemini-api/terms#unpaid-services) | [Google Privacy Policy](https://policies.google.com/privacy) |
|
||||
| Gemini Developer API | Paid | [Gemini API Terms of Service - Paid Services](https://ai.google.dev/gemini-api/terms#paid-services) | [Google Privacy Policy](https://policies.google.com/privacy) |
|
||||
| Vertex AI GenAI API | | [Google Cloud Platform Service Terms](https://cloud.google.com/terms/service-terms/) | [Google Cloud Privacy Notice](https://cloud.google.com/terms/cloud-privacy-notice) |
|
||||
|
||||
## 1. If you have logged in with your Google account to Gemini Code Assist for Individuals
|
||||
|
||||
For users who use their Google account to access [Gemini Code Assist for Individuals](https://developers.google.com/gemini-code-assist/docs/overview#supported-features-gca), these Terms of Service and Privacy Notice documents apply:
|
||||
|
||||
- **Terms of Service:** Your use of the Gemini CLI is governed by the [Google Terms of Service](https://policies.google.com/terms?hl=en-US).
|
||||
- **Privacy Notice:** The collection and use of your data is described in the [Gemini Code Assist Privacy Notice for Individuals](https://developers.google.com/gemini-code-assist/resources/privacy-notice-gemini-code-assist-individuals).
|
||||
|
||||
## 2. If you have logged in with your Google account to Gemini Code Assist for Workspace, Standard, or Enterprise Users
|
||||
|
||||
For users who use their Google account to access the [Standard or Enterprise edition](https://cloud.google.com/gemini/docs/codeassist/overview#editions-overview) of Gemini Code Assist, these Terms of Service and Privacy Notice documents apply:
|
||||
|
||||
- **Terms of Service:** Your use of the Gemini CLI is governed by the [Google Cloud Platform Terms of Service](https://cloud.google.com/terms).
|
||||
- **Privacy Notice:** The collection and use of your data is described in the [Gemini Code Assist Privacy Notices for Standard and Enterprise Users](https://cloud.google.com/gemini/docs/codeassist/security-privacy-compliance#standard_and_enterprise_data_protection_and_privacy).
|
||||
|
||||
## 3. If you have logged in with a Gemini API key to the Gemini Developer API
|
||||
|
||||
If you are using a Gemini API key for authentication with the [Gemini Developer API](https://ai.google.dev/gemini-api/docs), these Terms of Service and Privacy Notice documents apply:
|
||||
|
||||
- **Terms of Service:** Your use of the Gemini CLI is governed by the [Gemini API Terms of Service](https://ai.google.dev/gemini-api/terms). These terms may differ depending on whether you are using an unpaid or paid service:
|
||||
- For unpaid services, refer to the [Gemini API Terms of Service - Unpaid Services](https://ai.google.dev/gemini-api/terms#unpaid-services).
|
||||
- For paid services, refer to the [Gemini API Terms of Service - Paid Services](https://ai.google.dev/gemini-api/terms#paid-services).
|
||||
- **Privacy Notice:** The collection and use of your data is described in the [Google Privacy Policy](https://policies.google.com/privacy).
|
||||
|
||||
## 4. If you have logged in with a Gemini API key to the Vertex AI GenAI API
|
||||
|
||||
If you are using a Gemini API key for authentication with a [Vertex AI GenAI API](https://cloud.google.com/vertex-ai/generative-ai/docs/reference/rest) backend, these Terms of Service and Privacy Notice documents apply:
|
||||
|
||||
- **Terms of Service:** Your use of the Gemini CLI is governed by the [Google Cloud Platform Service Terms](https://cloud.google.com/terms/service-terms/).
|
||||
- **Privacy Notice:** The collection and use of your data is described in the [Google Cloud Privacy Notice](https://cloud.google.com/terms/cloud-privacy-notice).
|
||||
|
||||
### Usage Statistics Opt-Out
|
||||
|
||||
You may opt-out from sending Usage Statistics to Google by following the instructions available here: [Usage Statistics Configuration](./cli/configuration.md#usage-statistics).
|
||||
|
||||
## Frequently Asked Questions (FAQ) for the Gemini CLI
|
||||
|
||||
### 1. Is my code, including prompts and answers, used to train Google's models?
|
||||
|
||||
Whether your code, including prompts and answers, is used to train Google's models depends on the type of authentication method you use and your account type.
|
||||
|
||||
- **Google account with Gemini Code Assist for Individuals**: Yes. When you use your personal Google account, the [Gemini Code Assist Privacy Notice for Individuals](https://developers.google.com/gemini-code-assist/resources/privacy-notice-gemini-code-assist-individuals) applies. Under this notice, your **prompts, answers, and related code are collected** and may be used to improve Google's products, including for model training.
|
||||
- **Google account with Gemini Code Assist for Workspace, Standard, or Enterprise**: No. For these accounts, your data is governed by the [Gemini Code Assist Privacy Notices](https://cloud.google.com/gemini/docs/codeassist/security-privacy-compliance#standard_and_enterprise_data_protection_and_privacy) terms, which treat your inputs as confidential. Your **prompts, answers, and related code are not collected** and are not used to train models.
|
||||
- **Gemini API key via the Gemini Developer API**: Whether your code is collected or used depends on whether you are using an unpaid or paid service.
|
||||
- **Unpaid services**: Yes. When you use the Gemini API key via the Gemini Developer API with an unpaid service, the [Gemini API Terms of Service - Unpaid Services](https://ai.google.dev/gemini-api/terms#unpaid-services) terms apply. Under this notice, your **prompts, answers, and related code are collected** and may be used to improve Google's products, including for model training.
|
||||
- **Paid services**: No. When you use the Gemini API key via the Gemini Developer API with a paid service, the [Gemini API Terms of Service - Paid Services](https://ai.google.dev/gemini-api/terms#paid-services) terms apply, which treats your inputs as confidential. Your **prompts, answers, and related code are not collected** and are not used to train models.
|
||||
- **Gemini API key via the Vertex AI GenAI API**: No. For these accounts, your data is governed by the [Google Cloud Privacy Notice](https://cloud.google.com/terms/cloud-privacy-notice) terms, which treat your inputs as confidential. Your **prompts, answers, and related code are not collected** and are not used to train models.
|
||||
|
||||
### 2. What are Usage Statistics and what does the opt-out control?
|
||||
|
||||
The **Usage Statistics** setting is the single control for all optional data collection in the Gemini CLI.
|
||||
|
||||
The data it collects depends on your account and authentication type:
|
||||
|
||||
- **Google account with Gemini Code Assist for Individuals**: When enabled, this setting allows Google to collect both anonymous telemetry (for example, commands run and performance metrics) and **your prompts and answers** for model improvement.
|
||||
- **Google account with Gemini Code Assist for Workspace, Standard, or Enterprise**: This setting only controls the collection of anonymous telemetry. Your prompts and answers are never collected, regardless of this setting.
|
||||
- **Gemini API key via the Gemini Developer API**:
|
||||
  - **Unpaid services**: When enabled, this setting allows Google to collect both anonymous telemetry (like commands run and performance metrics) and **your prompts and answers** for model improvement. When disabled, we will use your data as described in [How Google Uses Your Data](https://ai.google.dev/gemini-api/terms#data-use-unpaid).
|
||||
  - **Paid services**: This setting only controls the collection of anonymous telemetry. Google logs prompts and responses for a limited period of time, solely for the purpose of detecting violations of the Prohibited Use Policy and any required legal or regulatory disclosures.
|
||||
- **Gemini API key via the Vertex AI GenAI API:** This setting only controls the collection of anonymous telemetry. Your prompts and answers are never collected, regardless of this setting.
|
||||
|
||||
You can disable Usage Statistics for any account type by following the instructions in the [Usage Statistics Configuration](./cli/configuration.md#usage-statistics) documentation.
|
|
@ -0,0 +1,75 @@
|
|||
# Troubleshooting Guide
|
||||
|
||||
This guide provides solutions to common issues and debugging tips.
|
||||
|
||||
## Authentication
|
||||
|
||||
- **Error: `Failed to login. Message: Request contains an invalid argument`**
|
||||
- Users with Google Workspace accounts, or users with Google Cloud accounts
  associated with their Gmail accounts, may not be able to activate the free
  tier of the Gemini Code Assist plan.
|
||||
- For Google Cloud accounts, you can work around this by setting
|
||||
`GOOGLE_CLOUD_PROJECT` to your project ID.
|
||||
- You can also grab an API key from [AI Studio](https://aistudio.google.com/app/apikey), which also includes a
|
||||
separate free tier.
|
||||
|
||||
## Frequently asked questions (FAQs)
|
||||
|
||||
- **Q: How do I update Gemini CLI to the latest version?**
|
||||
- A: If installed globally via npm, update Gemini CLI using the command `npm install -g @google/gemini-cli@latest`. If run from source, pull the latest changes from the repository and rebuild using `npm run build`.
|
||||
|
||||
- **Q: Where are Gemini CLI configuration files stored?**
|
||||
- A: The CLI configuration is stored within two `settings.json` files: one in your home directory and one in your project's root directory. In both locations, `settings.json` is found in the `.qwen/` folder. Refer to [CLI Configuration](./cli/configuration.md) for more details.
|
||||
|
||||
- **Q: Why don't I see cached token counts in my stats output?**
|
||||
- A: Cached token information is only displayed when cached tokens are being used. This feature is available for API key users (Gemini API key or Vertex AI) but not for OAuth users (Google Personal/Enterprise accounts) at this time, as the Code Assist API does not support cached content creation. You can still view your total token usage with the `/stats` command.
|
||||
|
||||
## Common error messages and solutions
|
||||
|
||||
- **Error: `EADDRINUSE` (Address already in use) when starting an MCP server.**
|
||||
- **Cause:** Another process is already using the port the MCP server is trying to bind to.
|
||||
- **Solution:**
|
||||
Either stop the other process that is using the port or configure the MCP server to use a different port.
|
||||
|
||||
- **Error: Command not found (when attempting to run Gemini CLI).**
|
||||
- **Cause:** Gemini CLI is not correctly installed or not in your system's PATH.
|
||||
- **Solution:**
|
||||
1. Ensure Gemini CLI installation was successful.
|
||||
2. If installed globally, check that your npm global binary directory is in your PATH.
|
||||
3. If running from source, ensure you are using the correct command to invoke it (e.g., `node packages/cli/dist/index.js ...`).
|
||||
|
||||
- **Error: `MODULE_NOT_FOUND` or import errors.**
|
||||
- **Cause:** Dependencies are not installed correctly, or the project hasn't been built.
|
||||
- **Solution:**
|
||||
1. Run `npm install` to ensure all dependencies are present.
|
||||
2. Run `npm run build` to compile the project.
|
||||
|
||||
- **Error: "Operation not permitted", "Permission denied", or similar.**
|
||||
- **Cause:** If sandboxing is enabled, then the application is likely attempting an operation restricted by your sandbox, such as writing outside the project directory or system temp directory.
|
||||
- **Solution:** See [Sandboxing](./cli/configuration.md#sandboxing) for more information, including how to customize your sandbox configuration.
|
||||
|
||||
- **CLI is not interactive in "CI" environments**
|
||||
- **Issue:** The CLI does not enter interactive mode (no prompt appears) if an environment variable starting with `CI_` (e.g., `CI_TOKEN`) is set. This is because the `is-in-ci` package, used by the underlying UI framework, detects these variables and assumes a non-interactive CI environment.
|
||||
- **Cause:** The `is-in-ci` package checks for the presence of `CI`, `CONTINUOUS_INTEGRATION`, or any environment variable with a `CI_` prefix. When any of these are found, it signals that the environment is non-interactive, which prevents the CLI from starting in its interactive mode.
|
||||
- **Solution:** If the `CI_` prefixed variable is not needed for the CLI to function, you can temporarily unset it for the command. e.g., `env -u CI_TOKEN gemini`
|
||||
|
||||
## Debugging Tips
|
||||
|
||||
- **CLI debugging:**
|
||||
- Use the `--verbose` flag (if available) with CLI commands for more detailed output.
|
||||
- Check the CLI logs, often found in a user-specific configuration or cache directory.
|
||||
|
||||
- **Core debugging:**
|
||||
- Check the server console output for error messages or stack traces.
|
||||
- Increase log verbosity if configurable.
|
||||
- Use Node.js debugging tools (e.g., `node --inspect`) if you need to step through server-side code.
|
||||
|
||||
- **Tool issues:**
|
||||
- If a specific tool is failing, try to isolate the issue by running the simplest possible version of the command or operation the tool performs.
|
||||
- For `run_shell_command`, check that the command works directly in your shell first.
|
||||
- For file system tools, double-check paths and permissions.
|
||||
|
||||
- **Pre-flight checks:**
|
||||
- Always run `npm run preflight` before committing code. This can catch many common issues related to formatting, linting, and type errors.
|
||||
|
||||
If you encounter an issue not covered here, consider searching the project's issue tracker on GitHub or reporting a new issue with detailed information.
|
|
@ -0,0 +1,31 @@
|
|||
/**
 * @license
 * Copyright 2025 Google LLC
 * SPDX-License-Identifier: Apache-2.0
 */

import esbuild from 'esbuild';
import path from 'path';
import { fileURLToPath } from 'url';
import { createRequire } from 'module';

// Recreate CommonJS __filename/__dirname in this ESM build script.
const __filename = fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
const require = createRequire(import.meta.url);
// Read the root package.json so the CLI version can be embedded in the bundle.
const pkg = require(path.resolve(__dirname, 'package.json'));

esbuild
  .build({
    entryPoints: ['packages/cli/index.ts'],
    bundle: true,
    outfile: 'bundle/gemini.js',
    platform: 'node',
    format: 'esm',
    define: {
      // Inlined at build time; read via process.env.CLI_VERSION at runtime.
      'process.env.CLI_VERSION': JSON.stringify(pkg.version),
    },
    banner: {
      // ESM output has no __filename/__dirname; shim them (plus require)
      // for any bundled CJS-style code that expects them.
      js: `import { createRequire as _gcliCreateRequire } from 'module'; const require = _gcliCreateRequire(import.meta.url); globalThis.__filename = require('url').fileURLToPath(import.meta.url); globalThis.__dirname = require('path').dirname(globalThis.__filename);`,
    },
  })
  .catch((err) => {
    // Log the failure before exiting; the original catch exited silently,
    // hiding the reason the build failed.
    console.error(err);
    process.exit(1);
  });
|
|
@ -0,0 +1,159 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
/**
|
||||
* @fileoverview Disallows relative imports between specified monorepo packages.
|
||||
*/
|
||||
'use strict';
|
||||
|
||||
import path from 'node:path';
|
||||
import fs from 'node:fs';
|
||||
|
||||
/**
 * Resolves the npm package name that owns `filePath`.
 *
 * Walks upward from the file's directory toward `root`, stopping at the
 * first directory that contains a `package.json`, and returns that
 * manifest's `name` field.
 *
 * @param {string} filePath - File (absolute or relative) whose owning package should be found.
 * @param {string} root - Absolute directory at which the upward search stops.
 * @returns {string | undefined | null} The `name` from the nearest
 *   `package.json` (`undefined` when the manifest lacks a `name`), or
 *   `null` when no manifest exists below `root`.
 * @throws {Error} If a manifest cannot be read or contains invalid JSON.
 */
function findPackageName(filePath, root) {
  let dir = path.dirname(path.resolve(filePath));
  while (dir !== root) {
    const manifestPath = path.join(dir, 'package.json');
    if (fs.existsSync(manifestPath)) {
      return JSON.parse(fs.readFileSync(manifestPath, 'utf-8')).name;
    }
    // Step up one directory level.
    dir = path.dirname(dir);
    // Stop once the filesystem root is reached (its parent is itself).
    if (path.dirname(dir) === dir) break;
  }
  return null; // No package.json found below `root`.
}
|
||||
|
||||
// ESLint rule definition: forbid relative imports that cross from one
// monorepo package into another; offers an autofix to the package name.
export default {
  meta: {
    type: 'problem',
    docs: {
      description: 'Disallow relative imports between packages.',
      category: 'Best Practices',
      recommended: 'error',
    },
    // The rule can rewrite the offending import specifier automatically.
    fixable: 'code',
    schema: [
      {
        type: 'object',
        properties: {
          root: {
            type: 'string',
            description:
              'Absolute path to the root of all relevant packages to consider.',
          },
        },
        required: ['root'],
        additionalProperties: false,
      },
    ],
    messages: {
      noRelativePathsForCrossPackageImport:
        "Relative import '{{importedPath}}' crosses package boundary from '{{importingPackage}}' to '{{importedPackage}}'. Use a direct package import ('{{importedPackage}}') instead.",
      relativeImportIsInvalidPackage:
        "Relative import '{{importedPath}}' does not reference a valid package. All source must be in a package directory.",
    },
  },

  create(context) {
    const options = context.options[0] || {};
    const allPackagesRoot = options.root;

    const currentFilePath = context.filename;
    if (
      !currentFilePath ||
      currentFilePath === '<input>' ||
      currentFilePath === '<text>'
    ) {
      // Skip if filename is not available (e.g., linting raw text)
      return {};
    }

    // Package that owns the file currently being linted.
    const currentPackage = findPackageName(currentFilePath, allPackagesRoot);

    // If the current file isn't inside a package structure, don't apply the rule
    if (!currentPackage) {
      return {};
    }

    return {
      ImportDeclaration(node) {
        const importingPackage = currentPackage;
        const importedPath = node.source.value;

        // Only interested in relative paths
        if (
          !importedPath ||
          typeof importedPath !== 'string' ||
          !importedPath.startsWith('.')
        ) {
          return;
        }

        // Resolve the absolute path of the imported module
        const absoluteImportPath = path.resolve(
          path.dirname(currentFilePath),
          importedPath,
        );

        // Find the package information for the imported file
        const importedPackage = findPackageName(
          absoluteImportPath,
          allPackagesRoot,
        );

        // If the imported file isn't in a recognized package, report issue
        if (!importedPackage) {
          context.report({
            node: node.source,
            messageId: 'relativeImportIsInvalidPackage',
            data: { importedPath: importedPath },
          });
          return;
        }

        // The core check: Are the source and target packages different?
        if (currentPackage !== importedPackage) {
          // We found a relative import crossing package boundaries
          context.report({
            node: node.source, // Report the error on the source string literal
            messageId: 'noRelativePathsForCrossPackageImport',
            data: {
              importedPath,
              importedPackage,
              importingPackage,
            },
            // Autofix: replace the relative specifier with the package name.
            fix(fixer) {
              return fixer.replaceText(node.source, `'${importedPackage}'`);
            },
          });
        }
      },
    };
  },
};
|
|
@ -0,0 +1,236 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import eslint from '@eslint/js';
|
||||
import tseslint from 'typescript-eslint';
|
||||
import reactPlugin from 'eslint-plugin-react';
|
||||
import reactHooks from 'eslint-plugin-react-hooks';
|
||||
import prettierConfig from 'eslint-config-prettier';
|
||||
import importPlugin from 'eslint-plugin-import';
|
||||
import globals from 'globals';
|
||||
import licenseHeader from 'eslint-plugin-license-header';
|
||||
import noRelativeCrossPackageImports from './eslint-rules/no-relative-cross-package-imports.js';
|
||||
import path from 'node:path'; // Use node: prefix for built-ins
|
||||
import url from 'node:url';
|
||||
|
||||
// --- ESM way to get __dirname ---
const __filename = url.fileURLToPath(import.meta.url);
const __dirname = path.dirname(__filename);
// --- ---

// Determine the monorepo root (assuming eslint.config.js is at the root)
const projectRoot = __dirname;

export default tseslint.config(
  {
    // Global ignores
    ignores: [
      'node_modules/*',
      'eslint.config.js',
      'packages/cli/dist/**',
      'packages/core/dist/**',
      'packages/server/dist/**',
      'packages/vscode-ide-companion/dist/**',
      'eslint-rules/*',
      'bundle/**',
    ],
  },
  eslint.configs.recommended,
  ...tseslint.configs.recommended,
  reactHooks.configs['recommended-latest'],
  reactPlugin.configs.flat.recommended,
  reactPlugin.configs.flat['jsx-runtime'], // Add this if you are using React 17+
  {
    // Settings for eslint-plugin-react
    settings: {
      react: {
        version: 'detect',
      },
    },
  },
  {
    // Import specific config
    files: ['packages/cli/src/**/*.{ts,tsx}'], // Target only TS/TSX in the cli package
    plugins: {
      import: importPlugin,
    },
    settings: {
      'import/resolver': {
        node: true,
      },
    },
    rules: {
      ...importPlugin.configs.recommended.rules,
      ...importPlugin.configs.typescript.rules,
      'import/no-default-export': 'warn',
      'import/no-unresolved': 'off', // Disable for now, can be noisy with monorepos/paths
    },
  },
  {
    // General overrides and rules for the project (TS/TSX files)
    files: ['packages/*/src/**/*.{ts,tsx}'], // Target TS/TSX sources in every package
    languageOptions: {
      globals: {
        ...globals.node,
        ...globals.es2021,
      },
    },
    rules: {
      // General Best Practice Rules (subset adapted for flat config)
      '@typescript-eslint/array-type': ['error', { default: 'array-simple' }],
      'arrow-body-style': ['error', 'as-needed'],
      curly: ['error', 'multi-line'],
      eqeqeq: ['error', 'always', { null: 'ignore' }],
      '@typescript-eslint/consistent-type-assertions': [
        'error',
        { assertionStyle: 'as' },
      ],
      '@typescript-eslint/explicit-member-accessibility': [
        'error',
        { accessibility: 'no-public' },
      ],
      '@typescript-eslint/no-explicit-any': 'error',
      '@typescript-eslint/no-inferrable-types': [
        'error',
        { ignoreParameters: true, ignoreProperties: true },
      ],
      '@typescript-eslint/no-namespace': ['error', { allowDeclarations: true }],
      '@typescript-eslint/no-unused-vars': [
        'error',
        {
          argsIgnorePattern: '^_',
          varsIgnorePattern: '^_',
          caughtErrorsIgnorePattern: '^_',
        },
      ],
      'no-cond-assign': 'error',
      'no-debugger': 'error',
      'no-duplicate-case': 'error',
      'no-restricted-syntax': [
        'error',
        {
          selector: 'CallExpression[callee.name="require"]',
          message: 'Avoid using require(). Use ES6 imports instead.',
        },
        {
          selector: 'ThrowStatement > Literal:not([value=/^\\w+Error:/])',
          message:
            'Do not throw string literals or non-Error objects. Throw new Error("...") instead.',
        },
      ],
      'no-unsafe-finally': 'error',
      'no-unused-expressions': 'off', // Disable base rule
      '@typescript-eslint/no-unused-expressions': [
        // Enable TS version
        'error',
        { allowShortCircuit: true, allowTernary: true },
      ],
      'no-var': 'error',
      'object-shorthand': 'error',
      'one-var': ['error', 'never'],
      'prefer-arrow-callback': 'error',
      'prefer-const': ['error', { destructuring: 'all' }],
      radix: 'error',
      'default-case': 'error',
    },
  },
  {
    // Require the Apache-2.0 license header on all source files.
    files: ['./**/*.{tsx,ts,js}'],
    plugins: {
      'license-header': licenseHeader,
    },
    rules: {
      'license-header/header': [
        'error',
        [
          '/**',
          ' * @license',
          ' * Copyright 2025 Google LLC',
          ' * SPDX-License-Identifier: Apache-2.0',
          ' */',
        ],
      ],
    },
  },
  // extra settings for scripts that we run directly with node
  {
    files: ['./scripts/**/*.js', 'esbuild.config.js'],
    languageOptions: {
      globals: {
        ...globals.node,
        process: 'readonly',
        console: 'readonly',
      },
    },
    rules: {
      '@typescript-eslint/no-unused-vars': [
        'error',
        {
          argsIgnorePattern: '^_',
          varsIgnorePattern: '^_',
          caughtErrorsIgnorePattern: '^_',
        },
      ],
    },
  },
  {
    // The vscode companion build script legitimately uses require().
    files: ['packages/vscode-ide-companion/esbuild.js'],
    languageOptions: {
      globals: {
        ...globals.node,
        process: 'readonly',
        console: 'readonly',
      },
    },
    rules: {
      'no-restricted-syntax': 'off',
      '@typescript-eslint/no-require-imports': 'off',
    },
  },
  // Prettier config must be last
  prettierConfig,
  // extra settings for scripts that we run directly with node
  {
    files: ['./integration-tests/**/*.js'],
    languageOptions: {
      globals: {
        ...globals.node,
        process: 'readonly',
        console: 'readonly',
      },
    },
    rules: {
      '@typescript-eslint/no-unused-vars': [
        'error',
        {
          argsIgnorePattern: '^_',
          varsIgnorePattern: '^_',
          caughtErrorsIgnorePattern: '^_',
        },
      ],
    },
  },
  // Custom eslint rules for this repo
  {
    files: ['packages/**/*.{js,jsx,ts,tsx}'],
    plugins: {
      custom: {
        rules: {
          'no-relative-cross-package-imports': noRelativeCrossPackageImports,
        },
      },
    },
    rules: {
      // Enable and configure your custom rule
      'custom/no-relative-cross-package-imports': [
        'error',
        {
          root: path.join(projectRoot, 'packages'),
        },
      ],
    },
  },
);
|
|
@ -0,0 +1,30 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { strict as assert } from 'assert';
|
||||
import { test } from 'node:test';
|
||||
import { TestRig } from './test-helper.js';
|
||||
|
||||
// Verifies the CLI can surface the contents of an existing file.
test('reads a file', (t) => {
  const testRig = new TestRig();
  testRig.setup(t.name);
  testRig.createFile('test.txt', 'hello world');

  const response = testRig.run(`read the file name test.txt`);

  assert.ok(response.toLowerCase().includes('hello'));
});

// Verifies the CLI can modify a file on disk.
test('writes a file', (t) => {
  const testRig = new TestRig();
  testRig.setup(t.name);
  testRig.createFile('test.txt', '');

  testRig.run(`edit test.txt to have a hello world message`);

  const updatedContent = testRig.readFile('test.txt');
  assert.ok(updatedContent.toLowerCase().includes('hello'));
});
|
|
@ -0,0 +1,19 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { test } from 'node:test';
|
||||
import { strict as assert } from 'assert';
|
||||
import { TestRig } from './test-helper.js';
|
||||
|
||||
// Smoke test: the model should answer a trivial factual question,
// exercising the web-search path end to end.
test('should be able to search the web', async (t) => {
  const testRig = new TestRig();
  testRig.setup(t.name);

  const prompt = `what planet do we live on`;
  const answer = await testRig.run(prompt);

  assert.ok(answer.toLowerCase().includes('earth'));
});
|
|
@ -0,0 +1,24 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { test } from 'node:test';
|
||||
import { strict as assert } from 'assert';
|
||||
import { TestRig } from './test-helper.js';
|
||||
|
||||
// Verifies the CLI can enumerate files and directories in the workspace.
test('should be able to list a directory', async (t) => {
  const testRig = new TestRig();
  testRig.setup(t.name);
  testRig.createFile('file1.txt', 'file 1 content');
  testRig.mkdir('subdir');
  testRig.sync();

  const prompt = `Can you list the files in the current directory. Display them in the style of 'ls'`;
  const listing = testRig.run(prompt);

  // Both the file and the sub-directory must appear somewhere in the output.
  const nonEmptyLines = listing
    .split('\n')
    .filter((line) => line.trim() !== '');
  assert.ok(nonEmptyLines.some((line) => line.includes('file1.txt')));
  assert.ok(nonEmptyLines.some((line) => line.includes('subdir')));
});
|
|
@ -0,0 +1,22 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { test } from 'node:test';
|
||||
import { strict as assert } from 'assert';
|
||||
import { TestRig } from './test-helper.js';
|
||||
|
||||
// NOTE(review): currently skipped — reason not recorded in this file; confirm
// before re-enabling.
test.skip('should be able to read multiple files', async (t) => {
  const testRig = new TestRig();
  testRig.setup(t.name);
  testRig.createFile('file1.txt', 'file 1 content');
  testRig.createFile('file2.txt', 'file 2 content');

  const prompt = `Read the files in this directory, list them and print them to the screen`;
  const combinedOutput = await testRig.run(prompt);

  assert.ok(combinedOutput.includes('file 1 content'));
  assert.ok(combinedOutput.includes('file 2 content'));
});
|
|
@ -0,0 +1,22 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { test } from 'node:test';
|
||||
import { strict as assert } from 'assert';
|
||||
import { TestRig } from './test-helper.js';
|
||||
|
||||
// Verifies the CLI can perform an in-place string replacement in a file.
test('should be able to replace content in a file', async (t) => {
  const testRig = new TestRig();
  testRig.setup(t.name);

  const targetFile = 'file_to_replace.txt';
  testRig.createFile(targetFile, 'original content');
  const prompt = `Can you replace 'original' with 'replaced' in the file 'file_to_replace.txt'`;

  await testRig.run(prompt);
  const updatedContent = testRig.readFile(targetFile);
  assert.strictEqual(updatedContent, 'replaced content');
});
|
|
@ -0,0 +1,146 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { spawnSync, spawn } from 'child_process';
|
||||
import { mkdirSync, rmSync, createWriteStream } from 'fs';
|
||||
import { join, dirname, basename } from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { glob } from 'glob';
|
||||
|
||||
/**
 * Discovers every integration test file and runs each one in its own
 * `node --test` child process, teeing output and tracking an overall
 * pass/fail result.
 *
 * Flags: `--keep-output` preserves per-test artifacts under
 * `.integration-tests/<runId>`; `--verbose` echoes child output to the
 * console. Any remaining args select individual test files by basename.
 */
async function main() {
  const __dirname = dirname(fileURLToPath(import.meta.url));
  const rootDir = join(__dirname, '..');
  const integrationTestsDir = join(rootDir, '.integration-tests');

  // A docker sandbox run needs a freshly built bundle on the host side.
  if (process.env.GEMINI_SANDBOX === 'docker' && !process.env.IS_DOCKER) {
    console.log('Building sandbox for Docker...');
    const buildResult = spawnSync('npm', ['run', 'build:all'], {
      stdio: 'inherit',
    });
    if (buildResult.status !== 0) {
      console.error('Sandbox build failed.');
      process.exit(1);
    }
  }

  // Unique directory per run so successive runs never collide.
  const runId = `${Date.now()}`;
  const runDir = join(integrationTestsDir, runId);

  mkdirSync(runDir, { recursive: true });

  const args = process.argv.slice(2);
  const keepOutput =
    process.env.KEEP_OUTPUT === 'true' || args.includes('--keep-output');
  if (keepOutput) {
    // Strip the flag so it is not mistaken for a test-file name below.
    const keepOutputIndex = args.indexOf('--keep-output');
    if (keepOutputIndex > -1) {
      args.splice(keepOutputIndex, 1);
    }
    console.log(`Keeping output for test run in: ${runDir}`);
  }

  const verbose = args.includes('--verbose');
  if (verbose) {
    const verboseIndex = args.indexOf('--verbose');
    if (verboseIndex > -1) {
      args.splice(verboseIndex, 1);
    }
  }

  // Remaining args select specific test files; default to all of them.
  const testPatterns =
    args.length > 0
      ? args.map((arg) => `integration-tests/${arg}.test.js`)
      : ['integration-tests/*.test.js'];
  const testFiles = glob.sync(testPatterns, { cwd: rootDir, absolute: true });

  for (const testFile of testFiles) {
    const testFileName = basename(testFile);
    console.log(`\tFound test file: ${testFileName}`);
  }

  let allTestsPassed = true;

  for (const testFile of testFiles) {
    const testFileName = basename(testFile);
    const testFileDir = join(runDir, testFileName);
    mkdirSync(testFileDir, { recursive: true });

    console.log(
      `------------- Running test file: ${testFileName} ------------------------------`,
    );

    const nodeArgs = ['--test'];
    if (verbose) {
      nodeArgs.push('--test-reporter=spec');
    }
    nodeArgs.push(testFile);

    // Each test file runs in its own process; the TestRig in the child reads
    // these environment variables to locate its scratch directory.
    const child = spawn('node', nodeArgs, {
      stdio: 'pipe',
      env: {
        ...process.env,
        GEMINI_CLI_INTEGRATION_TEST: 'true',
        INTEGRATION_TEST_FILE_DIR: testFileDir,
        KEEP_OUTPUT: keepOutput.toString(),
        VERBOSE: verbose.toString(),
        TEST_FILE_NAME: testFileName,
      },
    });

    // Tee child output: echo when verbose, persist when keeping output.
    let outputStream;
    if (keepOutput) {
      const outputFile = join(testFileDir, 'output.log');
      outputStream = createWriteStream(outputFile);
      console.log(`Output for ${testFileName} written to: ${outputFile}`);
    }

    child.stdout.on('data', (data) => {
      if (verbose) {
        process.stdout.write(data);
      }
      if (outputStream) {
        outputStream.write(data);
      }
    });

    child.stderr.on('data', (data) => {
      if (verbose) {
        process.stderr.write(data);
      }
      if (outputStream) {
        outputStream.write(data);
      }
    });

    // Wait for the child to exit, flushing the log stream first if present.
    const exitCode = await new Promise((resolve) => {
      child.on('close', (code) => {
        if (outputStream) {
          outputStream.end(() => {
            resolve(code);
          });
        } else {
          resolve(code);
        }
      });
    });

    if (exitCode !== 0) {
      console.error(`Test file failed: ${testFileName}`);
      allTestsPassed = false;
    }
  }

  if (!keepOutput) {
    rmSync(runDir, { recursive: true, force: true });
  }

  if (!allTestsPassed) {
    console.error('One or more test files failed.');
    process.exit(1);
  }
}

// Surface unexpected failures (e.g. fs or glob errors) instead of leaving a
// floating promise whose rejection is unhandled.
main().catch((err) => {
  console.error(err);
  process.exit(1);
});
|
|
@ -0,0 +1,31 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { test } from 'node:test';
|
||||
import { strict as assert } from 'assert';
|
||||
import { TestRig } from './test-helper.js';
|
||||
|
||||
// Verifies the CLI can execute a shell command requested via --prompt.
test('should be able to run a shell command', async (t) => {
  const rig = new TestRig();
  rig.setup(t.name);
  rig.createFile('blah.txt', 'some content');

  // Fixed typo: "contexts" -> "contents" so the model receives a coherent request.
  const prompt = `Can you use ls to list the contents of the current folder`;
  const result = rig.run(prompt);

  assert.ok(result.includes('blah.txt'));
});

// Same command, but delivered over stdin instead of --prompt.
test('should be able to run a shell command via stdin', async (t) => {
  const rig = new TestRig();
  rig.setup(t.name);
  rig.createFile('blah.txt', 'some content');

  const prompt = `Can you use ls to list the contents of the current folder`;
  const result = rig.run({ stdin: prompt });

  assert.ok(result.includes('blah.txt'));
});
|
|
@ -0,0 +1,21 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { test } from 'node:test';
|
||||
import { strict as assert } from 'assert';
|
||||
import { TestRig } from './test-helper.js';
|
||||
|
||||
// Exercises the memory tool: the model is asked to remember a fact and then
// immediately recall it, with the answer wrapped in '$' so the assertion can
// match it unambiguously.
test('should be able to save to memory', async (t) => {
  const rig = new TestRig();
  rig.setup(t.name);

  const prompt = `remember that my favorite color is blue.

  what is my favorite color? tell me that and surround it with $ symbol`;
  const result = await rig.run(prompt);

  assert.ok(result.toLowerCase().includes('$blue$'));
});
|
|
@ -0,0 +1,70 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { test, describe, before, after } from 'node:test';
|
||||
import { strict as assert } from 'node:assert';
|
||||
import { TestRig } from './test-helper.js';
|
||||
import { spawn } from 'child_process';
|
||||
import { join } from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { writeFileSync, unlinkSync } from 'fs';
|
||||
|
||||
// Path where the throwaway MCP server script is written for this test run.
const __dirname = fileURLToPath(new URL('.', import.meta.url));
const serverScriptPath = join(__dirname, './temp-server.js');

// Minimal MCP server (stdio transport) exposing a single 'add' tool.
// NOTE: this is a runtime string — it is written to disk and executed as a
// standalone script, not evaluated in this process.
const serverScript = `
import { McpServer } from '@modelcontextprotocol/sdk/server/mcp.js';
import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js';
import { z } from 'zod';

const server = new McpServer({
  name: 'addition-server',
  version: '1.0.0',
});

server.registerTool(
  'add',
  {
    title: 'Addition Tool',
    description: 'Add two numbers',
    inputSchema: { a: z.number(), b: z.number() },
  },
  async ({ a, b }) => ({
    content: [{ type: 'text', text: String(a + b) }],
  }),
);

const transport = new StdioServerTransport();
await server.connect(transport);
`;

describe('simple-mcp-server', () => {
  const rig = new TestRig();
  let child;

  before(() => {
    // Materialize the server script and launch it as a child process.
    writeFileSync(serverScriptPath, serverScript);
    child = spawn('node', [serverScriptPath], {
      stdio: ['pipe', 'pipe', 'pipe'],
    });
    // Surface server-side errors in the test log.
    child.stderr.on('data', (data) => {
      console.error(`stderr: ${data}`);
    });
    // Wait for the server to be ready
    // NOTE(review): a fixed 500ms sleep is a readiness race; also presumably
    // the CLI discovers this server via MCP settings not visible here — confirm.
    return new Promise((resolve) => setTimeout(resolve, 500));
  });

  after(() => {
    // Tear down: stop the server and remove the temp script.
    child.kill();
    unlinkSync(serverScriptPath);
  });

  test('should add two numbers', () => {
    rig.setup('should add two numbers');
    const output = rig.run('add 5 and 10');
    assert.ok(output.includes('15'));
  });
});
|
|
@ -0,0 +1,101 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { execSync } from 'child_process';
|
||||
import { mkdirSync, writeFileSync, readFileSync } from 'fs';
|
||||
import { join, dirname } from 'path';
|
||||
import { fileURLToPath } from 'url';
|
||||
import { env } from 'process';
|
||||
|
||||
// Directory of this helper module (ES modules have no built-in __dirname).
const __dirname = dirname(fileURLToPath(import.meta.url));
/**
 * Turn an arbitrary test name into a filesystem-friendly slug: lower-cased,
 * with every run of non-alphanumeric characters collapsed to a single hyphen.
 */
function sanitizeTestName(name) {
  const lowered = name.toLowerCase();
  return lowered.replace(/[^a-z0-9]+/g, '-');
}
/**
 * Harness for the CLI integration tests.
 *
 * Each test gets its own scratch directory under
 * env.INTEGRATION_TEST_FILE_DIR; the bundled CLI is executed inside that
 * directory so file-creating tools operate on an isolated workspace.
 */
export class TestRig {
  constructor() {
    // Path to the bundled CLI entry point, relative to this helper file.
    this.bundlePath = join(__dirname, '..', 'bundle/gemini.js');
    this.testDir = null;
  }

  /**
   * Create (or reuse) the scratch directory for a test.
   * @param {string} testName Human-readable name; sanitized for use as a path.
   */
  setup(testName) {
    this.testName = testName;
    const sanitizedName = sanitizeTestName(testName);
    this.testDir = join(env.INTEGRATION_TEST_FILE_DIR, sanitizedName);
    mkdirSync(this.testDir, { recursive: true });
  }

  /** Write `content` to `fileName` in the test dir; returns the full path. */
  createFile(fileName, content) {
    const filePath = join(this.testDir, fileName);
    writeFileSync(filePath, content);
    return filePath;
  }

  /** Create a subdirectory inside the test directory. */
  mkdir(dir) {
    mkdirSync(join(this.testDir, dir));
  }

  sync() {
    // ensure file system is done before spawning
    execSync('sync', { cwd: this.testDir });
  }

  // True when captured output should be echoed to the console for debugging.
  _shouldEchoOutput() {
    return env.KEEP_OUTPUT === 'true' || env.VERBOSE === 'true';
  }

  // Stable "<file>:<test-name>" identifier used to label echoed output.
  // (Previously this expression was duplicated in run() and readFile().)
  _testId() {
    const fileId = env.TEST_FILE_NAME.replace('.test.js', '');
    return `${fileId}:${this.testName.replace(/ /g, '-')}`;
  }

  /**
   * Run the bundled CLI inside the test directory.
   * @param {string|object} promptOrOptions Prompt text, or {prompt, stdin}.
   * @param {...string} args Extra CLI arguments appended verbatim.
   * @returns {string} Captured stdout.
   */
  run(promptOrOptions, ...args) {
    let command = `node ${this.bundlePath} --yolo`;
    const execOptions = {
      cwd: this.testDir,
      encoding: 'utf-8',
    };

    if (typeof promptOrOptions === 'string') {
      command += ` --prompt "${promptOrOptions}"`;
    } else if (
      typeof promptOrOptions === 'object' &&
      promptOrOptions !== null
    ) {
      if (promptOrOptions.prompt) {
        command += ` --prompt "${promptOrOptions.prompt}"`;
      }
      if (promptOrOptions.stdin) {
        execOptions.input = promptOrOptions.stdin;
      }
    }

    command += ` ${args.join(' ')}`;

    const output = execSync(command, execOptions);

    if (this._shouldEchoOutput()) {
      const testId = this._testId();
      console.log(`--- TEST: ${testId} ---`);
      console.log(output);
      console.log(`--- END TEST: ${testId} ---`);
    }

    return output;
  }

  /** Read a file from the test directory (utf-8), echoing it when verbose. */
  readFile(fileName) {
    const content = readFileSync(join(this.testDir, fileName), 'utf-8');
    if (this._shouldEchoOutput()) {
      const testId = this._testId();
      console.log(`--- FILE: ${testId}/${fileName} ---`);
      console.log(content);
      console.log(`--- END FILE: ${testId}/${fileName} ---`);
    }
    return content;
  }
}
|
@ -0,0 +1,21 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { test } from 'node:test';
|
||||
import { strict as assert } from 'assert';
|
||||
import { TestRig } from './test-helper.js';
|
||||
|
||||
// Smoke test for the write tool: prompting the model to create dad.txt
// must leave a non-empty file in the test workspace.
test('should be able to write a file', async (t) => {
  const rig = new TestRig();
  rig.setup(t.name);

  await rig.run(
    `show me an example of using the write tool. put a dad joke in dad.txt`,
  );

  const newFileContent = rig.readFile('dad.txt');
  assert.notEqual(newFileContent, '');
});
|
@ -0,0 +1,86 @@
|
|||
{
|
||||
"name": "@qwen/qwen-code",
|
||||
"version": "0.0.1-dev1",
|
||||
"engines": {
|
||||
"node": ">=20"
|
||||
},
|
||||
"type": "module",
|
||||
"workspaces": [
|
||||
"packages/*"
|
||||
],
|
||||
"private": "true",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+http://gitlab.alibaba-inc.com/Qwen-Coder/qwen-code.git"
|
||||
},
|
||||
"config": {
|
||||
"sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.0.1-dev1"
|
||||
},
|
||||
"scripts": {
|
||||
"start": "node scripts/start.js",
|
||||
"debug": "cross-env DEBUG=1 node --inspect-brk scripts/start.js",
|
||||
"auth:npm": "npx google-artifactregistry-auth",
|
||||
"auth:docker": "gcloud auth configure-docker us-west1-docker.pkg.dev",
|
||||
"auth": "npm run auth:npm && npm run auth:docker",
|
||||
"generate": "node scripts/generate-git-commit-info.js",
|
||||
"build": "node scripts/build.js",
|
||||
"build:all": "npm run build && npm run build:sandbox",
|
||||
"build:packages": "npm run build --workspaces",
|
||||
"build:sandbox": "node scripts/build_sandbox.js --skip-npm-install-build",
|
||||
"bundle": "npm run generate && node esbuild.config.js && node scripts/copy_bundle_assets.js",
|
||||
"test": "npm run test --workspaces",
|
||||
"test:ci": "npm run test:ci --workspaces --if-present && npm run test:scripts",
|
||||
"test:scripts": "vitest run --config ./scripts/tests/vitest.config.ts",
|
||||
"test:e2e": "npm run test:integration:sandbox:none -- --verbose --keep-output",
|
||||
"test:integration:all": "npm run test:integration:sandbox:none && npm run test:integration:sandbox:docker && npm run test:integration:sandbox:podman",
|
||||
"test:integration:sandbox:none": "GEMINI_SANDBOX=false node integration-tests/run-tests.js",
|
||||
"test:integration:sandbox:docker": "GEMINI_SANDBOX=docker node integration-tests/run-tests.js",
|
||||
"test:integration:sandbox:podman": "GEMINI_SANDBOX=podman node integration-tests/run-tests.js",
|
||||
"lint": "eslint . --ext .ts,.tsx && eslint integration-tests",
|
||||
"lint:fix": "eslint . --fix && eslint integration-tests --fix",
|
||||
"lint:ci": "eslint . --ext .ts,.tsx --max-warnings 0 && eslint integration-tests --max-warnings 0",
|
||||
"format": "prettier --write .",
|
||||
"typecheck": "npm run typecheck --workspaces --if-present",
|
||||
"preflight": "npm run clean && npm ci && npm run format && npm run lint:ci && npm run build && npm run typecheck && npm run test:ci",
|
||||
"prepare": "npm run bundle",
|
||||
"prepare:package": "node scripts/prepare-package.js",
|
||||
"release:version": "node scripts/version.js",
|
||||
"telemetry": "node scripts/telemetry.js",
|
||||
"clean": "node scripts/clean.js"
|
||||
},
|
||||
"bin": {
|
||||
"qwen": "bundle/gemini.js"
|
||||
},
|
||||
"files": [
|
||||
"bundle/",
|
||||
"README.md",
|
||||
"LICENSE"
|
||||
],
|
||||
"devDependencies": {
|
||||
"@types/micromatch": "^4.0.9",
|
||||
"@types/mime-types": "^3.0.1",
|
||||
"@types/minimatch": "^5.1.2",
|
||||
"@types/semver": "^7.7.0",
|
||||
"@types/shell-quote": "^1.7.5",
|
||||
"@vitest/coverage-v8": "^3.1.1",
|
||||
"concurrently": "^9.2.0",
|
||||
"cross-env": "^7.0.3",
|
||||
"esbuild": "^0.25.0",
|
||||
"eslint": "^9.24.0",
|
||||
"eslint-config-prettier": "^10.1.2",
|
||||
"eslint-plugin-import": "^2.31.0",
|
||||
"eslint-plugin-license-header": "^0.8.0",
|
||||
"eslint-plugin-react": "^7.37.5",
|
||||
"eslint-plugin-react-hooks": "^5.2.0",
|
||||
"glob": "^10.4.5",
|
||||
"globals": "^16.0.0",
|
||||
"json": "^11.0.0",
|
||||
"lodash": "^4.17.21",
|
||||
"memfs": "^4.17.2",
|
||||
"prettier": "^3.5.3",
|
||||
"react-devtools-core": "^4.28.5",
|
||||
"typescript-eslint": "^8.30.1",
|
||||
"vitest": "^3.2.4",
|
||||
"yargs": "^18.0.0"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,21 @@
|
|||
#!/usr/bin/env node
|
||||
|
||||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import './src/gemini.js';
|
||||
import { main } from './src/gemini.js';
|
||||
|
||||
// --- Global Entry Point ---
|
||||
main().catch((error) => {
|
||||
console.error('An unexpected critical error occurred:');
|
||||
if (error instanceof Error) {
|
||||
console.error(error.stack);
|
||||
} else {
|
||||
console.error(String(error));
|
||||
}
|
||||
process.exit(1);
|
||||
});
|
|
@ -0,0 +1,79 @@
|
|||
{
|
||||
"name": "@qwen/qwen-code",
|
||||
"version": "0.0.1-dev1",
|
||||
"description": "Gemini CLI",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
"url": "git+http://gitlab.alibaba-inc.com/Qwen-Coder/qwen-code.git"
|
||||
},
|
||||
"type": "module",
|
||||
"main": "dist/index.js",
|
||||
"bin": {
|
||||
"qwen": "dist/index.js"
|
||||
},
|
||||
"scripts": {
|
||||
"build": "node ../../scripts/build_package.js",
|
||||
"start": "node dist/index.js",
|
||||
"debug": "node --inspect-brk dist/index.js",
|
||||
"lint": "eslint . --ext .ts,.tsx",
|
||||
"format": "prettier --write .",
|
||||
"test": "vitest run",
|
||||
"test:ci": "vitest run --coverage",
|
||||
"typecheck": "tsc --noEmit"
|
||||
},
|
||||
"files": [
|
||||
"dist"
|
||||
],
|
||||
"config": {
|
||||
"sandboxImageUri": "us-docker.pkg.dev/gemini-code-dev/gemini-cli/sandbox:0.0.1-dev1"
|
||||
},
|
||||
"dependencies": {
|
||||
"@qwen/qwen-code-core": "file:../core",
|
||||
"@types/update-notifier": "^6.0.8",
|
||||
"command-exists": "^1.2.9",
|
||||
"diff": "^7.0.0",
|
||||
"dotenv": "^17.1.0",
|
||||
"gaxios": "^7.1.1",
|
||||
"glob": "^10.4.1",
|
||||
"highlight.js": "^11.11.1",
|
||||
"ink": "^6.0.1",
|
||||
"ink-big-text": "^2.0.0",
|
||||
"ink-gradient": "^3.0.0",
|
||||
"ink-link": "^4.1.0",
|
||||
"ink-select-input": "^6.2.0",
|
||||
"ink-spinner": "^5.0.0",
|
||||
"lowlight": "^3.3.0",
|
||||
"mime-types": "^3.0.1",
|
||||
"open": "^10.1.2",
|
||||
"react": "^19.1.0",
|
||||
"read-package-up": "^11.0.0",
|
||||
"shell-quote": "^1.8.3",
|
||||
"string-width": "^7.1.0",
|
||||
"strip-ansi": "^7.1.0",
|
||||
"strip-json-comments": "^3.1.1",
|
||||
"update-notifier": "^7.3.1",
|
||||
"yargs": "^18.0.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/runtime": "^7.27.6",
|
||||
"@testing-library/react": "^16.3.0",
|
||||
"@types/command-exists": "^1.2.3",
|
||||
"@types/diff": "^7.0.2",
|
||||
"@types/dotenv": "^6.1.1",
|
||||
"@types/node": "^20.11.24",
|
||||
"@types/react": "^19.1.8",
|
||||
"@types/react-dom": "^19.1.6",
|
||||
"@types/semver": "^7.7.0",
|
||||
"@types/shell-quote": "^1.7.5",
|
||||
"@types/yargs": "^17.0.32",
|
||||
"ink-testing-library": "^4.0.0",
|
||||
"jsdom": "^26.1.0",
|
||||
"pretty-format": "^30.0.2",
|
||||
"react-dom": "^19.1.0",
|
||||
"typescript": "^5.3.3",
|
||||
"vitest": "^3.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=20"
|
||||
}
|
||||
}
|
|
@ -0,0 +1,75 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { AuthType } from '@qwen/qwen-code-core';
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest';
import { validateAuthMethod } from './auth.js';
|
||||
|
||||
vi.mock('./settings.js', () => ({
|
||||
loadEnvironment: vi.fn(),
|
||||
}));
|
||||
|
||||
describe('validateAuthMethod', () => {
|
||||
const originalEnv = process.env;
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules();
|
||||
process.env = {};
|
||||
});
|
||||
|
||||
afterEach(() => {
|
||||
process.env = originalEnv;
|
||||
});
|
||||
|
||||
it('should return null for LOGIN_WITH_GOOGLE', () => {
|
||||
expect(validateAuthMethod(AuthType.LOGIN_WITH_GOOGLE)).toBeNull();
|
||||
});
|
||||
|
||||
it('should return null for CLOUD_SHELL', () => {
|
||||
expect(validateAuthMethod(AuthType.CLOUD_SHELL)).toBeNull();
|
||||
});
|
||||
|
||||
describe('USE_GEMINI', () => {
|
||||
it('should return null if GEMINI_API_KEY is set', () => {
|
||||
process.env.GEMINI_API_KEY = 'test-key';
|
||||
expect(validateAuthMethod(AuthType.USE_GEMINI)).toBeNull();
|
||||
});
|
||||
|
||||
it('should return an error message if GEMINI_API_KEY is not set', () => {
|
||||
expect(validateAuthMethod(AuthType.USE_GEMINI)).toBe(
|
||||
'GEMINI_API_KEY environment variable not found. Add that to your environment and try again (no reload needed if using .env)!',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
describe('USE_VERTEX_AI', () => {
|
||||
it('should return null if GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION are set', () => {
|
||||
process.env.GOOGLE_CLOUD_PROJECT = 'test-project';
|
||||
process.env.GOOGLE_CLOUD_LOCATION = 'test-location';
|
||||
expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull();
|
||||
});
|
||||
|
||||
it('should return null if GOOGLE_API_KEY is set', () => {
|
||||
process.env.GOOGLE_API_KEY = 'test-api-key';
|
||||
expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBeNull();
|
||||
});
|
||||
|
||||
it('should return an error message if no required environment variables are set', () => {
|
||||
expect(validateAuthMethod(AuthType.USE_VERTEX_AI)).toBe(
|
||||
'When using Vertex AI, you must specify either:\n' +
|
||||
'• GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables.\n' +
|
||||
'• GOOGLE_API_KEY environment variable (if using express mode).\n' +
|
||||
'Update your environment and try again (no reload needed if using .env)!',
|
||||
);
|
||||
});
|
||||
});
|
||||
|
||||
it('should return an error message for an invalid auth method', () => {
|
||||
expect(validateAuthMethod('invalid-method')).toBe(
|
||||
'Invalid auth method selected.',
|
||||
);
|
||||
});
|
||||
});
|
|
@ -0,0 +1,61 @@
|
|||
/**
|
||||
* @license
|
||||
* Copyright 2025 Google LLC
|
||||
* SPDX-License-Identifier: Apache-2.0
|
||||
*/
|
||||
|
||||
import { AuthType } from '@qwen/qwen-code-core';
|
||||
import { loadEnvironment } from './settings.js';
|
||||
|
||||
export const validateAuthMethod = (authMethod: string): string | null => {
|
||||
loadEnvironment();
|
||||
if (
|
||||
authMethod === AuthType.LOGIN_WITH_GOOGLE ||
|
||||
authMethod === AuthType.CLOUD_SHELL
|
||||
) {
|
||||
return null;
|
||||
}
|
||||
|
||||
if (authMethod === AuthType.USE_GEMINI) {
|
||||
if (!process.env.GEMINI_API_KEY) {
|
||||
return 'GEMINI_API_KEY environment variable not found. Add that to your environment and try again (no reload needed if using .env)!';
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
if (authMethod === AuthType.USE_VERTEX_AI) {
|
||||
const hasVertexProjectLocationConfig =
|
||||
!!process.env.GOOGLE_CLOUD_PROJECT && !!process.env.GOOGLE_CLOUD_LOCATION;
|
||||
const hasGoogleApiKey = !!process.env.GOOGLE_API_KEY;
|
||||
if (!hasVertexProjectLocationConfig && !hasGoogleApiKey) {
|
||||
return (
|
||||
'When using Vertex AI, you must specify either:\n' +
|
||||
'• GOOGLE_CLOUD_PROJECT and GOOGLE_CLOUD_LOCATION environment variables.\n' +
|
||||
'• GOOGLE_API_KEY environment variable (if using express mode).\n' +
|
||||
'Update your environment and try again (no reload needed if using .env)!'
|
||||
);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
if (authMethod === AuthType.USE_OPENAI) {
|
||||
if (!process.env.OPENAI_API_KEY) {
|
||||
return 'OPENAI_API_KEY environment variable not found. You can enter it interactively or add it to your .env file.';
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
return 'Invalid auth method selected.';
|
||||
};
|
||||
|
||||
export const setOpenAIApiKey = (apiKey: string): void => {
|
||||
process.env.OPENAI_API_KEY = apiKey;
|
||||
};
|
||||
|
||||
export const setOpenAIBaseUrl = (baseUrl: string): void => {
|
||||
process.env.OPENAI_BASE_URL = baseUrl;
|
||||
};
|
||||
|
||||
export const setOpenAIModel = (model: string): void => {
|
||||
process.env.OPENAI_MODEL = model;
|
||||
};
|