first
This commit is contained in:
		
						commit
						4724d04367
					
				
							
								
								
									
										42
									
								
								Dockerfile
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										42
									
								
								Dockerfile
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,42 @@ | ||||
FROM ubuntu:24.04

LABEL Name="mcp-server" Version="1.0"

# Set non-interactive mode for apt
ENV DEBIAN_FRONTEND=noninteractive

# Install system tools including Node.js (single layer; apt lists removed afterwards to shrink the image)
RUN apt-get update && \
    apt-get install -y \
    python3 python3-pip python3-venv python3-full \
    git curl ca-certificates unzip wget \
    make gcc g++ && \
    # Install Node.js 20.x
    curl -fsSL https://deb.nodesource.com/setup_20.x | bash - && \
    apt-get install -y nodejs && \
    rm -rf /var/lib/apt/lists/*

# Create and activate virtual environment ("activation" is done by putting it first on PATH)
RUN python3 -m venv /opt/venv
ENV PATH="/opt/venv/bin:$PATH"

# Install Node.js tools: node-gyp for native builds, gemini-cli used by main.py's Gemini backend
RUN npm install -g node-gyp @google/gemini-cli && \
    # Verify installation; if npm did not link the binary, symlink the CLI entry point manually
    which gemini || ln -s /usr/local/lib/node_modules/@google/gemini-cli/bin/gemini.js /usr/local/bin/gemini

# Workdir setup
WORKDIR /app

# Install Python dependencies first so this layer is cached independently of source changes
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

# Copy project files
COPY . .

# Expose port (must match the PORT the app listens on; see docker-compose.yml)
EXPOSE 8000

# Entrypoint
CMD ["python3", "main.py"]
							
								
								
									
										233
									
								
								README.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										233
									
								
								README.md
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,233 @@ | ||||
| # MCP Server - AI-Powered Code Editor | ||||
| 
 | ||||
| A comprehensive server that automatically clones Gitea repositories, analyzes code with AI models (Gemini/OpenAI), applies intelligent code changes, and commits them back to the repository. | ||||
| 
 | ||||
| ## 🚀 Features | ||||
| 
 | ||||
| - **Repository Management**: Clone repositories from Gitea with authentication | ||||
| - **AI-Powered Analysis**: Use Gemini CLI or OpenAI to analyze and edit code | ||||
| - **Model Selection**: Choose specific AI models (e.g., gemini-1.5-pro, gpt-4) | ||||
| - **Real-time Progress Tracking**: Web interface with live status updates | ||||
| - **Modern UI**: Beautiful, responsive frontend with progress indicators | ||||
| - **Background Processing**: Asynchronous task processing with status monitoring | ||||
| - **Comprehensive Logging**: Full logging to both console and file | ||||
| - **Docker Support**: Easy deployment with Docker and docker-compose | ||||
| 
 | ||||
| ## 📋 Prerequisites | ||||
| 
 | ||||
| - Python 3.8+ | ||||
| - Git | ||||
| - API keys for AI models (Gemini or OpenAI) | ||||
| 
 | ||||
| ## 🛠️ Installation | ||||
| 
 | ||||
| ### Option 1: Docker (Recommended) | ||||
| 
 | ||||
| 1. **Clone the repository** | ||||
|    ```bash | ||||
|    git clone <your-repo-url> | ||||
|    cd mcp-server | ||||
|    ``` | ||||
| 
 | ||||
| 2. **Build and run with Docker Compose** | ||||
|    ```bash | ||||
|    docker-compose up --build | ||||
|    ``` | ||||
| 
 | ||||
| 3. **Or build and run manually** | ||||
|    ```bash | ||||
|    docker build -t mcp-server . | ||||
|    docker run -p 8000:8000 mcp-server | ||||
|    ``` | ||||
| 
 | ||||
| ### Option 2: Local Installation | ||||
| 
 | ||||
| 1. **Clone the repository** | ||||
|    ```bash | ||||
|    git clone <your-repo-url> | ||||
|    cd mcp-server | ||||
|    ``` | ||||
| 
 | ||||
| 2. **Install Python dependencies** | ||||
|    ```bash | ||||
|    pip install -r requirements.txt | ||||
|    ``` | ||||
| 
 | ||||
3. **Install Gemini CLI (if using Gemini)**
   ```bash
   # Install via npm (the same method the Dockerfile uses)
   npm install -g @google/gemini-cli
   ```
| 
 | ||||
| 4. **Start the server** | ||||
|    ```bash | ||||
|    python main.py | ||||
|    # or | ||||
|    python start.py | ||||
|    ``` | ||||
| 
 | ||||
| ## 🚀 Usage | ||||
| 
 | ||||
| ### Using the Web Interface | ||||
| 
 | ||||
| 1. Open your browser and navigate to `http://localhost:8000` | ||||
| 2. Fill in the repository details: | ||||
|    - **Gitea Repository URL**: Your repository URL (e.g., `http://157.66.191.31:3000/user/repo.git`) | ||||
|    - **Gitea Token**: Your Gitea access token (get from Settings → Applications → Generate new token) | ||||
|    - **AI Model**: Choose between Gemini CLI or OpenAI | ||||
|    - **Model Name**: Specify the exact model (e.g., `gemini-1.5-pro`, `gpt-4`) | ||||
|    - **API Key**: Your AI model API key | ||||
|    - **Prompt**: Describe what changes you want to make to the code | ||||
| 
 | ||||
| 3. Click "Process Repository" and monitor the progress | ||||
| 
 | ||||
| ### API Endpoints | ||||
| 
 | ||||
| - `GET /` - Web interface | ||||
| - `POST /process` - Start repository processing | ||||
| - `GET /status/{task_id}` - Get processing status | ||||
| - `GET /health` - Health check | ||||
| 
 | ||||
| ## 🔧 Configuration | ||||
| 
 | ||||
| ### Environment Variables | ||||
| 
 | ||||
| | Variable | Description | Default | | ||||
| |----------|-------------|---------| | ||||
| | `HOST` | Server host | `0.0.0.0` | | ||||
| | `PORT` | Server port | `8000` | | ||||
| 
 | ||||
| ### Supported AI Models | ||||
| 
 | ||||
| **Gemini Models:** | ||||
| - `gemini-1.5-pro` (recommended) | ||||
| - `gemini-1.5-flash` | ||||
| - `gemini-1.0-pro` | ||||
| 
 | ||||
| **OpenAI Models:** | ||||
| - `gpt-4` | ||||
| - `gpt-4-turbo` | ||||
| - `gpt-3.5-turbo` | ||||
| 
 | ||||
| ### Supported File Types | ||||
| 
 | ||||
| The system analyzes and can modify: | ||||
| - Python (`.py`) | ||||
| - JavaScript (`.js`, `.jsx`) | ||||
| - TypeScript (`.ts`, `.tsx`) | ||||
| - HTML (`.html`) | ||||
| - CSS (`.css`) | ||||
| - JSON (`.json`) | ||||
| - Markdown (`.md`) | ||||
| 
 | ||||
| ## 📁 Project Structure | ||||
| 
 | ||||
| ``` | ||||
| mcp-server/ | ||||
| ├── main.py              # FastAPI application | ||||
| ├── requirements.txt     # Python dependencies | ||||
| ├── Dockerfile          # Docker configuration | ||||
| ├── docker-compose.yml  # Docker Compose configuration | ||||
| ├── README.md           # This file | ||||
| ├── templates/ | ||||
| │   └── index.html      # Frontend template | ||||
| ├── static/ | ||||
| │   ├── style.css       # Frontend styles | ||||
| │   └── script.js       # Frontend JavaScript | ||||
| └── logs/               # Log files (created by Docker) | ||||
| ``` | ||||
| 
 | ||||
| ## 🔄 How It Works | ||||
| 
 | ||||
| 1. **Repository Cloning**: Authenticates with Gitea and clones the repository | ||||
| 2. **AI Analysis**: Sends code and prompt to selected AI model | ||||
| 3. **Code Modification**: Applies AI-suggested changes to the codebase | ||||
| 4. **Commit & Push**: Commits changes and pushes back to Gitea | ||||
| 
 | ||||
| ## 🎯 Example Prompts | ||||
| 
 | ||||
| - "Add error handling to all API endpoints" | ||||
| - "Optimize database queries for better performance" | ||||
| - "Add comprehensive logging throughout the application" | ||||
| - "Refactor the authentication system to use JWT tokens" | ||||
| - "Add unit tests for all utility functions" | ||||
| 
 | ||||
| ## 📊 Logging | ||||
| 
 | ||||
| The server provides comprehensive logging: | ||||
| - **Console Output**: Real-time logs in the terminal | ||||
| - **File Logging**: Logs saved to `mcp_server.log` | ||||
| - **Task-specific Logging**: Each task has detailed logging with task ID | ||||
| 
 | ||||
| ### Viewing Logs | ||||
| 
 | ||||
| **Docker:** | ||||
| ```bash | ||||
| # View container logs | ||||
| docker logs <container_id> | ||||
| 
 | ||||
| # Follow logs in real-time | ||||
| docker logs -f <container_id> | ||||
| ``` | ||||
| 
 | ||||
| **Local:** | ||||
| ```bash | ||||
| # View log file | ||||
| tail -f mcp_server.log | ||||
| ``` | ||||
| 
 | ||||
| ## 🔒 Security Considerations | ||||
| 
 | ||||
| - API keys are sent from frontend and not stored | ||||
| - Use HTTPS in production | ||||
| - Implement proper authentication for the web interface | ||||
| - Regularly update dependencies | ||||
| - Monitor API usage and costs | ||||
| 
 | ||||
| ## 🐛 Troubleshooting | ||||
| 
 | ||||
| ### Common Issues | ||||
| 
 | ||||
| 1. **Repository cloning fails** | ||||
|    - Verify Gitea token is valid and has repository access | ||||
|    - Check repository URL format | ||||
|    - Ensure repository exists and is accessible | ||||
|    - Make sure token has appropriate permissions (read/write) | ||||
| 
 | ||||
| 2. **AI model errors** | ||||
|    - Verify API keys are correct | ||||
|    - Check model name spelling | ||||
|    - Ensure internet connectivity | ||||
| 
 | ||||
3. **Gemini CLI not found**
   - Install Gemini CLI with npm (same method as the Dockerfile): `npm install -g @google/gemini-cli`
| 
 | ||||
| ### Logs | ||||
| 
 | ||||
| Check the logs for detailed error messages and processing status: | ||||
| - **Frontend**: Real-time logs in the web interface | ||||
| - **Backend**: Console and file logs with detailed information | ||||
| 
 | ||||
| ## 🤝 Contributing | ||||
| 
 | ||||
| 1. Fork the repository | ||||
| 2. Create a feature branch | ||||
| 3. Make your changes | ||||
| 4. Add tests if applicable | ||||
| 5. Submit a pull request | ||||
| 
 | ||||
| ## 📄 License | ||||
| 
 | ||||
| This project is licensed under the MIT License - see the LICENSE file for details. | ||||
| 
 | ||||
| ## 🆘 Support | ||||
| 
 | ||||
| For issues and questions: | ||||
| 1. Check the troubleshooting section | ||||
| 2. Review the logs in the web interface and console | ||||
| 3. Create an issue in the repository | ||||
| 
 | ||||
| --- | ||||
| 
 | ||||
| **Note**: This tool modifies code automatically. Always review changes before deploying to production environments.  | ||||
							
								
								
									
										47
									
								
								create_gitea_token.md
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										47
									
								
								create_gitea_token.md
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,47 @@ | ||||
| # How to Create a Gitea Token | ||||
| 
 | ||||
| ## Step-by-Step Guide | ||||
| 
 | ||||
| ### 1. Log into your Gitea instance | ||||
| - Go to your Gitea URL (e.g., `http://157.66.191.31:3000`) | ||||
| - Log in with your username and password | ||||
| 
 | ||||
| ### 2. Navigate to Settings | ||||
| - Click on your profile picture in the top-right corner | ||||
| - Select "Settings" from the dropdown menu | ||||
| 
 | ||||
| ### 3. Go to Applications | ||||
| - In the left sidebar, click on "Applications" | ||||
| - Look for "Manage Access Tokens" or "Tokens" | ||||
| 
 | ||||
| ### 4. Generate New Token | ||||
| - Click "Generate new token" | ||||
| - Fill in the form: | ||||
|   - **Token Name**: Give it a descriptive name (e.g., "MCP Server") | ||||
|   - **Scopes**: Select the following permissions: | ||||
|     - ✅ `repo` (Full control of private repositories) | ||||
|     - ✅ `write:packages` (Write packages) | ||||
|     - ✅ `read:packages` (Read packages) | ||||
| 
 | ||||
| ### 5. Copy the Token | ||||
| - Click "Generate token" | ||||
| - **IMPORTANT**: Copy the token immediately - you won't be able to see it again! | ||||
| - The token will look like: `37c322628fa57b0ec7b481c8655ae2bebd486f6f` | ||||
| 
 | ||||
### 6. Use in MCP Server
- Paste the token in the "Gitea Token" field in the MCP Server interface
- Always generate your own token — never reuse a shared or published example token
| 
 | ||||
| ## Security Notes | ||||
| 
 | ||||
| - **Never share your token** - it provides access to your repositories | ||||
| - **Store tokens securely** - don't commit them to version control | ||||
| - **Use different tokens** for different applications | ||||
| - **Revoke tokens** when no longer needed | ||||
| 
 | ||||
## Example Token (format only)
```
<40-character hexadecimal string, e.g. 0123456789abcdef0123456789abcdef01234567>
```

Never publish or commit a real token — it grants access to your repositories. If a real token has ever appeared in documentation or version control, revoke it immediately and generate a new one.
							
								
								
									
										22
									
								
								docker-compose.yml
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										22
									
								
								docker-compose.yml
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,22 @@ | ||||
# NOTE(review): the top-level `version` key is obsolete under Compose V2 and is ignored — confirm before removing.
version: '3.8'

services:
  mcp-server:
    build: .
    ports:
      - "8000:8000"  # host:container — the FastAPI app listens on 8000
    environment:
      - HOST=0.0.0.0
      - PORT=8000
    volumes:
      - ./logs:/app/logs    # persist log files on the host
      - mcp-data:/app/data  # persistent clone workspace used by main.py
    restart: unless-stopped
    healthcheck:
      # Probe the app's /health endpoint from inside the container
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

volumes:
  mcp-data:
							
								
								
									
										20
									
								
								env.example
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										20
									
								
								env.example
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,20 @@ | ||||
# MCP Server Environment Configuration

# AI Model API Keys
# Get your Gemini API key from: https://makersuite.google.com/app/apikey
GEMINI_API_KEY=your_gemini_api_key_here

# Get your OpenAI API key from: https://platform.openai.com/api-keys
OPENAI_API_KEY=your_openai_api_key_here

# Server Configuration
HOST=0.0.0.0
PORT=8000

# Gitea Configuration (default values)
# SECURITY: keep placeholders here — never commit real credentials to version control.
DEFAULT_GITEA_URL=http://157.66.191.31:3000
DEFAULT_USERNAME=your_gitea_username_here
DEFAULT_PASSWORD=your_gitea_password_here

# Logging
LOG_LEVEL=INFO
							
								
								
									
										390
									
								
								main.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										390
									
								
								main.py
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,390 @@ | ||||
| import os | ||||
| import shutil | ||||
| import subprocess | ||||
| import tempfile | ||||
| import asyncio | ||||
| import logging | ||||
| from pathlib import Path | ||||
| from typing import Optional, Dict, Any | ||||
| import json | ||||
| 
 | ||||
| from fastapi import FastAPI, HTTPException, BackgroundTasks, Request | ||||
| from fastapi.staticfiles import StaticFiles | ||||
| from fastapi.responses import HTMLResponse | ||||
| from fastapi.templating import Jinja2Templates | ||||
| from pydantic import BaseModel | ||||
| import git | ||||
| import requests | ||||
| from dotenv import load_dotenv | ||||
| 
 | ||||
# Load environment variables from a local .env file, if present.
load_dotenv()

# Configure logging: stream to the console and append to mcp_server.log.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.StreamHandler(),
        logging.FileHandler('mcp_server.log')
    ]
)
logger = logging.getLogger(__name__)

app = FastAPI(title="MCP Server", description="AI-powered code editing server")

# Mount static files and templates (served from ./static and ./templates).
app.mount("/static", StaticFiles(directory="static"), name="static")
templates = Jinja2Templates(directory="templates")
| 
 | ||||
# Models
class GiteaRequest(BaseModel):
    """Request payload for /process: repository location, credentials, and AI settings."""
    repo_url: str
    token: str  # Gitea access token (used instead of username/password)
    prompt: str
    ai_model: str = "gemini"  # "gemini" or "openai" (dispatched in _analyze_with_ai)
    model_name: str = "gemini-1.5-pro"  # specific model name passed to the chosen provider
    api_key: str  # AI provider API key supplied by the frontend
| 
 | ||||
class ProcessResponse(BaseModel):
    """Response for /process: the generated task id plus its initial status/message."""
    task_id: str
    status: str
    message: str
| 
 | ||||
# Global in-memory storage for task status, keyed by task id.
# NOTE(review): not persisted and not lock-guarded — appears to assume a
# single asyncio event loop / single worker; confirm deployment model.
task_status = {}
| 
 | ||||
class MCPServer:
    """Clone a Gitea repository, apply AI-suggested edits, and push them back.

    NOTE(review): a single shared instance stores the clone location in
    ``self.repo_path``, so concurrently running tasks would overwrite each
    other's state — confirm that only one background task runs at a time.
    """

    def __init__(self):
        # Filesystem path of the clone being processed (set per task).
        self.repo_path = None

    def _update_status(self, task_id: str, status: str, message: str):
        """Merge a status/message change into the task record.

        Updates the dict in place instead of replacing it, so extra keys
        (e.g. the stored ``ai_response``) survive later status changes.
        """
        entry = task_status.setdefault(task_id, {})
        entry["status"] = status
        entry["message"] = message

    async def process_repository(self, task_id: str, request: GiteaRequest):
        """Main processing function: clone -> AI analysis -> commit & push."""
        try:
            logger.info(f"Task {task_id}: Starting process...")
            self._update_status(task_id, "processing", "Starting process...")

            # Step 1: Clone repository
            await self._clone_repository(task_id, request)

            # Step 2: Analyze code with AI
            await self._analyze_with_ai(task_id, request)

            # Step 3: Commit and push changes
            await self._commit_and_push(task_id, request)

            logger.info(f"Task {task_id}: Successfully processed repository")
            self._update_status(task_id, "completed", "Successfully processed repository")

        except Exception as e:
            logger.error(f"Task {task_id}: Error - {str(e)}")
            self._update_status(task_id, "error", str(e))
            # Do not delete the repo directory; keep it for inspection.

    async def _clone_repository(self, task_id: str, request: GiteaRequest):
        """Clone the repository from Gitea into a persistent directory.

        Raises:
            Exception: if the clone fails or times out. The token is
            redacted from git's error text before logging or re-raising,
            since git echoes the (token-bearing) remote URL on failure.
        """
        logger.info(f"Task {task_id}: Cloning repository...")
        self._update_status(task_id, "processing", "Cloning repository...")

        # Extract repo name from URL
        repo_name = request.repo_url.split('/')[-1].replace('.git', '')
        # Persistent directory under /app/data (a named volume in docker-compose.yml).
        data_dir = "/app/data"
        os.makedirs(data_dir, exist_ok=True)
        self.repo_path = os.path.join(data_dir, f"{repo_name}_{task_id}")
        try:
            os.chmod(data_dir, 0o777)  # Give full permissions to the data dir
            logger.info(f"Task {task_id}: Created/using data directory: {self.repo_path}")
        except Exception as e:
            logger.warning(f"Task {task_id}: Could not set permissions on data dir: {e}")

        # Embed the token in the clone URL for authentication.
        auth_url = request.repo_url.replace('://', f'://{request.token}@')

        try:
            result = subprocess.run(
                ['git', 'clone', auth_url, self.repo_path],
                capture_output=True,
                text=True,
                timeout=300  # 5 minutes timeout
            )
        except subprocess.TimeoutExpired:
            raise Exception("Repository cloning timed out after 5 minutes")
        except Exception as e:
            raise Exception(f"Failed to clone repository: {str(e)}")

        # Checked outside the try block so this raise is not re-caught and
        # double-wrapped by the generic except above.
        if result.returncode != 0:
            safe_stderr = result.stderr.replace(request.token, '***')
            logger.error(f"Task {task_id}: Git clone error - {safe_stderr}")
            raise Exception(f"Failed to clone repository: {safe_stderr}")

        logger.info(f"Task {task_id}: Successfully cloned repository to {self.repo_path}")

    async def _analyze_with_ai(self, task_id: str, request: GiteaRequest):
        """Dispatch code analysis/editing to the selected AI backend."""
        logger.info(f"Task {task_id}: Analyzing code with AI...")
        self._update_status(task_id, "processing", "Analyzing code with AI...")

        if request.ai_model == "gemini":
            await self._use_gemini_cli(task_id, request.prompt, request.api_key, request.model_name)
        elif request.ai_model == "openai":
            await self._use_openai_ai(task_id, request.prompt, request.api_key, request.model_name)
        else:
            raise Exception(f"Unsupported AI model: {request.ai_model}")

    def _build_ai_prompt(self, prompt: str, code_content: str) -> str:
        """Assemble the instruction prompt shared by the Gemini and OpenAI paths."""
        return f"""
            Analyze the following codebase and make the requested changes:

            USER REQUEST: {prompt}

            CODEBASE:
            {code_content}

            Please provide:
            1. A summary of what changes need to be made
            2. The specific file changes in the format:
               FILE: filename.py
               CHANGES:
               [describe changes or provide new code]

            Be specific about which files to modify and what changes to make.
            """

    async def _use_gemini_cli(self, task_id: str, prompt: str, api_key: str, model_name: str):
        """Use the Gemini CLI for code analysis and editing."""
        try:
            # Check if Gemini CLI is installed
            try:
                subprocess.run(["gemini", "--version"], check=True, capture_output=True)
                logger.info(f"Task {task_id}: Gemini CLI is available")
            except (subprocess.CalledProcessError, FileNotFoundError):
                raise Exception("Gemini CLI is not installed. Please install it first: https://github.com/google/generative-ai-go/tree/main/cmd/gemini")

            # Read all code files
            code_content = self._read_code_files()
            logger.info(f"Task {task_id}: Read {len(code_content)} characters of code content")

            ai_prompt = self._build_ai_prompt(prompt, code_content)

            # Set API key as environment variable for Gemini CLI
            env = os.environ.copy()
            env['GEMINI_API_KEY'] = api_key

            logger.info(f"Task {task_id}: Calling Gemini CLI with model: {model_name}")

            # Call Gemini CLI with the specific model, passing the prompt via stdin
            result = subprocess.run(
                ["gemini", "generate", "--model", model_name],
                input=ai_prompt,
                capture_output=True,
                text=True,
                env=env,
                cwd=self.repo_path,
                timeout=600  # 10 minutes timeout
            )

            if result.returncode != 0:
                logger.error(f"Task {task_id}: Gemini CLI error - {result.stderr}")
                raise Exception(f"Gemini CLI error: {result.stderr}")

            logger.info(f"Task {task_id}: Gemini CLI response received ({len(result.stdout)} characters)")
            logger.info(f"Task {task_id}: Gemini CLI raw response:\n{result.stdout}")
            # Store the raw AI response for frontend display; _update_status
            # merges rather than replaces, so this key is preserved.
            task_status.setdefault(task_id, {})["ai_response"] = result.stdout
            # Parse and apply changes
            await self._apply_ai_changes(result.stdout, task_id)

        except subprocess.TimeoutExpired:
            raise Exception("Gemini CLI request timed out after 10 minutes")
        except Exception as e:
            raise Exception(f"Gemini CLI error: {str(e)}")

    async def _use_openai_ai(self, task_id: str, prompt: str, api_key: str, model_name: str):
        """Use OpenAI chat completions for code analysis and editing."""
        try:
            from openai import OpenAI

            # Configure OpenAI with the API key supplied by the frontend
            client = OpenAI(api_key=api_key)

            # Read all code files
            code_content = self._read_code_files()
            logger.info(f"Task {task_id}: Read {len(code_content)} characters of code content")

            ai_prompt = self._build_ai_prompt(prompt, code_content)

            logger.info(f"Task {task_id}: Calling OpenAI with model: {model_name}")

            # Get AI response
            response = client.chat.completions.create(
                model=model_name,
                messages=[
                    {"role": "system", "content": "You are a code analysis and editing assistant."},
                    {"role": "user", "content": ai_prompt}
                ]
            )

            logger.info(f"Task {task_id}: OpenAI response received")

            # Parse and apply changes
            await self._apply_ai_changes(response.choices[0].message.content, task_id)

        except ImportError:
            raise Exception("OpenAI library not installed. Run: pip install openai")
        except Exception as e:
            raise Exception(f"OpenAI error: {str(e)}")

    def _read_code_files(self) -> str:
        """Concatenate the text of all recognized code files in the clone.

        Returns one string with ``=== relative/path ===`` separators per file;
        unreadable files are skipped with a warning. The .git directory is
        excluded from the walk.
        """
        code_content = ""
        file_count = 0

        for root, dirs, files in os.walk(self.repo_path):
            # Skip .git directory
            if '.git' in dirs:
                dirs.remove('.git')

            for file in files:
                if file.endswith(('.py', '.js', '.ts', '.jsx', '.tsx', '.html', '.css', '.json', '.md')):
                    file_path = os.path.join(root, file)
                    try:
                        with open(file_path, 'r', encoding='utf-8') as f:
                            content = f.read()
                            relative_path = os.path.relpath(file_path, self.repo_path)
                            code_content += f"\n\n=== {relative_path} ===\n{content}\n"
                            file_count += 1
                    except Exception as e:
                        logger.warning(f"Could not read {file_path}: {e}")

        logger.info(f"Read {file_count} code files")
        return code_content

    async def _apply_ai_changes(self, ai_response: str, task_id: str):
        """Parse the AI response for FILE:/CHANGES: sections and apply them."""
        logger.info(f"Task {task_id}: Applying AI suggestions...")
        self._update_status(task_id, "processing", "Applying AI suggestions...")

        # Simplified parser: accumulate lines under the most recent FILE: header.
        lines = ai_response.split('\n')
        current_file = None
        current_changes = []
        files_modified = 0

        for line in lines:
            if line.startswith('FILE:'):
                if current_file and current_changes:
                    await self._apply_file_changes(current_file, '\n'.join(current_changes))
                    files_modified += 1
                current_file = line.replace('FILE:', '').strip()
                current_changes = []
            elif line.startswith('CHANGES:') or line.strip() == '':
                continue
            elif current_file:
                current_changes.append(line)

        # Apply changes for the final FILE: section.
        if current_file and current_changes:
            await self._apply_file_changes(current_file, '\n'.join(current_changes))
            files_modified += 1

        logger.info(f"Task {task_id}: Applied changes to {files_modified} files")

    async def _apply_file_changes(self, filename: str, changes: str):
        """Append AI-suggested changes to an existing file in the clone."""
        file_path = os.path.join(self.repo_path, filename)

        if os.path.exists(file_path):
            # For now, append the changes; a real implementation would do
            # more sophisticated patch parsing and in-place replacement.
            with open(file_path, 'a', encoding='utf-8') as f:
                f.write(f"\n\n# AI Generated Changes:\n{changes}\n")
            logger.info(f"Applied changes to file: {filename}")

    async def _commit_and_push(self, task_id: str, request: GiteaRequest):
        """Commit any modifications and push them back to the Gitea remote."""
        logger.info(f"Task {task_id}: Committing and pushing changes...")
        self._update_status(task_id, "processing", "Committing and pushing changes...")

        try:
            repo = git.Repo(self.repo_path)

            # Add all changes (including untracked files)
            repo.git.add('.')

            # Check if there are changes to commit
            if repo.is_dirty():
                # Commit changes
                repo.index.commit("AI-generated code updates")
                logger.info(f"Task {task_id}: Changes committed")

                # Push changes to the 'origin' remote (credentials embedded at clone time)
                origin = repo.remote(name='origin')
                origin.push()
                logger.info(f"Task {task_id}: Changes pushed to remote")
            else:
                logger.info(f"Task {task_id}: No changes to commit")
            # Remove the cloned repo directory after a successful push.
            if self.repo_path and os.path.exists(self.repo_path):
                shutil.rmtree(self.repo_path)
                logger.info(f"Task {task_id}: Removed cloned repo directory {self.repo_path}")
        except Exception as e:
            raise Exception(f"Failed to commit and push changes: {str(e)}")
| 
 | ||||
# Module-level MCP server singleton shared by all request handlers below.
mcp_server = MCPServer()
| 
 | ||||
@app.get("/", response_class=HTMLResponse)
async def read_root(request: Request):
    """Render and return the frontend index page."""
    context = {"request": request}
    return templates.TemplateResponse("index.html", context)
| 
 | ||||
@app.post("/process", response_model=ProcessResponse)
async def process_repository(request: GiteaRequest, background_tasks: BackgroundTasks):
    """Kick off AI processing of a repository as a background task.

    Returns immediately with a task id the client can poll via /status.
    """
    import uuid

    # A random UUID keeps task ids unguessable and collision-free.
    task_id = str(uuid.uuid4())
    logger.info(f"Starting new task: {task_id}")

    # The heavy lifting (clone, AI, commit) runs after the response is sent.
    background_tasks.add_task(mcp_server.process_repository, task_id, request)

    return ProcessResponse(task_id=task_id, status="started", message="Processing started")
| 
 | ||||
@app.get("/status/{task_id}")
async def get_status(task_id: str):
    """Return the current status record for a background task (404 if unknown)."""
    status = task_status.get(task_id)
    if status is None:
        raise HTTPException(status_code=404, detail="Task not found")
    return status
| 
 | ||||
@app.get("/health")
async def health_check():
    """Liveness probe: report that the server process is up."""
    payload = {"status": "healthy", "message": "MCP Server is running"}
    return payload
| 
 | ||||
# Script entry point: serve on all interfaces, port 8000 (matches the
# Dockerfile's EXPOSE 8000).
if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=8000)
							
								
								
									
										11
									
								
								requirements.txt
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										11
									
								
								requirements.txt
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,11 @@ | ||||
| fastapi==0.104.1 | ||||
| uvicorn==0.24.0 | ||||
| python-multipart==0.0.6 | ||||
| requests==2.31.0 | ||||
| gitpython==3.1.40 | ||||
| google-generativeai==0.3.2 | ||||
| openai==1.3.7 | ||||
| python-dotenv==1.0.0 | ||||
| pydantic==2.5.0 | ||||
| aiofiles==23.2.1 | ||||
| jinja2==3.1.2  | ||||
							
								
								
									
										65
									
								
								start.bat
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										65
									
								
								start.bat
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,65 @@ | ||||
@echo off
REM ------------------------------------------------------------------
REM Windows bootstrap for the MCP server: verifies Python and pip,
REM installs requirements, seeds .env from env.example when available,
REM then launches main.py. Mirrors start.py for non-Windows hosts.
REM ------------------------------------------------------------------
echo 🤖 MCP Server - AI-Powered Code Editor
echo ================================================
echo.

REM Check if Python is installed
python --version >nul 2>&1
if errorlevel 1 (
    echo ❌ Error: Python is not installed or not in PATH
    echo Please install Python 3.8+ from https://python.org
    pause
    exit /b 1
)

REM Check if pip is available
python -m pip --version >nul 2>&1
if errorlevel 1 (
    echo ❌ Error: pip is not available
    echo Please ensure pip is installed with Python
    pause
    exit /b 1
)

echo ✅ Python is installed
echo.

REM Install dependencies
echo 📦 Installing Python dependencies...
python -m pip install -r requirements.txt
if errorlevel 1 (
    echo ❌ Error installing dependencies
    pause
    exit /b 1
)

echo ✅ Dependencies installed successfully
echo.

REM Check for .env file (copy the template on first run)
if not exist .env (
    if exist env.example (
        copy env.example .env >nul
        echo ✅ Created .env file from template
        echo ⚠️  Please edit .env file and add your API keys
    ) else (
        echo ⚠️  No .env file found and no template available
    )
) else (
    echo ✅ .env file already exists
)

echo.
echo 🚀 Starting MCP Server...
echo 📱 Web interface will be available at: http://localhost:8000
echo 🔧 API documentation at: http://localhost:8000/docs
echo.
echo Press Ctrl+C to stop the server
echo.

REM Start the server (blocks until the server exits)
python main.py

echo.
echo 👋 Server stopped
pause
							
								
								
									
										122
									
								
								start.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										122
									
								
								start.py
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,122 @@ | ||||
| #!/usr/bin/env python3 | ||||
| """ | ||||
| MCP Server Startup Script | ||||
| Handles environment setup and server initialization | ||||
| """ | ||||
| 
 | ||||
| import os | ||||
| import sys | ||||
| import subprocess | ||||
| import shutil | ||||
| from pathlib import Path | ||||
| 
 | ||||
def check_python_version():
    """Abort with an error message unless Python 3.8+ is running."""
    if sys.version_info < (3, 8):
        print("❌ Error: Python 3.8 or higher is required")
        print(f"Current version: {sys.version}")
        sys.exit(1)
    short_version = sys.version.split()[0]
    print(f"✅ Python version: {short_version}")
| 
 | ||||
def check_git():
    """Exit with an install hint if the ``git`` executable is unavailable."""
    try:
        subprocess.run(["git", "--version"], check=True, capture_output=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("❌ Error: Git is not installed or not in PATH")
        print("Please install Git: https://git-scm.com/")
        sys.exit(1)
    print("✅ Git is installed")
| 
 | ||||
def check_node():
    """Warn (but do not abort) when Node.js is missing — it is optional."""
    try:
        subprocess.run(["node", "--version"], check=True, capture_output=True)
    except (subprocess.CalledProcessError, FileNotFoundError):
        print("⚠️  Warning: Node.js is not installed")
        print("This is optional but recommended for projects with package.json")
        return
    print("✅ Node.js is installed")
| 
 | ||||
def install_dependencies():
    """Install packages from requirements.txt, exiting on pip failure."""
    print("\n📦 Installing Python dependencies...")
    pip_cmd = [sys.executable, "-m", "pip", "install", "-r", "requirements.txt"]
    try:
        subprocess.run(pip_cmd, check=True)
    except subprocess.CalledProcessError as e:
        print(f"❌ Error installing dependencies: {e}")
        sys.exit(1)
    print("✅ Dependencies installed successfully")
| 
 | ||||
def setup_environment():
    """Ensure a .env file exists, copying env.example when available."""
    env_file = Path(".env")
    if env_file.exists():
        print("✅ .env file already exists")
        return

    template = Path("env.example")
    if template.exists():
        shutil.copy(template, env_file)
        print("✅ Created .env file from template")
        print("⚠️  Please edit .env file and add your API keys")
    else:
        print("⚠️  No .env file found and no template available")
| 
 | ||||
def check_api_keys():
    """Report which AI API keys are configured; warn when none are set."""
    from dotenv import load_dotenv
    load_dotenv()

    # Provider label -> configured key (order matters for output).
    keys = {
        "Gemini": os.getenv("GEMINI_API_KEY"),
        "OpenAI": os.getenv("OPENAI_API_KEY"),
    }

    if not any(keys.values()):
        print("⚠️  Warning: No API keys configured")
        print("Please add GEMINI_API_KEY or OPENAI_API_KEY to .env file")
        print("You can still start the server, but AI features won't work")
        return

    for provider, key in keys.items():
        if key:
            print(f"✅ {provider} API key configured")
| 
 | ||||
def create_directories():
    """Create the template and static asset directories if absent."""
    for name in ("templates", "static"):
        Path(name).mkdir(exist_ok=True)
    print("✅ Directories created")
| 
 | ||||
def start_server():
    """Launch main.py as a child process; return when it stops."""
    print("\n🚀 Starting MCP Server...")
    print("📱 Web interface will be available at: http://localhost:8000")
    print("🔧 API documentation at: http://localhost:8000/docs")
    print("\nPress Ctrl+C to stop the server")

    try:
        subprocess.run([sys.executable, "main.py"])
    except KeyboardInterrupt:
        print("\n👋 Server stopped")
| 
 | ||||
def main():
    """Run pre-flight checks, prepare the environment, then start the server.

    The sequence is order-dependent: install_dependencies() must run before
    check_api_keys(), which imports python-dotenv from requirements.txt.
    """
    print("🤖 MCP Server - AI-Powered Code Editor")
    print("=" * 50)
    
    # Pre-flight checks (these may sys.exit on hard failures)
    check_python_version()
    check_git()
    check_node()
    
    # Setup: directories, .env, pip install, API-key report
    create_directories()
    setup_environment()
    install_dependencies()
    check_api_keys()
    
    # Start server (blocks until the child process exits)
    start_server()
| 
 | ||||
# Run the bootstrap only when executed directly (not when imported).
if __name__ == "__main__":
    main()
							
								
								
									
										415
									
								
								static/script.js
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										415
									
								
								static/script.js
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,415 @@ | ||||
| // MCP Server Frontend JavaScript
 | ||||
| 
 | ||||
/**
 * Frontend controller for the MCP server UI.
 *
 * Wires the configuration form to the backend (POST /process,
 * GET /status/{id}), polls task status every 2 s, and renders the
 * progress bar, step indicators, AI output pane, and activity log.
 */
class MCPServerFrontend {
    constructor() {
        this.currentTaskId = null;   // id of the task currently being polled
        this.statusInterval = null;  // setInterval handle for status polling
        this.init();
    }

    /** Bind DOM events and seed default form values. */
    init() {
        this.bindEvents();
        this.setDefaultValues();
    }

    /** Attach all button and form-field event handlers. */
    bindEvents() {
        document.getElementById('processBtn').addEventListener('click', () => {
            this.processRepository();
        });

        document.getElementById('clearLog').addEventListener('click', () => {
            this.clearLog();
        });

        document.getElementById('toggleApiKey').addEventListener('click', () => {
            this.toggleApiKeyVisibility();
        });

        document.getElementById('toggleGiteaToken').addEventListener('click', () => {
            this.toggleGiteaTokenVisibility();
        });

        // Any edit to a required field re-runs validation.
        for (const fieldId of ['repoUrl', 'giteaToken', 'prompt', 'apiKey', 'modelName']) {
            document.getElementById(fieldId).addEventListener('input', () => {
                this.validateForm();
            });
        }
    }

    /** Seed non-sensitive default form values. */
    setDefaultValues() {
        // SECURITY FIX: the original pre-filled a live repository URL and a
        // hard-coded Gitea access token here. Credentials must never ship in
        // client-side code; users now supply their own values.
        document.getElementById('aiModel').value = 'gemini';
    }

    /** Flip a password-style input between masked and plain text. */
    togglePasswordField(inputId, toggleBtnId) {
        const input = document.getElementById(inputId);
        const icon = document.getElementById(toggleBtnId).querySelector('i');

        if (input.type === 'password') {
            input.type = 'text';
            icon.className = 'fas fa-eye-slash';
        } else {
            input.type = 'password';
            icon.className = 'fas fa-eye';
        }
    }

    toggleApiKeyVisibility() {
        this.togglePasswordField('apiKey', 'toggleApiKey');
    }

    toggleGiteaTokenVisibility() {
        this.togglePasswordField('giteaToken', 'toggleGiteaToken');
    }

    /** Enable the process button only when every required field is filled. */
    validateForm() {
        const isValid = ['repoUrl', 'giteaToken', 'prompt', 'apiKey', 'modelName']
            .every(id => document.getElementById(id).value.trim());

        const processBtn = document.getElementById('processBtn');
        processBtn.disabled = !isValid;
        processBtn.classList.toggle('btn-primary', isValid);
        processBtn.classList.toggle('btn-secondary', !isValid);

        return isValid;
    }

    /** Submit the form to the backend and begin monitoring the new task. */
    async processRepository() {
        if (!this.validateForm()) {
            this.addLogEntry('error', 'Please fill in all required fields');
            return;
        }

        const requestData = {
            repo_url: document.getElementById('repoUrl').value.trim(),
            token: document.getElementById('giteaToken').value.trim(),
            prompt: document.getElementById('prompt').value.trim(),
            ai_model: document.getElementById('aiModel').value,
            model_name: document.getElementById('modelName').value.trim(),
            api_key: document.getElementById('apiKey').value.trim()
        };

        try {
            this.setFormEnabled(false);
            this.showProgressCard();
            this.updateStatus('Processing...', 'processing');
            this.addLogEntry('info', 'Starting repository processing...');

            const loadingModal = new bootstrap.Modal(document.getElementById('loadingModal'));
            loadingModal.show();

            const response = await fetch('/process', {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify(requestData)
            });

            if (!response.ok) {
                throw new Error(`HTTP error! status: ${response.status}`);
            }

            const result = await response.json();
            this.currentTaskId = result.task_id;

            this.addLogEntry('success', `Task started with ID: ${result.task_id}`);
            this.addLogEntry('info', 'Monitoring task progress...');

            this.startStatusMonitoring();
            loadingModal.hide();
        } catch (error) {
            this.addLogEntry('error', `Failed to start processing: ${error.message}`);
            this.updateStatus('Error', 'error');
            this.setFormEnabled(true);
            this.hideProgressCard();

            // The modal may not exist if the failure happened before show().
            const loadingModal = bootstrap.Modal.getInstance(document.getElementById('loadingModal'));
            if (loadingModal) {
                loadingModal.hide();
            }
        }
    }

    /** Poll /status/{taskId} every 2 s until the task finishes or errors. */
    startStatusMonitoring() {
        this.stopStatusMonitoring();

        this.statusInterval = setInterval(async () => {
            if (!this.currentTaskId) {
                this.stopStatusMonitoring();
                return;
            }

            try {
                const response = await fetch(`/status/${this.currentTaskId}`);
                if (!response.ok) {
                    throw new Error(`HTTP error! status: ${response.status}`);
                }

                const status = await response.json();
                this.updateProgress(status);
                this.addLogEntry('info', status.message);

                if (status.status === 'completed' || status.status === 'error') {
                    this.stopStatusMonitoring();
                    this.handleTaskCompletion(status);
                }
            } catch (error) {
                this.addLogEntry('error', `Failed to get status: ${error.message}`);
                this.stopStatusMonitoring();
            }
        }, 2000);
    }

    /** Cancel the polling interval, if any. */
    stopStatusMonitoring() {
        if (this.statusInterval) {
            clearInterval(this.statusInterval);
            this.statusInterval = null;
        }
    }

    /** Map a status payload onto the progress bar and step indicators. */
    updateProgress(status) {
        const progressBar = document.getElementById('progressBar');
        const steps = document.querySelectorAll('.step');

        // Show the raw AI response when the backend includes one.
        if (status.ai_response) {
            document.getElementById('aiOutputCard').style.display = 'block';
            document.getElementById('aiOutput').textContent = status.ai_response;
        } else {
            document.getElementById('aiOutputCard').style.display = 'none';
            document.getElementById('aiOutput').textContent = '';
        }

        // Pipeline order: clone -> AI -> commit (no separate deps step).
        const stepOrder = ['clone', 'ai', 'commit'];
        let currentStep = 0;
        let errorStep = -1;

        // Infer the active/failed step from the backend's status message.
        if (status.message.includes('Cloning')) {
            currentStep = 0;
        } else if (status.message.includes('Analyzing') || status.message.includes('AI')) {
            currentStep = 1;
        } else if (status.message.includes('Committing') || status.message.includes('Push')) {
            currentStep = 2;
        } else if (status.status === 'completed') {
            currentStep = 3;
        } else if (status.status === 'error') {
            if (status.message.includes('Clone')) errorStep = 0;
            else if (status.message.includes('AI') || status.message.includes('Gemini') || status.message.includes('OpenAI')) errorStep = 1;
            else if (status.message.includes('Commit') || status.message.includes('Push')) errorStep = 2;
        }

        progressBar.style.width = `${(currentStep / stepOrder.length) * 100}%`;

        // Paint each step indicator: error > completed > active > pending.
        steps.forEach((step, idx) => {
            step.classList.remove('active', 'completed', 'error', 'pending');
            const iconSpan = step.querySelector('.step-icon');
            const labelSpan = step.querySelector('.step-label');

            if (errorStep === idx) {
                step.classList.add('error');
                iconSpan.innerHTML = '✖';
                labelSpan.textContent = 'Error';
            } else if (idx < currentStep) {
                step.classList.add('completed');
                iconSpan.innerHTML = '✔';
                labelSpan.textContent = 'Completed';
            } else if (idx === currentStep && status.status !== 'completed' && status.status !== 'error') {
                step.classList.add('active');
                iconSpan.innerHTML = '<span class="spinner-grow spinner-grow-sm" style="width:1em;height:1em;"></span>';
                labelSpan.textContent = 'In Progress';
            } else {
                step.classList.add('pending');
                iconSpan.innerHTML = '●';
                labelSpan.textContent = 'Pending';
            }
        });
    }

    /** True when stepName precedes activeStep in the pipeline order. */
    isStepCompleted(stepName, activeStep) {
        // CONSISTENCY FIX: this list previously contained a 'deps' step that
        // updateProgress() no longer uses; keep both orders in sync.
        const stepOrder = ['clone', 'ai', 'commit'];
        return stepOrder.indexOf(stepName) < stepOrder.indexOf(activeStep);
    }

    /** Final UI transition once a task reports completed or error. */
    handleTaskCompletion(status) {
        if (status.status === 'completed') {
            this.updateStatus('Completed', 'completed');
            this.addLogEntry('success', 'Repository processing completed successfully!');
            this.addLogEntry('info', 'Changes have been committed and pushed to the repository.');
        } else {
            this.updateStatus('Error', 'error');
            this.addLogEntry('error', `Processing failed: ${status.message}`);
        }

        this.setFormEnabled(true);
        this.currentTaskId = null;
    }

    /** Update the header status text and colour dot. */
    updateStatus(text, type) {
        const statusText = document.querySelector('.status-text');
        const statusDot = document.querySelector('.status-dot');

        statusText.textContent = text;
        statusDot.className = 'status-dot';

        const dotColors = {
            processing: '#ffc107',
            completed: '#28a745',
            error: '#dc3545'
        };
        statusDot.style.backgroundColor = dotColors[type] || '#17a2b8';
    }

    /** Append a timestamped entry to the log pane and keep it scrolled. */
    addLogEntry(type, message) {
        const logContainer = document.getElementById('logContainer');
        const timestamp = new Date().toLocaleTimeString();

        const entry = document.createElement('div');
        entry.className = `log-entry ${type}`;

        const timestampSpan = document.createElement('span');
        timestampSpan.className = 'timestamp';
        timestampSpan.textContent = `[${timestamp}]`;

        const messageSpan = document.createElement('span');
        messageSpan.className = 'message';
        // SECURITY FIX: use textContent, not innerHTML — `message` can echo
        // backend/AI output and must never be parsed as HTML (XSS).
        messageSpan.textContent = message;

        entry.append(timestampSpan, ' ', messageSpan);
        logContainer.appendChild(entry);
        logContainer.scrollTop = logContainer.scrollHeight;
    }

    /** Reset the log pane to its initial single system entry. */
    clearLog() {
        // Static markup only — safe to assign via innerHTML.
        document.getElementById('logContainer').innerHTML = `
            <div class="log-entry info">
                <span class="timestamp">[System]</span>
                <span class="message">Log cleared. Ready for new operations.</span>
            </div>
        `;
    }

    /** Enable/disable every form control and swap the button caption. */
    setFormEnabled(enabled) {
        const formElements = [
            'repoUrl', 'giteaToken', 'prompt', 'aiModel', 'modelName', 'apiKey', 'processBtn'
        ];

        formElements.forEach(id => {
            const element = document.getElementById(id);
            if (element) {
                element.disabled = !enabled;
            }
        });

        const processBtn = document.getElementById('processBtn');
        processBtn.innerHTML = enabled
            ? '<i class="fas fa-play"></i> Process Repository'
            : '<i class="fas fa-spinner fa-spin"></i> Processing...';
    }

    /** Reveal the progress card with the bar and steps reset. */
    showProgressCard() {
        document.getElementById('progressCard').style.display = 'block';
        document.getElementById('progressBar').style.width = '0%';
        document.querySelectorAll('.step').forEach(step => {
            step.classList.remove('active', 'completed');
        });
    }

    hideProgressCard() {
        document.getElementById('progressCard').style.display = 'none';
    }

    /** Produce a human-readable message for fetch/server errors. */
    formatError(error) {
        if (error.response) {
            return `Server error: ${error.response.status} - ${error.response.statusText}`;
        }
        if (error.request) {
            return 'Network error: Unable to connect to server';
        }
        return `Error: ${error.message}`;
    }
}
| 
 | ||||
// Bootstrap the controller once the DOM is ready; exposed on window for
// console debugging and for the unload handler below.
document.addEventListener('DOMContentLoaded', () => {
    window.mcpFrontend = new MCPServerFrontend();
});

// Stop the status-polling interval when the page is being torn down.
window.addEventListener('beforeunload', () => {
    if (window.mcpFrontend) {
        window.mcpFrontend.stopStatusMonitoring();
    }
});
							
								
								
									
										342
									
								
								static/style.css
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										342
									
								
								static/style.css
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,342 @@ | ||||
/* Custom CSS for MCP Server */

/* Shared design tokens (colours, sidebar width). */
:root {
    --primary-color: #007bff;
    --secondary-color: #6c757d;
    --success-color: #28a745;
    --danger-color: #dc3545;
    --warning-color: #ffc107;
    --info-color: #17a2b8;
    --light-color: #f8f9fa;
    --dark-color: #343a40;
    --sidebar-width: 350px;
}

body {
    font-family: 'Segoe UI', Tahoma, Geneva, Verdana, sans-serif;
    background-color: #f5f7fa;
    margin: 0;
    padding: 0;
}

/* Sidebar Styles: full-height purple gradient column. */
.sidebar {
    background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
    color: white;
    min-height: 100vh;
    padding: 0;
    box-shadow: 2px 0 10px rgba(0,0,0,0.1);
}

.sidebar-header {
    padding: 2rem 1.5rem;
    border-bottom: 1px solid rgba(255,255,255,0.1);
}

.sidebar-header h3 {
    margin: 0;
    font-weight: 600;
    color: white;
}

.sidebar-header p {
    margin: 0.5rem 0 0 0;
    opacity: 0.8;
}

.sidebar-content {
    padding: 1.5rem;
}

/* Frosted-glass card grouping each form section. */
.form-section {
    background: rgba(255,255,255,0.1);
    border-radius: 10px;
    padding: 1.5rem;
    margin-bottom: 1.5rem;
    backdrop-filter: blur(10px);
}

.form-section h5 {
    color: white;
    margin-bottom: 1rem;
    font-weight: 600;
}

.form-label {
    color: white;
    font-weight: 500;
    margin-bottom: 0.5rem;
}

/* Inputs: translucent until focused, then opaque white. */
.form-control, .form-select {
    background: rgba(255,255,255,0.9);
    border: none;
    border-radius: 8px;
    padding: 0.75rem;
    font-size: 0.9rem;
}

.form-control:focus, .form-select:focus {
    background: white;
    box-shadow: 0 0 0 0.2rem rgba(255,255,255,0.25);
}

.btn-primary {
    background: linear-gradient(45deg, #007bff, #0056b3);
    border: none;
    border-radius: 8px;
    padding: 0.75rem 1.5rem;
    font-weight: 600;
    transition: all 0.3s ease;
}

/* Slight lift and glow on hover. */
.btn-primary:hover {
    transform: translateY(-2px);
    box-shadow: 0 4px 15px rgba(0,123,255,0.4);
}

/* Main Content Styles */
.main-content {
    padding: 2rem;
    background: white;
    min-height: 100vh;
}

.content-header {
    display: flex;
    justify-content: space-between;
    align-items: center;
    margin-bottom: 2rem;
    padding-bottom: 1rem;
    border-bottom: 2px solid #f0f0f0;
}
| 
 | ||||
| .content-header h2 { | ||||
|     margin: 0; | ||||
|     color: var(--dark-color); | ||||
|     font-weight: 600; | ||||
| } | ||||
| 
 | ||||
| .status-indicator { | ||||
|     display: flex; | ||||
|     align-items: center; | ||||
|     gap: 0.5rem; | ||||
| } | ||||
| 
 | ||||
| .status-dot { | ||||
|     width: 12px; | ||||
|     height: 12px; | ||||
|     border-radius: 50%; | ||||
|     background-color: var(--success-color); | ||||
|     animation: pulse 2s infinite; | ||||
| } | ||||
| 
 | ||||
| .status-text { | ||||
|     font-weight: 500; | ||||
|     color: var(--secondary-color); | ||||
| } | ||||
| 
 | ||||
| /* Progress Steps */ | ||||
| .progress-steps { | ||||
|     display: flex; | ||||
|     justify-content: space-between; | ||||
|     margin-top: 1rem; | ||||
| } | ||||
| 
 | ||||
| .step { | ||||
|     display: flex; | ||||
|     flex-direction: column; | ||||
|     align-items: center; | ||||
|     text-align: center; | ||||
|     flex: 1; | ||||
|     position: relative; | ||||
| } | ||||
| 
 | ||||
| .step:not(:last-child)::after { | ||||
|     content: ''; | ||||
|     position: absolute; | ||||
|     top: 20px; | ||||
|     left: 50%; | ||||
|     width: 100%; | ||||
|     height: 2px; | ||||
|     background-color: #e9ecef; | ||||
|     z-index: 1; | ||||
| } | ||||
| 
 | ||||
| .step.active:not(:last-child)::after { | ||||
|     background-color: var(--primary-color); | ||||
| } | ||||
| 
 | ||||
| .step i { | ||||
|     width: 40px; | ||||
|     height: 40px; | ||||
|     border-radius: 50%; | ||||
|     background-color: #e9ecef; | ||||
|     color: var(--secondary-color); | ||||
|     display: flex; | ||||
|     align-items: center; | ||||
|     justify-content: center; | ||||
|     margin-bottom: 0.5rem; | ||||
|     position: relative; | ||||
|     z-index: 2; | ||||
|     transition: all 0.3s ease; | ||||
| } | ||||
| 
 | ||||
| .step.active i { | ||||
|     background-color: var(--primary-color); | ||||
|     color: white; | ||||
| } | ||||
| 
 | ||||
| .step.completed i { | ||||
|     background-color: var(--success-color); | ||||
|     color: white; | ||||
| } | ||||
| 
 | ||||
| .step span { | ||||
|     font-size: 0.8rem; | ||||
|     color: var(--secondary-color); | ||||
|     font-weight: 500; | ||||
| } | ||||
| 
 | ||||
| .step.active span { | ||||
|     color: var(--primary-color); | ||||
| } | ||||
| 
 | ||||
| .step.completed span { | ||||
|     color: var(--success-color); | ||||
| } | ||||
| 
 | ||||
| /* Log Container */ | ||||
| .log-container { | ||||
|     background-color: #f8f9fa; | ||||
|     border-radius: 8px; | ||||
|     padding: 1rem; | ||||
|     max-height: 400px; | ||||
|     overflow-y: auto; | ||||
|     font-family: 'Courier New', monospace; | ||||
|     font-size: 0.85rem; | ||||
| } | ||||
| 
 | ||||
| .log-entry { | ||||
|     margin-bottom: 0.5rem; | ||||
|     padding: 0.25rem 0; | ||||
|     border-radius: 4px; | ||||
| } | ||||
| 
 | ||||
| .log-entry.info { | ||||
|     color: var(--info-color); | ||||
| } | ||||
| 
 | ||||
| .log-entry.success { | ||||
|     color: var(--success-color); | ||||
| } | ||||
| 
 | ||||
| .log-entry.error { | ||||
|     color: var(--danger-color); | ||||
| } | ||||
| 
 | ||||
| .log-entry.warning { | ||||
|     color: var(--warning-color); | ||||
| } | ||||
| 
 | ||||
| .timestamp { | ||||
|     font-weight: bold; | ||||
|     margin-right: 0.5rem; | ||||
| } | ||||
| 
 | ||||
| /* Cards */ | ||||
| .card { | ||||
|     border: none; | ||||
|     border-radius: 12px; | ||||
|     box-shadow: 0 2px 10px rgba(0,0,0,0.1); | ||||
|     transition: all 0.3s ease; | ||||
| } | ||||
| 
 | ||||
| .card:hover { | ||||
|     box-shadow: 0 4px 20px rgba(0,0,0,0.15); | ||||
| } | ||||
| 
 | ||||
| .card-header { | ||||
|     background: linear-gradient(45deg, #f8f9fa, #e9ecef); | ||||
|     border-bottom: 1px solid #dee2e6; | ||||
|     border-radius: 12px 12px 0 0 !important; | ||||
|     font-weight: 600; | ||||
| } | ||||
| 
 | ||||
| /* Progress Bar */ | ||||
| .progress { | ||||
|     height: 8px; | ||||
|     border-radius: 4px; | ||||
|     background-color: #e9ecef; | ||||
| } | ||||
| 
 | ||||
| .progress-bar { | ||||
|     background: linear-gradient(45deg, var(--primary-color), #0056b3); | ||||
|     border-radius: 4px; | ||||
|     transition: width 0.3s ease; | ||||
| } | ||||
| 
 | ||||
| /* Responsive Design */ | ||||
| @media (max-width: 768px) { | ||||
|     .sidebar { | ||||
|         min-height: auto; | ||||
|         position: relative; | ||||
|     } | ||||
|      | ||||
|     .main-content { | ||||
|         padding: 1rem; | ||||
|     } | ||||
|      | ||||
|     .content-header { | ||||
|         flex-direction: column; | ||||
|         align-items: flex-start; | ||||
|         gap: 1rem; | ||||
|     } | ||||
|      | ||||
|     .progress-steps { | ||||
|         flex-direction: column; | ||||
|         gap: 1rem; | ||||
|     } | ||||
|      | ||||
|     .step:not(:last-child)::after { | ||||
|         display: none; | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /* Animations */ | ||||
| @keyframes pulse { | ||||
|     0% { | ||||
|         opacity: 1; | ||||
|     } | ||||
|     50% { | ||||
|         opacity: 0.5; | ||||
|     } | ||||
|     100% { | ||||
|         opacity: 1; | ||||
|     } | ||||
| } | ||||
| 
 | ||||
| /* Loading States */ | ||||
| .btn:disabled { | ||||
|     opacity: 0.6; | ||||
|     cursor: not-allowed; | ||||
| } | ||||
| 
 | ||||
| /* Custom Scrollbar */ | ||||
| .log-container::-webkit-scrollbar { | ||||
|     width: 6px; | ||||
| } | ||||
| 
 | ||||
| .log-container::-webkit-scrollbar-track { | ||||
|     background: #f1f1f1; | ||||
|     border-radius: 3px; | ||||
| } | ||||
| 
 | ||||
| .log-container::-webkit-scrollbar-thumb { | ||||
|     background: #c1c1c1; | ||||
|     border-radius: 3px; | ||||
| } | ||||
| 
 | ||||
| .log-container::-webkit-scrollbar-thumb:hover { | ||||
|     background: #a8a8a8; | ||||
| }  | ||||
							
								
								
									
										194
									
								
								templates/index.html
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										194
									
								
								templates/index.html
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,194 @@ | ||||
<!DOCTYPE html>
<!-- MCP Server single-page UI: a configuration sidebar (repo URL, tokens,
     AI model settings, prompt) and a main panel showing processing progress,
     a log console, and AI output. Driven by /static/script.js. -->
<html lang="en">
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1.0">
    <title>MCP Server - AI Code Editor</title>
    <link href="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/css/bootstrap.min.css" rel="stylesheet">
    <link href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css" rel="stylesheet">
    <link href="/static/style.css" rel="stylesheet">
</head>
<body>
    <div class="container-fluid">
        <div class="row">
            <!-- Sidebar -->
            <div class="col-md-3 sidebar">
                <div class="sidebar-header">
                    <h3><i class="fas fa-robot"></i> MCP Server</h3>
                    <p class="text-muted">AI-powered code editing</p>
                </div>

                <div class="sidebar-content">
                    <!-- Repository credentials -->
                    <div class="form-section">
                        <h5><i class="fas fa-cog"></i> Configuration</h5>

                        <div class="mb-3">
                            <label for="repoUrl" class="form-label">Gitea Repository URL</label>
                            <input type="url" class="form-control" id="repoUrl"
                                   placeholder="http://157.66.191.31:3000/user/repo.git">
                        </div>

                        <div class="mb-3">
                            <label for="giteaToken" class="form-label">Gitea Token</label>
                            <div class="input-group">
                                <input type="password" class="form-control" id="giteaToken"
                                       placeholder="Enter your Gitea token">
                                <!-- show/hide toggle handled by script.js -->
                                <button class="btn btn-outline-secondary" type="button" id="toggleGiteaToken">
                                    <i class="fas fa-eye"></i>
                                </button>
                            </div>
                            <div class="form-text">
                                <small>
                                    Get your token from Gitea: Settings → Applications → Generate new token
                                </small>
                            </div>
                        </div>
                    </div>

                    <!-- AI provider, model and API key -->
                    <div class="form-section">
                        <h5><i class="fas fa-brain"></i> AI Configuration</h5>

                        <div class="mb-3">
                            <label for="aiModel" class="form-label">AI Model</label>
                            <select class="form-select" id="aiModel">
                                <option value="gemini">Gemini CLI</option>
                                <option value="openai">OpenAI</option>
                            </select>
                        </div>

                        <div class="mb-3">
                            <label for="modelName" class="form-label">Model Name</label>
                            <input type="text" class="form-control" id="modelName"
                                   placeholder="gemini-1.5-pro" value="gemini-1.5-pro">
                            <div class="form-text">
                                <small>
                                    <strong>Gemini:</strong> gemini-1.5-pro, gemini-1.5-flash, etc.<br>
                                    <strong>OpenAI:</strong> gpt-4, gpt-3.5-turbo, etc.
                                </small>
                            </div>
                        </div>

                        <div class="mb-3">
                            <label for="apiKey" class="form-label">API Key</label>
                            <div class="input-group">
                                <input type="password" class="form-control" id="apiKey"
                                       placeholder="Enter your API key">
                                <button class="btn btn-outline-secondary" type="button" id="toggleApiKey">
                                    <i class="fas fa-eye"></i>
                                </button>
                            </div>
                            <div class="form-text">
                                <small>
                                    <strong>Gemini:</strong> Get your API key from <a href="https://makersuite.google.com/app/apikey" target="_blank">Google AI Studio</a><br>
                                    <strong>OpenAI:</strong> Get your API key from <a href="https://platform.openai.com/api-keys" target="_blank">OpenAI Platform</a>
                                </small>
                            </div>
                        </div>
                    </div>

                    <!-- Prompt input and submit button -->
                    <div class="form-section">
                        <h5><i class="fas fa-magic"></i> AI Prompt</h5>
                        <div class="mb-3">
                            <label for="prompt" class="form-label">What would you like to do?</label>
                            <textarea class="form-control" id="prompt" rows="4"
                                      placeholder="Describe the changes you want to make to the code..."></textarea>
                        </div>

                        <button type="button" class="btn btn-primary w-100" id="processBtn">
                            <i class="fas fa-play"></i> Process Repository
                        </button>
                    </div>
                </div>
            </div>

            <!-- Main Content -->
            <div class="col-md-9 main-content">
                <div class="content-header">
                    <h2>Repository Processing</h2>
                    <div class="status-indicator" id="statusIndicator">
                        <span class="status-dot"></span>
                        <span class="status-text">Ready</span>
                    </div>
                </div>

                <div class="content-body">
                    <!-- Progress Section: hidden until processing starts;
                         step states (active/completed) are set by script.js
                         via the data-step attributes. -->
                    <div class="card mb-4" id="progressCard" style="display: none;">
                        <div class="card-header">
                            <h5><i class="fas fa-tasks"></i> Processing Progress</h5>
                        </div>
                        <div class="card-body">
                            <div class="progress mb-3">
                                <div class="progress-bar" id="progressBar" role="progressbar" style="width: 0%"></div>
                            </div>
                            <div class="progress-steps">
                                <div class="step" data-step="clone">
                                    <span class="step-icon"></span>
                                    <span>Clone Repository</span>
                                    <span class="step-label"></span>
                                </div>
                                <div class="step" data-step="ai">
                                    <span class="step-icon"></span>
                                    <span>AI Analysis</span>
                                    <span class="step-label"></span>
                                </div>
                                <div class="step" data-step="commit">
                                    <span class="step-icon"></span>
                                    <span>Commit & Push</span>
                                    <span class="step-label"></span>
                                </div>
                            </div>
                        </div>
                    </div>

                    <!-- Log Section -->
                    <div class="card">
                        <div class="card-header d-flex justify-content-between align-items-center">
                            <h5><i class="fas fa-terminal"></i> Processing Log</h5>
                            <button class="btn btn-sm btn-outline-secondary" id="clearLog">
                                <i class="fas fa-trash"></i> Clear
                            </button>
                        </div>
                        <div class="card-body">
                            <div class="log-container" id="logContainer">
                                <div class="log-entry info">
                                    <span class="timestamp">[System]</span>
                                    <span class="message">Ready to process repository. Fill in the details and click "Process Repository".</span>
                                </div>
                            </div>
                        </div>
                    </div>

                    <!-- AI Model Output Section: hidden until output arrives -->
                    <div class="card mt-4" id="aiOutputCard" style="display: none;">
                        <div class="card-header">
                            <h5><i class="fas fa-robot"></i> AI Model Output</h5>
                        </div>
                        <div class="card-body">
                            <pre id="aiOutput" style="white-space: pre-wrap; word-break: break-word;"></pre>
                        </div>
                    </div>
                </div>
            </div>
        </div>
    </div>

    <!-- Loading Modal shown while the backend processes the repository -->
    <div class="modal fade" id="loadingModal" tabindex="-1">
        <div class="modal-dialog modal-dialog-centered">
            <div class="modal-content">
                <div class="modal-body text-center">
                    <div class="spinner-border text-primary mb-3" role="status">
                        <span class="visually-hidden">Loading...</span>
                    </div>
                    <h5>Processing Repository</h5>
                    <p class="text-muted">This may take a few minutes...</p>
                </div>
            </div>
        </div>
    </div>

    <script src="https://cdn.jsdelivr.net/npm/bootstrap@5.1.3/dist/js/bootstrap.bundle.min.js"></script>
    <script src="/static/script.js"></script>
</body>
</html>
							
								
								
									
										221
									
								
								test_setup.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										221
									
								
								test_setup.py
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,221 @@ | ||||
| #!/usr/bin/env python3 | ||||
| """ | ||||
| MCP Server Setup Test | ||||
| Tests all components to ensure they're working correctly | ||||
| """ | ||||
| 
 | ||||
| import os | ||||
| import sys | ||||
| import subprocess | ||||
| import importlib | ||||
| from pathlib import Path | ||||
| 
 | ||||
def test_python_version():
    """Report whether the running interpreter is Python 3.8 or newer."""
    print("🐍 Testing Python version...")
    version = sys.version.split()[0]
    compatible = sys.version_info >= (3, 8)
    if compatible:
        print(f"✅ Python {version} - Compatible")
    else:
        print(f"❌ Python {version} - Incompatible (need 3.8+)")
    return compatible
| 
 | ||||
def test_dependencies():
    """Check that required third-party packages are importable.

    Returns True when every required package imports cleanly. Optional
    AI packages are reported but never fail the check.
    """
    print("\n📦 Testing dependencies...")

    # Map PyPI distribution name -> importable module name.
    # BUG FIX: the original passed the distribution name straight to
    # importlib.import_module, so 'gitpython' (imports as 'git') and
    # 'python-dotenv' (imports as 'dotenv') were always reported missing
    # even when installed.
    required_packages = {
        'fastapi': 'fastapi',
        'uvicorn': 'uvicorn',
        'gitpython': 'git',
        'requests': 'requests',
        'python-dotenv': 'dotenv',
        'pydantic': 'pydantic',
    }

    optional_packages = [
        'google.generativeai',
        'openai'
    ]

    all_good = True

    for package, module_name in required_packages.items():
        try:
            importlib.import_module(module_name)
            print(f"✅ {package}")
        except ImportError:
            print(f"❌ {package} - Missing")
            all_good = False

    print("\n🔧 Optional packages:")
    for package in optional_packages:
        try:
            importlib.import_module(package)
            print(f"✅ {package}")
        except ImportError:
            print(f"⚠️  {package} - Not installed (AI features won't work)")

    return all_good
| 
 | ||||
def test_git():
    """Report whether the git CLI is available and working on PATH."""
    print("\n🔧 Testing Git...")
    try:
        proc = subprocess.run(["git", "--version"], capture_output=True, text=True)
    except FileNotFoundError:
        print("❌ Git not found in PATH")
        return False
    if proc.returncode != 0:
        print("❌ Git not working properly")
        return False
    print(f"✅ Git: {proc.stdout.strip()}")
    return True
| 
 | ||||
def test_node():
    """Report whether Node.js is available (optional for this project)."""
    print("\n🔧 Testing Node.js...")
    try:
        proc = subprocess.run(["node", "--version"], capture_output=True, text=True)
    except FileNotFoundError:
        print("⚠️  Node.js not found (optional)")
        return False
    if proc.returncode != 0:
        print("⚠️  Node.js not working properly")
        return False
    print(f"✅ Node.js: {proc.stdout.strip()}")
    return True
| 
 | ||||
def test_files():
    """Verify the expected project files and directories exist in CWD."""
    print("\n📁 Testing files...")

    all_good = True

    # Required top-level files.
    for file in ['main.py', 'requirements.txt', 'env.example', 'README.md']:
        exists = Path(file).exists()
        print(f"✅ {file}" if exists else f"❌ {file} - Missing")
        all_good = all_good and exists

    # Required directories (frontend assets and templates).
    for directory in ['templates', 'static']:
        exists = Path(directory).exists()
        print(f"✅ {directory}/" if exists else f"❌ {directory}/ - Missing")
        all_good = all_good and exists

    return all_good
| 
 | ||||
def test_environment():
    """Check whether at least one real AI API key is configured via .env.

    A key still set to its env.example placeholder counts as unconfigured.
    Returns True when at least one genuine key is present.
    """
    print("\n🔐 Testing environment...")

    from dotenv import load_dotenv
    load_dotenv()

    def _configured(value, placeholder):
        # Placeholder text copied verbatim from env.example is not a key.
        return bool(value) and value != placeholder

    gemini_ok = _configured(os.getenv("GEMINI_API_KEY"), "your_gemini_api_key_here")
    openai_ok = _configured(os.getenv("OPENAI_API_KEY"), "your_openai_api_key_here")

    if gemini_ok:
        print("✅ Gemini API key configured")
    else:
        print("⚠️  Gemini API key not configured")

    if openai_ok:
        print("✅ OpenAI API key configured")
    else:
        print("⚠️  OpenAI API key not configured")

    if not (gemini_ok or openai_ok):
        # BUG FIX: the original only failed when both env vars were entirely
        # unset, so a placeholder value slipped through as "configured".
        print("⚠️  No AI API keys configured - AI features won't work")
        return False

    return True
| 
 | ||||
def test_server_import():
    """Check that the server module (main.py) imports without errors."""
    print("\n🚀 Testing server import...")
    try:
        importlib.import_module("main")
    except Exception as exc:
        print(f"❌ Server import failed: {exc}")
        return False
    print("✅ Server module imports successfully")
    return True
| 
 | ||||
def run_tests():
    """Run every setup check and print a pass/fail summary.

    Returns True only when all checks pass; a check that raises is
    recorded as a failure rather than aborting the run.
    """
    print("🧪 MCP Server Setup Test")
    print("=" * 50)

    checks = [
        ("Python Version", test_python_version),
        ("Dependencies", test_dependencies),
        ("Git", test_git),
        ("Node.js", test_node),
        ("Files", test_files),
        ("Environment", test_environment),
        ("Server Import", test_server_import),
    ]

    results = []
    for name, check in checks:
        try:
            outcome = check()
        except Exception as exc:
            print(f"❌ {name} test failed with error: {exc}")
            outcome = False
        results.append((name, outcome))

    # Summary
    print("\n" + "=" * 50)
    print("📊 Test Summary:")
    print("=" * 50)

    total = len(results)
    passed = sum(1 for _, ok in results if ok)
    for name, ok in results:
        print(f"{name:<20} {'✅ PASS' if ok else '❌ FAIL'}")

    print(f"\nResults: {passed}/{total} tests passed")

    if passed == total:
        print("🎉 All tests passed! Your MCP Server is ready to use.")
        print("\nTo start the server, run:")
        print("  python main.py")
        print("  or")
        print("  python start.py")
    else:
        print("⚠️  Some tests failed. Please fix the issues before running the server.")
        print("\nCommon fixes:")
        print("1. Install missing dependencies: pip install -r requirements.txt")
        print("2. Configure API keys in .env file")
        print("3. Install Git if not present")

    return passed == total
| 
 | ||||
| if __name__ == "__main__": | ||||
|     success = run_tests() | ||||
|     sys.exit(0 if success else 1)  | ||||
							
								
								
									
										80
									
								
								view_logs.py
									
									
									
									
									
										Normal file
									
								
							
							
						
						
									
										80
									
								
								view_logs.py
									
									
									
									
									
										Normal file
									
								
							| @ -0,0 +1,80 @@ | ||||
| #!/usr/bin/env python3 | ||||
| """ | ||||
| Real-time log viewer for MCP Server | ||||
| """ | ||||
| 
 | ||||
| import os | ||||
| import time | ||||
| import sys | ||||
| from pathlib import Path | ||||
| 
 | ||||
def follow_logs(log_file="mcp_server.log"):
    """Tail *log_file*, printing new lines as they are appended.

    Polls the file every 100ms and runs until interrupted with Ctrl+C.
    Handles log rotation/truncation: if the file shrinks, reading restarts
    from the beginning instead of seeking past EOF (the original seeked to
    the stale offset and silently printed nothing until the file regrew).
    """
    log_path = Path(log_file)

    if not log_path.exists():
        print(f"Log file {log_file} not found. Creating empty file...")
        log_path.touch()

    print(f"Following logs from: {log_path.absolute()}")
    print("Press Ctrl+C to stop")
    print("-" * 80)

    # Start tailing from the current end of the file.
    last_size = log_path.stat().st_size

    try:
        while True:
            current_size = log_path.stat().st_size
            if current_size < last_size:
                # File was truncated or rotated in place — start over.
                last_size = 0

            if current_size > last_size:
                with open(log_path, 'r') as f:
                    f.seek(last_size)
                    for line in f:
                        print(line.rstrip())
                    last_size = f.tell()

            time.sleep(0.1)  # Check every 100ms

    except KeyboardInterrupt:
        print("\nStopped following logs")
| 
 | ||||
def show_recent_logs(log_file="mcp_server.log", lines=50):
    """Print the last *lines* lines of *log_file*, if the file exists."""
    log_path = Path(log_file)

    if not log_path.exists():
        print(f"Log file {log_file} not found.")
        return

    with open(log_path, 'r') as f:
        recent_lines = f.readlines()[-lines:]

    print(f"Recent {len(recent_lines)} lines from {log_path.absolute()}:")
    print("-" * 80)
    for entry in recent_lines:
        print(entry.rstrip())
| 
 | ||||
def main():
    """Dispatch the CLI subcommands: ``follow`` and ``recent``.

    With no arguments, dumps the most recent lines of the default log.
    """
    args = sys.argv[1:]
    if not args:
        # Default: show recent logs
        show_recent_logs()
        return

    command = args[0]
    if command == "follow":
        follow_logs(args[1] if len(args) > 1 else "mcp_server.log")
    elif command == "recent":
        log_file = args[1] if len(args) > 1 else "mcp_server.log"
        count = int(args[2]) if len(args) > 2 else 50
        show_recent_logs(log_file, count)
    else:
        print("Usage:")
        print("  python view_logs.py follow [log_file]  # Follow logs in real-time")
        print("  python view_logs.py recent [log_file] [lines]  # Show recent logs")
| 
 | ||||
| if __name__ == "__main__": | ||||
|     main()  | ||||
		Loading…
	
	
			
			x
			
			
		
	
		Reference in New Issue
	
	Block a user
	 Varun
						Varun