Browse files

port github workflow to circleci

Alex Cheema 1 year ago
parent
commit
cf4cddccc1
2 changed files with 108 additions and 239 deletions
  1. +108 -39
      .circleci/config.yml
  2. +0 -200
      .github/workflows/test.yml

+ 108 - 39
.circleci/config.yml

@@ -1,54 +1,123 @@
-# This config was automatically generated from your source code
-# Stacks detected: cicd:github-actions:.github/workflows,deps:python:.,file:setup.py:.
 version: 2.1
+
 orbs:
-  python: circleci/python@2
+  apple: ml-explore/pr-approval@0.1.0
+
 jobs:
-  test-python:
-    # Install dependencies and run tests
-    docker:
-      - image: cimg/python:3.12-node
+  unit_test:
+    macos:
+      xcode: "15.2.0"
+    resource_class: macos.m1.medium.gen1
     steps:
       - checkout
-      - python/install-packages:
-          pkg-manager: pip-dist
+      - run:
+          name: Set up Python
+          command: |
+            brew install python@3.12
+            python3.12 -m venv env
+            source env/bin/activate
+      - restore_cache:
+          keys:
+            - huggingface-hub-{{ checksum "~/.cache/huggingface/hub/**/*" }}-{{ .Environment.CIRCLE_JOB }}
+      - run:
+          name: Install dependencies
+          command: |
+            source env/bin/activate
+            pip install --upgrade pip
+            pip install .
       - run:
           name: Run tests
-          command: pytest --junitxml=junit.xml || ((($? == 5)) && echo 'Did not find any tests to run.')
-      - store_test_results:
-          path: junit.xml
-  build-package:
-    # build python package
-    docker:
-      - image: cimg/python:3.12-node
+          command: |
+            source env/bin/activate
+            ls ~/.cache/huggingface/hub/models--mlx-community--Meta-Llama-3-8B-Instruct-4bit/**/* || true
+            METAL_XCODE=1 python3 -m exo.inference.test_inference_engine
+      - save_cache:
+          paths:
+            - ~/.cache/huggingface/hub
+          key: huggingface-hub-{{ checksum "~/.cache/huggingface/hub/**/*" }}-{{ .Environment.CIRCLE_JOB }}
+
+  discovery_integration_test:
+    macos:
+      xcode: "15.2.0"
     steps:
       - checkout
       - run:
-          name: Create the ~/artifacts directory if it doesn't exist
-          command: mkdir -p ~/artifacts
-      - python/dist
-      - store_artifacts:
-          path: dist
-          destination: ~/artifacts
-  deploy:
-    # This is an example deploy job, not actually used by the workflow
-    docker:
-      - image: cimg/base:stable
+          name: Set up Python
+          command: |
+            brew install python@3.12
+            python3.12 -m venv env
+            source env/bin/activate
+      - run:
+          name: Install dependencies
+          command: |
+            source env/bin/activate
+            pip install --upgrade pip
+            pip install .
+      - run:
+          name: Run discovery integration test
+          command: |
+            source env/bin/activate
+            DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --node-id "node1" --listen-port 5678 --broadcast-port 5679 --chatgpt-api-port 8000 > output1.log 2>&1 &
+            PID1=$!
+            DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --node-id "node2" --listen-port 5679 --broadcast-port 5678 --chatgpt-api-port 8001 > output2.log 2>&1 &
+            PID2=$!
+            sleep 10
+            kill $PID1 $PID2
+            if grep -q "Connected to peer" output1.log && grep -q "Connected to peer" output2.log; then
+              echo "Test passed: Both instances discovered each other"
+              exit 0
+            else
+              echo "Test failed: Devices did not discover each other"
+              echo "Output of first instance:"
+              cat output1.log
+              echo "Output of second instance:"
+              cat output2.log
+              exit 1
+            fi
+
+  chatgpt_api_integration_test:
+    macos:
+      xcode: "15.2.0"
     steps:
-      # Replace this with steps to deploy to users
+      - checkout
+      - run:
+          name: Set up Python
+          command: |
+            brew install python@3.12
+            python3.12 -m venv env
+            source env/bin/activate
+      - restore_cache:
+          keys:
+            - huggingface-hub-{{ checksum "~/.cache/huggingface/hub/**/*" }}-{{ .Environment.CIRCLE_JOB }}
+            - huggingface-hub-
+      - restore_cache:
+          keys:
+            - tinygrad-downloads-{{ checksum "~/Library/Caches/tinygrad/downloads/**/*" }}-{{ .Environment.CIRCLE_JOB }}
+            - tinygrad-downloads-
       - run:
-          name: deploy
-          command: '#e.g. ./deploy.sh'
+          name: Install dependencies
+          command: |
+            source env/bin/activate
+            pip install --upgrade pip
+            pip install .
       - run:
-          name: found github actions config
-          command: ':'
+          name: Run chatgpt api integration test
+          command: |
+            source env/bin/activate
+            exit 0 # TODO: Implement the actual test here
+      - save_cache:
+          paths:
+            - ~/.cache/huggingface/hub
+          key: huggingface-hub-{{ checksum "~/.cache/huggingface/hub/**/*" }}-{{ .Environment.CIRCLE_JOB }}
+      - save_cache:
+          paths:
+            - ~/Library/Caches/tinygrad/downloads
+          key: tinygrad-downloads-{{ checksum "~/Library/Caches/tinygrad/downloads/**/*" }}-{{ .Environment.CIRCLE_JOB }}
+
 workflows:
-  build-and-test:
+  version: 2
+  build_and_test:
     jobs:
-      - test-python
-      - build-package:
-          requires:
-            - test-python
-    # - deploy:
-    #     requires:
-    #       - build-package
+      - unit_test
+      - discovery_integration_test
+      - chatgpt_api_integration_test
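
Note: the ported workflow runs all three jobs in parallel, whereas the previous CircleCI template gated build-package on test-python. If the integration jobs should only run once the unit tests pass, CircleCI's requires key could express that; a minimal sketch (an assumed variant, not what this commit does):

workflows:
  version: 2
  build_and_test:
    jobs:
      - unit_test
      - discovery_integration_test:
          requires:
            - unit_test
      - chatgpt_api_integration_test:
          requires:
            - unit_test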

+ 0 - 200
.github/workflows/test.yml

@@ -1,200 +0,0 @@
-name: Python Tests on M1 Mac
-
-on:
-  push:
-    branches: [ main ]
-  pull_request:
-    branches: [ main ]
-
-jobs:
-  unit_test:
-    runs-on: macos-14
-    steps:
-    - uses: actions/checkout@v2
-
-    - name: Set up Python
-      uses: actions/setup-python@v2
-      with:
-        python-version: '3.12'
-
-    - name: Cache huggingface hub models
-      uses: actions/cache@v3
-      with:
-        path: ~/.cache/huggingface/hub
-        key: ${{ runner.os }}-huggingface-hub-${{ hashFiles('~/.cache/huggingface/hub/**/*') }}-${{ github.job }}
-
-    - name: Install dependencies
-      run: |
-        python3 -m pip install --upgrade pip
-        pip install .
-
-    - name: Run tests
-      run: |
-        # Check if cached files are present
-        ls ~/.cache/huggingface/hub/models--mlx-community--Meta-Llama-3-8B-Instruct-4bit/**/* || true
-
-        # Run unit tests
-        METAL_XCODE=1 python3 -m exo.inference.test_inference_engine
-
-  discovery_integration_test:
-    runs-on: macos-latest
-    steps:
-    - uses: actions/checkout@v2
-
-    - name: Set up Python
-      uses: actions/setup-python@v2
-      with:
-        python-version: '3.x'
-
-    - name: Install dependencies
-      run: |
-        python3 -m pip install --upgrade pip
-        pip install .
-
-    - name: Run discovery integration test
-      run: |
-        # Start first instance
-        DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --node-id "node1" --listen-port 5678 --broadcast-port 5679 --chatgpt-api-port 8000 > output1.log 2>&1 &
-        PID1=$!
-
-        # Start second instance
-        DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --node-id "node2" --listen-port 5679 --broadcast-port 5678 --chatgpt-api-port 8001 > output2.log 2>&1 &
-        PID2=$!
-
-        # Wait for discovery
-        sleep 10
-
-        # Stop both instances
-        kill $PID1 $PID2
-
-        # Check outputs
-        if grep -q "Connected to peer" output1.log && grep -q "Connected to peer" output2.log; then
-          echo "Test passed: Both instances discovered each other"
-          exit 0
-        else
-          echo "Test failed: Devices did not discover each other"
-          echo "Output of first instance:"
-          cat output1.log
-          echo "Output of second instance:"
-          cat output2.log
-          exit 1
-        fi
-
-  chatgpt_api_integration_test:
-    runs-on: macos-latest
-    steps:
-    - uses: actions/checkout@v2
-
-    - name: Set up Python
-      uses: actions/setup-python@v2
-      with:
-        python-version: '3.x'
-
-    - name: Cache huggingface hub models
-      uses: actions/cache@v3
-      with:
-        path: ~/.cache/huggingface/hub
-        key: ${{ runner.os }}-huggingface-hub-${{ hashFiles('~/.cache/huggingface/hub/**/*') }}-${{ github.job }}
-        restore-keys: |
-          ${{ runner.os }}-huggingface-hub-
-
-    - name: Cache tinygrad downloaded models
-      uses: actions/cache@v3
-      with:
-        path: ~/Library/Caches/tinygrad/downloads
-        key: ${{ runner.os }}-tinygrad-downloads-${{ hashFiles('~/Library/Caches/tinygrad/downloads/**/*') }}-${{ github.job }}
-        restore-keys: |
-          ${{ runner.os }}-tinygrad-downloads-
-
-    - name: Install dependencies
-      run: |
-        python3 -m pip install --upgrade pip
-        pip install .
-
-    - name: Run chatgpt api integration test
-      run: |
-        exit 0 # TODO
-        # Check if cached files are present
-        ls ~/.cache/huggingface/hub/models--mlx-community--Meta-Llama-3-8B-Instruct-4bit/**/* || true
-
-        # Start first instance
-        DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --inference-engine mlx --node-id "node1" --listen-port 5678 --broadcast-port 5679 --chatgpt-api-port 8000 --chatgpt-api-response-timeout-secs 900 > output1.log 2>&1 &
-        PID1=$!
-
-        # Start second instance
-        DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --inference-engine mlx --node-id "node2" --listen-port 5679 --broadcast-port 5678 --chatgpt-api-port 8001 --chatgpt-api-response-timeout-secs 900 > output2.log 2>&1 &
-        PID2=$!
-
-        # Wait for discovery
-        sleep 10
-
-        # Function to check if processes are still running
-        check_processes() {
-          if ! kill -0 $PID1 2>/dev/null; then
-            echo "First instance (PID $PID1) died unexpectedly. Log output:"
-            cat output1.log
-            exit 1
-          fi
-          if ! kill -0 $PID2 2>/dev/null; then
-            echo "Second instance (PID $PID2) died unexpectedly. Log output:"
-            cat output2.log
-            exit 1
-          fi
-        }
-
-        # Check processes before proceeding
-        check_processes
-
-        # first one to load the model
-        curl -s http://localhost:8000/v1/chat/completions \
-            -H "Content-Type: application/json" \
-            -d '{
-              "model": "llama-3-8b",
-              "messages": [{"role": "user", "content": "Keep responses concise. Placeholder to load model..."}],
-              "temperature": 0.7
-            }'
-
-        # Check processes after model load
-        check_processes
-
-        response_1=$(curl -s http://localhost:8000/v1/chat/completions \
-          -H "Content-Type: application/json" \
-          -d '{
-            "model": "llama-3-8b",
-            "messages": [{"role": "user", "content": "Keep responses concise. Who was the king of pop?"}],
-            "temperature": 0.7
-          }')
-        echo "Response 1: $response_1"
-
-        # Check processes after first response
-        check_processes
-
-        response_2=$(curl -s http://localhost:8000/v1/chat/completions \
-          -H "Content-Type: application/json" \
-          -d '{
-            "model": "llama-3-8b",
-            "messages": [{"role": "user", "content": "Keep responses concise. Who was the king of pop?"}],
-            "temperature": 0.7
-          }')
-        echo "Response 2: $response_2"
-
-        # Check processes after second response
-        check_processes
-
-        # Stop both instances
-        kill $PID1 $PID2
-
-        echo ""
-        if ! echo "$response_1" | grep -q "Michael Jackson" || ! echo "$response_2" | grep -q "Michael Jackson"; then
-          echo "Test failed: Response does not contain 'Michael Jackson'"
-          echo "Response 1: $response_1"
-          echo ""
-          echo "Response 2: $response_2"
-          echo "Output of first instance:"
-          cat output1.log
-          echo "Output of second instance:"
-          cat output2.log
-          exit 1
-        else
-          echo "Test passed: Response from both nodes contains 'Michael Jackson'"
-        fi