Ver código fonte

cache isolation per workflow job

Alex Cheema 1 ano atrás
pai
commit
052ee1c7e9
1 arquivo alterado com 10 adições e 4 exclusões
  1. +10 −4
      .github/workflows/test.yml

+ 10 - 4
.github/workflows/test.yml

@@ -17,6 +17,12 @@ jobs:
       with:
         python-version: '3.12'
 
+    - name: Cache huggingface hub models
+      uses: actions/cache@v3
+      with:
+        path: ~/.cache/huggingface/hub
+        key: ${{ runner.os }}-huggingface-hub-${{ hashFiles('~/.cache/huggingface/hub/**/*') }}-${{ github.job }}
+
     - name: Install dependencies
       run: |
         python3 -m pip install --upgrade pip
@@ -25,7 +31,7 @@ jobs:
     - name: Run tests
       run: |
         # Check if cached files are present
-        ls ~/.cache/huggingface/hub/models--mlx-community--Meta-Llama-3-8B-Instruct-4bit/**/*
+        ls ~/.cache/huggingface/hub/models--mlx-community--Meta-Llama-3-8B-Instruct-4bit/**/* || true
 
         # Run unit tests
         METAL_XCODE=1 python3 -m exo.inference.test_inference_engine
@@ -88,7 +94,7 @@ jobs:
       uses: actions/cache@v3
       with:
         path: ~/.cache/huggingface/hub
-        key: ${{ runner.os }}-huggingface-hub-${{ hashFiles('~/.cache/huggingface/hub/**/*') }}
+        key: ${{ runner.os }}-huggingface-hub-${{ hashFiles('~/.cache/huggingface/hub/**/*') }}-${{ github.job }}
         restore-keys: |
           ${{ runner.os }}-huggingface-hub-
 
@@ -96,7 +102,7 @@ jobs:
       uses: actions/cache@v3
       with:
         path: ~/Library/Caches/tinygrad/downloads
-        key: ${{ runner.os }}-tinygrad-downloads-${{ hashFiles('~/Library/Caches/tinygrad/downloads/**/*') }}
+        key: ${{ runner.os }}-tinygrad-downloads-${{ hashFiles('~/Library/Caches/tinygrad/downloads/**/*') }}-${{ github.job }}
         restore-keys: |
           ${{ runner.os }}-tinygrad-downloads-
 
@@ -108,7 +114,7 @@ jobs:
     - name: Run chatgpt api integration test
       run: |
         # Check if cached files are present
-        ls ~/.cache/huggingface/hub/models--mlx-community--Meta-Llama-3-8B-Instruct-4bit/**/*
+        ls ~/.cache/huggingface/hub/models--mlx-community--Meta-Llama-3-8B-Instruct-4bit/**/* || true
 
         # Start first instance
         DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --listen-port 5678 --broadcast-port 5679 --chatgpt-api-port 8000 --chatgpt-api-response-timeout-secs 1200 > output1.log 2>&1 &