# test.yml

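# CI for exo: a unit-test job on an Apple-silicon (M1) runner plus two
# multi-node integration tests covering peer discovery and the ChatGPT-style
# chat completions API.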
name: Python Tests on M1 Mac

on:
  push:
    branches: [ main ]
  pull_request:
    branches: [ main ]

jobs:
  unit_test:
    runs-on: macos-14
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.12'
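      # Note: hashFiles() matches paths relative to GITHUB_WORKSPACE, so the
      # home-directory glob in the key below likely hashes to an empty string,
      # leaving the key effectively constant per OS and job; the step then
      # restores whatever was last saved under that key.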
      - name: Cache huggingface hub models
        uses: actions/cache@v3
        with:
          path: ~/.cache/huggingface/hub
          key: ${{ runner.os }}-huggingface-hub-${{ hashFiles('~/.cache/huggingface/hub/**/*') }}-${{ github.job }}
      - name: Install dependencies
        run: |
          python3 -m pip install --upgrade pip
          pip install .
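      # METAL_XCODE=1 below is (assumed here) tinygrad's switch for compiling
      # Metal kernels via the Xcode toolchain, needed for GPU inference on the
      # Apple-silicon runner.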
      - name: Run tests
        run: |
          # Check if cached files are present
          ls ~/.cache/huggingface/hub/models--mlx-community--Meta-Llama-3-8B-Instruct-4bit/**/* || true
          # Run unit tests
          METAL_XCODE=1 python3 -m exo.inference.test_inference_engine

  discovery_integration_test:
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'
      - name: Install dependencies
        run: |
          python3 -m pip install --upgrade pip
          pip install .
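      # The two instances are cross-wired: each node's --broadcast-port is the
      # other's --listen-port, so discovery broadcasts from one land on the
      # other and both sides should log "Connected to peer".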
      - name: Run discovery integration test
        run: |
          # Start first instance
          DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --listen-port 5678 --broadcast-port 5679 --chatgpt-api-port 8000 > output1.log 2>&1 &
          PID1=$!
          # Start second instance
          DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --listen-port 5679 --broadcast-port 5678 --chatgpt-api-port 8001 > output2.log 2>&1 &
          PID2=$!
          # Wait for discovery
          sleep 10
          # Stop both instances
          kill $PID1 $PID2
          # Check outputs
          if grep -q "Connected to peer" output1.log && grep -q "Connected to peer" output2.log; then
            echo "Test passed: Both instances discovered each other"
            exit 0
          else
            echo "Test failed: Devices did not discover each other"
            echo "Output of first instance:"
            cat output1.log
            echo "Output of second instance:"
            cat output2.log
            exit 1
          fi

  chatgpt_api_integration_test:
    runs-on: macos-latest
    steps:
      - uses: actions/checkout@v2
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.x'
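      # Unlike unit_test, these cache steps add restore-keys: if the exact key
      # misses, actions/cache restores the newest cache matching the prefix,
      # so previously downloaded models carry over between runs.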
      - name: Cache huggingface hub models
        uses: actions/cache@v3
        with:
          path: ~/.cache/huggingface/hub
          key: ${{ runner.os }}-huggingface-hub-${{ hashFiles('~/.cache/huggingface/hub/**/*') }}-${{ github.job }}
          restore-keys: |
            ${{ runner.os }}-huggingface-hub-
      - name: Cache tinygrad downloaded models
        uses: actions/cache@v3
        with:
          path: ~/Library/Caches/tinygrad/downloads
          key: ${{ runner.os }}-tinygrad-downloads-${{ hashFiles('~/Library/Caches/tinygrad/downloads/**/*') }}-${{ github.job }}
          restore-keys: |
            ${{ runner.os }}-tinygrad-downloads-
      - name: Install dependencies
        run: |
          python3 -m pip install --upgrade pip
          pip install .
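      # Test flow: start two nodes, send one throwaway request so the model is
      # downloaded and loaded (hence the generous 1200s response timeout), then
      # send the same question twice and assert both responses mention
      # "Michael Jackson". With temperature 0.7 the check is probabilistic.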
      - name: Run chatgpt api integration test
        run: |
          # Check if cached files are present
          ls ~/.cache/huggingface/hub/models--mlx-community--Meta-Llama-3-8B-Instruct-4bit/**/* || true
          # Start first instance
          DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --listen-port 5678 --broadcast-port 5679 --chatgpt-api-port 8000 --chatgpt-api-response-timeout-secs 1200 > output1.log 2>&1 &
          PID1=$!
          # Start second instance
          DEBUG_DISCOVERY=9 DEBUG=9 python3 main.py --listen-port 5679 --broadcast-port 5678 --chatgpt-api-port 8001 --chatgpt-api-response-timeout-secs 1200 > output2.log 2>&1 &
          PID2=$!
          # Wait for discovery
          sleep 10
          # Send a first request to load the model; its response is discarded
          curl -s http://localhost:8000/v1/chat/completions \
            -H "Content-Type: application/json" \
            -d '{
              "model": "llama-3-8b",
              "messages": [{"role": "user", "content": "Placeholder to load model..."}],
              "temperature": 0.7
            }'
          response_1=$(curl -s http://localhost:8000/v1/chat/completions \
            -H "Content-Type: application/json" \
            -d '{
              "model": "llama-3-8b",
              "messages": [{"role": "user", "content": "Who was the king of pop?"}],
              "temperature": 0.7
            }')
          echo "Response 1: $response_1"
          response_2=$(curl -s http://localhost:8000/v1/chat/completions \
            -H "Content-Type: application/json" \
            -d '{
              "model": "llama-3-8b",
              "messages": [{"role": "user", "content": "Who was the king of pop?"}],
              "temperature": 0.7
            }')
          echo "Response 2: $response_2"
          # Stop both instances
          kill $PID1 $PID2
          echo ""
          if ! echo "$response_1" | grep -q "Michael Jackson" || ! echo "$response_2" | grep -q "Michael Jackson"; then
            echo "Test failed: Response does not contain 'Michael Jackson'"
            echo "Response 1: $response_1"
            echo ""
            echo "Response 2: $response_2"
            exit 1
          else
            echo "Test passed: Response from both nodes contains 'Michael Jackson'"
          fi