
fix ci output streaming

Alex Cheema committed 10 months ago
commit 7a02acdcd5

1 changed file with 33 additions and 9 deletions

+ 33 - 9
.circleci/config.yml

@@ -16,21 +16,40 @@ commands:
           command: |
             source env/bin/activate
 
-            # Function to run a command with real-time output and capture
-            run_with_output() {
-              local log_file=$1
-              shift
-              "$@" > >(tee -a "$log_file") 2>&1 &
+            # Create named pipes
+            mkfifo node1_pipe node2_pipe
+
+            # Start output streaming in the background
+            tee output1.log < node1_pipe &
+            tee output2.log < node2_pipe &
+
+            # Function to run a command and redirect output
+            run_instance() {
+              local instance=$1
+              local port=$2
+              local api_port=$3
+
+              HF_HOME="$(pwd)/.hf_cache_$instance" \
+              DEBUG_DISCOVERY=7 \
+              DEBUG=7 \
+              python3 main.py \
+                --inference-engine <<parameters.inference_engine>> \
+                --node-id "$instance" \
+                --listen-port $port \
+                --broadcast-port $((port + 1)) \
+                --chatgpt-api-port $api_port \
+                --chatgpt-api-response-timeout-secs 900 \
+                > ${instance}_pipe 2>&1 &
+
               echo $!
             }
 
-            # Start first instance
+            # Start instances
             echo "Starting first instance..."
-            PID1=$(run_with_output output1.log HF_HOME="$(pwd)/.hf_cache_node1" DEBUG_DISCOVERY=7 DEBUG=7 python3 main.py --inference-engine <<parameters.inference_engine>> --node-id "node1" --listen-port 5678 --broadcast-port 5679 --chatgpt-api-port 8000 --chatgpt-api-response-timeout-secs 900)
+            PID1=$(run_instance "node1" 5678 8000)
 
-            # Start second instance
             echo "Starting second instance..."
-            PID2=$(run_with_output output2.log HF_HOME="$(pwd)/.hf_cache_node2" DEBUG_DISCOVERY=7 DEBUG=7 python3 main.py --inference-engine <<parameters.inference_engine>> --node-id "node2" --listen-port 5679 --broadcast-port 5678 --chatgpt-api-port 8001 --chatgpt-api-response-timeout-secs 900)
+            PID2=$(run_instance "node2" 5679 8001)
 
             # Wait for discovery
             echo "Waiting for discovery..."
@@ -96,6 +115,11 @@ commands:
               echo "Test passed: Response from both nodes contains 'Michael Jackson'"
             fi
 
+            # Clean up
+            kill $PID1 $PID2
+            wait
+            rm node1_pipe node2_pipe
+
 jobs:
   unit_test:
     macos:
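
For context, the change swaps per-command process substitution for named pipes: each node writes its stdout/stderr into a FIFO, and a backgrounded tee copies that stream to the CircleCI job output in real time while also writing a log file that later steps can inspect. Below is a minimal standalone sketch of the same pattern, not code from the repository; the worker loop is a stand-in for the real python3 main.py invocation, and file names like node1_pipe and node1.log are illustrative.

#!/usr/bin/env bash
set -euo pipefail

# Create a named pipe so output can be read as it is produced.
mkfifo node1_pipe

# tee reads from the pipe and copies every line to stdout and a log file.
tee node1.log < node1_pipe &
TEE_PID=$!

# Stand-in for the long-running command (e.g. "python3 main.py ...").
( for i in 1 2 3; do echo "node1: step $i"; sleep 1; done ) > node1_pipe 2>&1 &
WORKER_PID=$!

# Wait for the worker to finish, let tee drain to EOF, then clean up.
wait "$WORKER_PID"
wait "$TEE_PID"
rm node1_pipe

The benefit over plain redirection to a file is that the stream shows up in the CI step output as it happens, while the log file is still available for the later checks the script performs on each node's responses.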