@@ -27,12 +27,23 @@ commands:
           fi

           # Start first instance
-          HF_HOME="$(pwd)/.hf_cache_node1" DEBUG_DISCOVERY=7 DEBUG=7 exo --inference-engine <<parameters.inference_engine>> --node-id "node1" --listen-port 5678 --broadcast-port 5679 --chatgpt-api-port 8000 --chatgpt-api-response-timeout 900 --disable-tui 2>&1 | tee output1.log &
+          HF_HOME="$(pwd)/.hf_cache_node1" DEBUG_DISCOVERY=7 DEBUG=7 exo --inference-engine <<parameters.inference_engine>> \
+            --node-id "node1" --listen-port 5678 --broadcast-port 5679 --chatgpt-api-port 8000 \
+            --chatgpt-api-response-timeout 900 --disable-tui > output1.log &
           PID1=$!
+          tail -f output1.log &
+          TAIL1=$!

           # Start second instance
-          HF_HOME="$(pwd)/.hf_cache_node2" DEBUG_DISCOVERY=7 DEBUG=7 exo --inference-engine <<parameters.inference_engine>> --node-id "node2" --listen-port 5679 --broadcast-port 5678 --chatgpt-api-port 8001 --chatgpt-api-response-timeout 900 --disable-tui 2>&1 | tee output2.log &
+          HF_HOME="$(pwd)/.hf_cache_node2" DEBUG_DISCOVERY=7 DEBUG=7 exo --inference-engine <<parameters.inference_engine>> \
+            --node-id "node2" --listen-port 5679 --broadcast-port 5678 --chatgpt-api-port 8001 \
+            --chatgpt-api-response-timeout 900 --disable-tui > output2.log &
           PID2=$!
+          tail -f output2.log &
+          TAIL2=$!
+
+          # Remember to kill the tail processes at the end
+          trap 'kill $TAIL1 $TAIL2' EXIT

           # Wait for discovery
           sleep 10