Fix xai probe: double /v1 URL bug, use /v1/models instead of chat completion
Two bugs caused all xai providers to show 'error' in the monitor: 1. Double /v1 in URL: models.json baseUrl is https://api.x.ai/v1 (OpenAI-compatible convention), and the probe was appending /v1/chat/completions, producing https://api.x.ai/v1/v1/chat/completions → HTTP 4xx. Fix: strip trailing /vN from baseUrl before constructing the probe URL. 2. Wrong model: probe used grok-3-mini, which requires specific x.ai console permissions not granted to our keys. Keys have access to grok-4-1-fast-reasoning only. Fix: use GET /v1/models instead — lightweight, no model guessing, returns 200 (valid key) or 401 (invalid). Includes available models in result for visibility. 158/158 tests pass (unit tests for parseXaiHeaders unchanged).
This commit is contained in:
parent
34898b1196
commit
c7e6438398
2 changed files with 46 additions and 35 deletions
|
|
@ -102,7 +102,7 @@ export function parseXaiHeaders(headers, httpStatus, apiKey) {
|
|||
* Probe an x.ai API endpoint.
|
||||
*
|
||||
* @param {string} providerName
|
||||
* @param {string} baseUrl — base URL (e.g. https://api.x.ai)
|
||||
* @param {string} baseUrl — base URL (e.g. https://api.x.ai or https://api.x.ai/v1)
|
||||
* @param {string|null} apiKey — x.ai API key
|
||||
* @returns {Promise<Object>} normalized provider result
|
||||
*/
|
||||
|
|
@ -111,22 +111,37 @@ export async function probeXaiProvider(providerName, baseUrl, apiKey) {
|
|||
return { type: 'xai-api', status: 'no_key', severity: 'unknown' };
|
||||
}
|
||||
|
||||
const base = (baseUrl || 'https://api.x.ai').replace(/\/$/, '');
|
||||
// Strip trailing /v1 (or /v2 etc.) — the probe appends its own versioned path.
|
||||
// models.json baseUrl often includes the version (e.g. https://api.x.ai/v1)
|
||||
// to satisfy the pi framework; we normalise here to avoid doubling it.
|
||||
const base = (baseUrl || 'https://api.x.ai').replace(/\/$/, '').replace(/\/v\d+$/, '');
|
||||
|
||||
try {
|
||||
const response = await fetch(`${base}/v1/chat/completions`, {
|
||||
method: 'POST',
|
||||
// Use GET /v1/models as the probe: lightweight, no model-name guessing,
|
||||
// returns 200 (valid key) or 401 (invalid key). x.ai doesn't expose
|
||||
// per-request quota headers the way Anthropic does, so this is the
|
||||
// most reliable liveness check.
|
||||
const response = await fetch(`${base}/v1/models`, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Authorization': `Bearer ${apiKey}`,
|
||||
'content-type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: 'grok-3-mini',
|
||||
max_tokens: 1,
|
||||
messages: [{ role: 'user', content: 'Hi' }],
|
||||
}),
|
||||
});
|
||||
|
||||
if (response.status === 200) {
|
||||
let models = [];
|
||||
try {
|
||||
const body = await response.json();
|
||||
models = (body.data || []).map(m => m.id);
|
||||
} catch (_) { /* ignore parse errors */ }
|
||||
return {
|
||||
type: 'xai-api',
|
||||
status: 'ok',
|
||||
models,
|
||||
severity: 'ok',
|
||||
};
|
||||
}
|
||||
|
||||
return parseXaiHeaders(response.headers, response.status, apiKey);
|
||||
} catch (err) {
|
||||
return {
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue