Skip to main content
This tool gives you a Python script that tests whether a Qhaigc /v1/messages endpoint genuinely supports Claude native tools — including tool_use content blocks and strict schema validation — rather than silently routing your request through an OpenAI compatibility layer. Run it whenever you need confidence that native Claude tool calling will work end-to-end with your API key and chosen model.

Usage

Step 1 — Install the dependency

pip install requests
Step 2 — Edit the script configuration

Open claude_api_checker.py and update the three variables in the config section at the top:
  • Set API_URL to your endpoint, e.g. https://api.qhaigc.net/v1/messages
  • Set API_KEY to your Qhaigc API key
  • Set MODEL to the Claude model you want to test, e.g. claude-haiku-4-5-20251001
Do not commit your real API key to a public repository. Use environment variables or a secrets manager to inject the key at runtime.
Step 3 — Run the script

python claude_api_checker.py
The script runs three test phases against your endpoint and prints detailed output for each one.
Step 4 — Read the compatibility report

Look for the Claude Native Compatibility Report section at the end of the output. It summarizes whether native Claude tools, strict schema, and authentication all behaved as expected.

The Script

# pip install requests
import json
import os
import time
from typing import Dict, Any, Optional, Tuple

import requests

# ================= Configuration =================
# Must be the Claude / Anthropic Messages path, e.g. https://.../v1/messages
API_URL = os.environ.get("QHAIGC_API_URL", "https://api.qhaigc.net/v1/messages")

# Prefer injecting the key via environment variable rather than committing it;
# the placeholder below is only a local-testing fallback.
API_KEY = os.environ.get("QHAIGC_API_KEY", "sk-XXXXXXXXXXXXXXXXXXXX")

# Set this to the model you actually want to test
MODEL = os.environ.get("QHAIGC_MODEL", "claude-haiku-4-5-20251001")

# Official Claude API version header
ANTHROPIC_VERSION = "2023-06-01"

# Per-request timeout in seconds
TIMEOUT = 20
# ==========================================

def pretty(obj: Any) -> str:
    """Serialize *obj* as human-readable JSON, keeping non-ASCII text intact."""
    return json.dumps(obj, ensure_ascii=False, indent=2)

def build_headers_x_api_key() -> Dict[str, str]:
    """Build request headers using Anthropic's native x-api-key auth scheme."""
    headers: Dict[str, str] = {}
    headers["x-api-key"] = API_KEY
    headers["anthropic-version"] = ANTHROPIC_VERSION
    headers["content-type"] = "application/json"
    return headers

def build_headers_bearer() -> Dict[str, str]:
    """Build request headers using OpenAI-style Bearer token authentication."""
    headers: Dict[str, str] = {}
    headers["Authorization"] = f"Bearer {API_KEY}"
    headers["anthropic-version"] = ANTHROPIC_VERSION
    headers["content-type"] = "application/json"
    return headers

def build_tool_definition(strict: bool = False) -> Dict[str, Any]:
    """Return a Claude-native spec for a toy ``get_weather`` tool.

    When *strict* is True, a top-level ``"strict": True`` flag is attached so
    callers can probe whether the endpoint honors strict schema mode.
    """
    schema = {
        "type": "object",
        "properties": {
            "location": {
                "type": "string",
                "description": "The city and state, e.g. San Francisco, CA",
            }
        },
        "required": ["location"],
        "additionalProperties": False,
    }
    tool: Dict[str, Any] = {
        "name": "get_weather",
        "description": "Get the current weather in a given location",
        "input_schema": schema,
    }
    if strict:
        tool["strict"] = True
    return tool

def build_payload(force_tool: bool = True, strict: bool = False) -> Dict[str, Any]:
    """Assemble a /v1/messages request body designed to trigger a tool call.

    force_tool: when True, pin tool_choice to get_weather so the model cannot
        answer from memory.
    strict: forwarded to build_tool_definition to probe strict-schema support.
    """
    user_text = (
        "What's the weather like in San Francisco? "
        "Please use the get_weather tool and do not answer from memory."
    )
    payload: Dict[str, Any] = {
        "model": MODEL,
        "max_tokens": 256,
        "messages": [{"role": "user", "content": user_text}],
        "tools": [build_tool_definition(strict=strict)],
    }
    if force_tool:
        payload["tool_choice"] = {"type": "tool", "name": "get_weather"}
    return payload

def safe_json(resp: requests.Response) -> Tuple[Optional[Dict[str, Any]], Optional[str]]:
    """Decode a response body as JSON.

    Returns ``(parsed, None)`` on success, or ``(None, raw_text)`` when the
    body is not valid JSON so callers can surface the raw payload.
    """
    try:
        parsed = resp.json()
    except json.JSONDecodeError:
        return None, resp.text
    return parsed, None

def find_tool_use(resp_json: Dict[str, Any]) -> Optional[Dict[str, Any]]:
    """Return the first content block of type "tool_use", or None if absent."""
    blocks = resp_json.get("content", [])
    return next(
        (b for b in blocks if isinstance(b, dict) and b.get("type") == "tool_use"),
        None,
    )

def classify_error(error_text: str) -> str:
    """Map a raw error body to a human-readable diagnostic hint.

    Checks run from most to least specific; the first match wins, and an
    unmatched error falls through to a generic "review manually" message.
    """
    lowered = error_text.lower()
    checks = (
        (lambda t: "chat.completions" in t,
         "Likely routing through OpenAI chat.completions internally."),
        (lambda t: "tools[0].type" in t or "type is required" in t,
         "OpenAI-style tools validation detected — not true Claude Native Tools."),
        (lambda t: "parameters" in t or "function" in t or "function_call" in t,
         "Compatibility layer is processing as OpenAI function calling."),
        (lambda t: "model" in t and any(k in t for k in ("not found", "invalid", "unsupported")),
         "Model not found or not connected — check model name."),
    )
    for predicate, verdict in checks:
        if predicate(lowered):
            return verdict
    return "Could not auto-classify — review the raw error response."

def do_request(name: str, headers: Dict[str, str], payload: Dict[str, Any]) -> Dict[str, Any]:
    """POST *payload* to API_URL and summarize the outcome of one test phase.

    Returns a result dict with keys: ok, status_code, elapsed_sec, phase,
    json, raw_text, plus — when the body parsed as JSON — stop_reason and
    tool_use. On a transport failure, returns {ok, network_error, phase}.
    """
    print(f"\n===== {name} =====")
    started = time.time()
    try:
        resp = requests.post(API_URL, headers=headers, json=payload, timeout=TIMEOUT)
        elapsed = round(time.time() - started, 3)
    except requests.exceptions.RequestException as e:
        print(f"Network error: {e}")
        return {"ok": False, "network_error": str(e), "phase": name}

    resp_json, raw_text = safe_json(resp)
    result = {
        "ok": resp.status_code == 200,
        "status_code": resp.status_code,
        "elapsed_sec": elapsed,
        "phase": name,
        "json": resp_json,
        "raw_text": raw_text,
    }
    # Bug fix: compare against None explicitly — a falsy-but-valid JSON body
    # (e.g. {}) must still be inspected for stop_reason / tool_use.
    if resp_json is not None:
        result["stop_reason"] = resp_json.get("stop_reason")
        result["tool_use"] = find_tool_use(resp_json)

    # Print the detailed per-phase output that the usage notes promise;
    # this also puts the previously-unused pretty() helper to work.
    print(f"HTTP {resp.status_code} in {elapsed}s")
    if resp_json is not None:
        print(pretty(resp_json))
    elif raw_text:
        print(raw_text)
    return result

def has_tool_use(result: Optional[Dict[str, Any]]) -> bool:
    """True when *result* is a successful phase result containing a tool_use block."""
    if not result:
        return False
    return bool(result.get("ok")) and bool(result.get("tool_use"))

def main() -> None:
    """Run all three probe phases and print the compatibility report.

    Phases:
      1. Native Claude tools with x-api-key auth.
      2. Same request with a strict tool schema.
      3. Same request using Bearer auth instead of x-api-key.
    """
    print("Testing Claude Native Tools compatibility...")
    results = {}
    results["native_basic"] = do_request(
        "Phase 1 - Native Claude Tools",
        build_headers_x_api_key(),
        build_payload(force_tool=True, strict=False),
    )
    results["native_strict"] = do_request(
        "Phase 2 - Strict Schema",
        build_headers_x_api_key(),
        build_payload(force_tool=True, strict=True),
    )
    results["bearer_probe"] = do_request(
        "Phase 3 - Bearer Auth Probe",
        build_headers_bearer(),
        build_payload(force_tool=True, strict=False),
    )

    print("\n" + "=" * 60)
    print("Claude Native Compatibility Report")
    print("=" * 60)
    if has_tool_use(results["native_basic"]):
        print("✅ Native Claude Tools: Supported")
    else:
        print("❌ Native Claude Tools: Not supported or compatibility issue")
    if has_tool_use(results["native_strict"]):
        print("✅ Strict Tool Schema: Supported")
    else:
        print("⚠️  Strict Tool Schema: Not fully supported")
    # Bug fix: the bearer probe was executed but never reported, although the
    # report is documented as covering authentication behavior.
    if has_tool_use(results["bearer_probe"]):
        print("✅ Bearer Auth: Accepted")
    else:
        print("⚠️  Bearer Auth: Rejected or not supported (x-api-key may be required)")
    print("=" * 60)

# Run the checker only when executed as a script, not on import.
if __name__ == "__main__":
    main()

How to Read the Results

✅ Supported: The response contained a content[] block with "type": "tool_use". Your endpoint correctly supports native Claude tool calling via the /v1/messages path.
⚠️ Partially supported: The basic tool call succeeded and returned a tool_use block, but the endpoint did not handle the strict: true field in the tool definition correctly. Core tool calling still works; however, strict schema enforcement may not be fully supported by the underlying model or routing layer.
❌ Not supported: No tool_use block was detected in the response, or the request failed outright. Check the following:
  • Confirm your MODEL value matches a Claude model available on your account
  • Confirm API_URL points to the /v1/messages path, not /v1/chat/completions
  • Confirm API_KEY is valid and has not expired
  • Review the raw error output printed above the report for a diagnostic hint
This script tests the /v1/messages endpoint specifically. Most Qhaigc use cases work fine through /v1/chat/completions, which is fully OpenAI-compatible. Use this checker only when you specifically require Claude native tools with tool_use content blocks in the response.