import test from 'node:test';
import assert from 'node:assert/strict';
import {
buildUpstreamMessagesFromAnthropic,
buildUpstreamMessagesFromChat,
buildUpstreamMessagesFromResponses,
buildUpstreamOptionsFromAnthropic,
buildUpstreamOptionsFromChat,
buildUpstreamOptionsFromResponses,
} from './direct-request-builders';
import {
applyCanonicalEvent,
createCanonicalState,
parseUpstreamChunk,
type CanonicalEvent,
} from './direct-canonical';
import {
buildChatCompletionResponse,
collectChatCompletionStreamFrames,
} from './direct-adapter-chat';
import {
buildResponsesResponse,
collectResponsesStreamEvents,
} from './direct-adapter-responses';
import {
buildAnthropicMessageResponse,
collectAnthropicStreamEvents,
} from './direct-adapter-messages';
import { loadModels } from './direct-config';
import type { DirectTextBlock } from './direct-types';
// Fixture: CLI user-context blocks that every builder is expected to prepend
// to the final user message (context text + workspace path).
const cliContextBlocks: DirectTextBlock[] = [
{ type: 'text', text: 'ctx' },
{ type: 'text', text: '/workspace' },
];
/**
 * Builds the shared request-builder context used by the tests below.
 *
 * @param systemPromptMode - how the builders should treat system prompts;
 *   defaults to 'original' (use the captured prompt, drop the request's own).
 */
function builderContext(systemPromptMode: 'original' | 'passthrough' | 'hybrid' = 'original') {
  const context = {
    systemPrompt: 'captured system prompt',
    systemPromptMode,
    cliUserContextBlocks: cliContextBlocks,
  };
  return context;
}
/**
 * Folds a sequence of canonical events into a fresh canonical state
 * (fixed id/model/created metadata shared by the adapter tests).
 */
function accumulate(events: CanonicalEvent[]) {
  const state = createCanonicalState({ id: 'resp_1', model: 'minimax-m2.7', created: 123 });
  events.forEach((event) => applyCanonicalEvent(state, event));
  return state;
}
// --- buildUpstreamMessagesFromChat ---

// Default ('original') mode: the captured system prompt replaces the request's
// own system message, and the final user turn is wrapped with CLI context.
test('buildUpstreamMessagesFromChat uses original system prompt by default and wraps final user query', () => {
const messages = buildUpstreamMessagesFromChat(
{
messages: [
{ role: 'system', content: 'request system' },
{ role: 'assistant', content: 'older answer' },
{ role: 'user', content: 'hello world' },
],
},
builderContext(),
);
assert.equal(messages[0]?.role, 'system');
assert.equal(messages[0]?.content, 'captured system prompt');
// Exactly one system message: the request's system prompt must be dropped.
assert.equal(messages.filter((message) => message.role === 'system').length, 1);
assert.equal(messages[1]?.role, 'assistant');
assert.equal(messages[2]?.role, 'user');
assert.equal(messages[2]?.agent, 'cli');
assert.ok(Array.isArray(messages[2]?.content));
// Context blocks come first, then the user's actual query text.
assert.deepEqual(messages[2]?.content, [
...cliContextBlocks,
{ type: 'text', text: 'hello world' },
]);
});
// Passthrough mode: the request's system message survives unchanged and the
// captured prompt is not injected.
test('buildUpstreamMessagesFromChat can passthrough request system prompt instead of original', () => {
const messages = buildUpstreamMessagesFromChat(
{
messages: [
{ role: 'system', content: 'request system' },
{ role: 'user', content: 'hello world' },
],
},
builderContext('passthrough'),
);
assert.equal(messages[0]?.role, 'system');
assert.equal(messages[0]?.content, 'request system');
assert.equal(messages.filter((message) => message.role === 'system').length, 1);
assert.equal(messages[1]?.role, 'user');
});
// Hybrid mode: captured prompt first, request prompt second — two system
// messages in that fixed order.
test('buildUpstreamMessagesFromChat can send original then passthrough system prompts in hybrid mode', () => {
const messages = buildUpstreamMessagesFromChat(
{
messages: [
{ role: 'system', content: 'request system' },
{ role: 'user', content: 'hello world' },
],
},
builderContext('hybrid'),
);
assert.equal(messages[0]?.role, 'system');
assert.equal(messages[0]?.content, 'captured system prompt');
assert.equal(messages[1]?.role, 'system');
assert.equal(messages[1]?.content, 'request system');
assert.equal(messages.filter((message) => message.role === 'system').length, 2);
assert.equal(messages[2]?.role, 'user');
});
// Tool-call continuations (assistant tool_calls + tool results) must pass
// through untouched so multi-turn tool conversations keep working upstream.
test('buildUpstreamMessagesFromChat preserves assistant tool calls and tool results', () => {
const messages = buildUpstreamMessagesFromChat(
{
messages: [
{
role: 'assistant',
content: '',
tool_calls: [{
id: 'call_1',
type: 'function',
function: { name: 'lookup', arguments: '{"q":"x"}' },
}],
},
{ role: 'tool', tool_call_id: 'call_1', content: 'result' },
{ role: 'user', content: 'continue' },
],
},
builderContext(),
);
assert.equal(messages[1]?.role, 'assistant');
assert.deepEqual(messages[1]?.tool_calls, [{
id: 'call_1',
type: 'function',
function: { name: 'lookup', arguments: '{"q":"x"}' },
}]);
assert.equal(messages[2]?.role, 'tool');
assert.equal(messages[2]?.tool_call_id, 'call_1');
assert.equal(messages[2]?.content, 'result');
});
// --- buildUpstreamMessagesFromResponses ---

// Default mode: captured system prompt wins over request `instructions`.
test('buildUpstreamMessagesFromResponses uses original system prompt by default', () => {
const messages = buildUpstreamMessagesFromResponses(
{
instructions: 'be concise',
input: [
{
type: 'message',
role: 'user',
content: [{ type: 'input_text', text: 'say hi' }],
},
],
},
builderContext(),
);
assert.equal(messages[0]?.role, 'system');
assert.equal(messages[0]?.content, 'captured system prompt');
assert.equal(messages.filter((message) => message.role === 'system').length, 1);
assert.equal(messages[1]?.role, 'user');
assert.equal(messages[1]?.agent, 'cli');
// input_text blocks are converted to text blocks after the CLI context.
assert.deepEqual(messages[1]?.content, [
...cliContextBlocks,
{ type: 'text', text: 'say hi' },
]);
});
// Passthrough mode: `instructions` become the single system message.
test('buildUpstreamMessagesFromResponses can passthrough request instructions instead of original', () => {
const messages = buildUpstreamMessagesFromResponses(
{
instructions: 'be concise',
input: 'say hi',
},
builderContext('passthrough'),
);
assert.equal(messages[0]?.role, 'system');
assert.equal(messages[0]?.content, 'be concise');
assert.equal(messages.filter((message) => message.role === 'system').length, 1);
assert.equal(messages[1]?.role, 'user');
});
// Hybrid mode: captured prompt first, then `instructions`, as two
// consecutive system messages.
test('buildUpstreamMessagesFromResponses can send original then instructions in hybrid mode', () => {
const messages = buildUpstreamMessagesFromResponses(
{
instructions: 'be concise',
input: 'say hi',
},
builderContext('hybrid'),
);
assert.equal(messages[0]?.role, 'system');
assert.equal(messages[0]?.content, 'captured system prompt');
assert.equal(messages[1]?.role, 'system');
assert.equal(messages[1]?.content, 'be concise');
assert.equal(messages.filter((message) => message.role === 'system').length, 2);
assert.equal(messages[2]?.role, 'user');
});
// Message items may carry a bare string instead of a content array; both
// forms must produce the same wrapped user message.
test('buildUpstreamMessagesFromResponses accepts string message content', () => {
const messages = buildUpstreamMessagesFromResponses(
{
input: [
{
type: 'message',
role: 'user',
content: 'say hi',
},
],
},
builderContext(),
);
assert.equal(messages[1]?.role, 'user');
assert.deepEqual(messages[1]?.content, [
...cliContextBlocks,
{ type: 'text', text: 'say hi' },
]);
});
// function_call / function_call_output input items map to the chat-style
// assistant tool_calls + tool-result message pair expected upstream.
test('buildUpstreamMessagesFromResponses preserves function call continuation items', () => {
const messages = buildUpstreamMessagesFromResponses(
{
input: [
{ type: 'function_call', call_id: 'call_1', name: 'lookup', arguments: '{"q":"x"}' },
{ type: 'function_call_output', call_id: 'call_1', output: 'result' },
{ type: 'message', role: 'user', content: [{ type: 'input_text', text: 'continue' }] },
],
},
builderContext(),
);
assert.equal(messages[1]?.role, 'assistant');
assert.deepEqual(messages[1]?.tool_calls, [{
id: 'call_1',
type: 'function',
function: { name: 'lookup', arguments: '{"q":"x"}' },
}]);
assert.equal(messages[2]?.role, 'tool');
assert.equal(messages[2]?.tool_call_id, 'call_1');
assert.equal(messages[2]?.content, 'result');
});
// --- buildUpstreamMessagesFromAnthropic ---

// Default mode: captured system prompt wins over the request's `system`
// field; Anthropic text-block content is flattened to plain strings.
test('buildUpstreamMessagesFromAnthropic uses original system prompt by default', () => {
const messages = buildUpstreamMessagesFromAnthropic(
{
system: 'anthropic system',
messages: [
{ role: 'assistant', content: [{ type: 'text', text: 'older answer' }] },
{ role: 'user', content: [{ type: 'text', text: 'new question' }] },
],
},
builderContext(),
);
assert.equal(messages[0]?.role, 'system');
assert.equal(messages[0]?.content, 'captured system prompt');
assert.equal(messages.filter((message) => message.role === 'system').length, 1);
assert.equal(messages[1]?.role, 'assistant');
// Assistant text blocks collapse to a plain string.
assert.equal(messages[1]?.content, 'older answer');
assert.equal(messages[2]?.role, 'user');
assert.equal(messages[2]?.agent, 'cli');
});
// Passthrough mode: the request's `system` is the only system message.
test('buildUpstreamMessagesFromAnthropic can passthrough request system instead of original', () => {
const messages = buildUpstreamMessagesFromAnthropic(
{
system: 'anthropic system',
messages: [{ role: 'user', content: [{ type: 'text', text: 'new question' }] }],
},
builderContext('passthrough'),
);
assert.equal(messages[0]?.role, 'system');
assert.equal(messages[0]?.content, 'anthropic system');
assert.equal(messages.filter((message) => message.role === 'system').length, 1);
assert.equal(messages[1]?.role, 'user');
});
// Hybrid mode: captured prompt then request `system`, two system messages.
test('buildUpstreamMessagesFromAnthropic can send original then request system in hybrid mode', () => {
const messages = buildUpstreamMessagesFromAnthropic(
{
system: 'anthropic system',
messages: [{ role: 'user', content: [{ type: 'text', text: 'new question' }] }],
},
builderContext('hybrid'),
);
assert.equal(messages[0]?.role, 'system');
assert.equal(messages[0]?.content, 'captured system prompt');
assert.equal(messages[1]?.role, 'system');
assert.equal(messages[1]?.content, 'anthropic system');
assert.equal(messages.filter((message) => message.role === 'system').length, 2);
assert.equal(messages[2]?.role, 'user');
});
// Anthropic tool_use / tool_result blocks map to the chat-style assistant
// tool_calls (input JSON-stringified) + tool-result message pair.
test('buildUpstreamMessagesFromAnthropic preserves tool_use and tool_result blocks', () => {
const messages = buildUpstreamMessagesFromAnthropic(
{
messages: [
{
role: 'assistant',
content: [{ type: 'tool_use', id: 'call_1', name: 'lookup', input: { q: 'x' } }],
},
{
role: 'user',
content: [{ type: 'tool_result', tool_use_id: 'call_1', content: 'result' }],
},
],
},
builderContext(),
);
assert.equal(messages[1]?.role, 'assistant');
assert.deepEqual(messages[1]?.tool_calls, [{
id: 'call_1',
type: 'function',
function: { name: 'lookup', arguments: '{"q":"x"}' },
}]);
assert.equal(messages[2]?.role, 'tool');
assert.equal(messages[2]?.tool_call_id, 'call_1');
assert.equal(messages[2]?.content, 'result');
});
// --- buildUpstreamOptions* (one test per source API shape) ---

// Chat-completions options map through mostly 1:1.
test('buildUpstreamOptionsFromChat passes through model tools and reasoning settings', () => {
const options = buildUpstreamOptionsFromChat({
model: 'kimi-k2.6',
tools: [{ type: 'function', function: { name: 'lookup', description: 'd', parameters: { type: 'object' } } }],
temperature: 0.3,
max_tokens: 4096,
reasoning_effort: 'high',
verbosity: 'low',
reasoning_summary: 'detailed',
});
assert.equal(options.model, 'kimi-k2.6');
assert.equal(options.temperature, 0.3);
assert.equal(options.max_tokens, 4096);
assert.equal(options.reasoning_effort, 'high');
assert.equal(options.verbosity, 'low');
assert.equal(options.reasoning_summary, 'detailed');
assert.deepEqual(options.tools, [{ type: 'function', function: { name: 'lookup', description: 'd', parameters: { type: 'object' } } }]);
});
// Responses-API options: flat tool entries gain a `function` wrapper,
// max_output_tokens → max_tokens, nested reasoning/text settings flatten.
test('buildUpstreamOptionsFromResponses passes through model tools and reasoning settings', () => {
const options = buildUpstreamOptionsFromResponses({
model: 'glm-5.1',
tools: [{ type: 'function', name: 'lookup', description: 'd', parameters: { type: 'object' } }],
temperature: 0.2,
max_output_tokens: 2048,
reasoning: { effort: 'medium', summary: 'auto' },
text: { verbosity: 'high' },
});
assert.equal(options.model, 'glm-5.1');
assert.equal(options.temperature, 0.2);
assert.equal(options.max_tokens, 2048);
assert.equal(options.reasoning_effort, 'medium');
assert.equal(options.reasoning_summary, 'auto');
assert.equal(options.verbosity, 'high');
assert.deepEqual(options.tools, [{ type: 'function', function: { name: 'lookup', description: 'd', parameters: { type: 'object' } } }]);
});
// Anthropic options: input_schema → parameters, and an enabled thinking
// budget maps to reasoning_effort 'high'.
test('buildUpstreamOptionsFromAnthropic passes through model tools and thinking budget', () => {
const options = buildUpstreamOptionsFromAnthropic({
model: 'hunyuan-2.0-thinking',
tools: [{ name: 'lookup', description: 'd', input_schema: { type: 'object' } }],
max_tokens: 1024,
thinking: { type: 'enabled', budget_tokens: 2048 },
});
assert.equal(options.model, 'hunyuan-2.0-thinking');
assert.equal(options.max_tokens, 1024);
assert.equal(options.reasoning_effort, 'high');
assert.deepEqual(options.tools, [{ type: 'function', function: { name: 'lookup', description: 'd', parameters: { type: 'object' } } }]);
});
// Sanity-checks the config-backed model registry exposes known model ids
// with their expected display names and credit multipliers.
test('loadModels reads config-backed model registry', () => {
  const models = loadModels();
  const kimi = models.find((model) => model.id === 'kimi-k2.6');
  assert.ok(kimi !== undefined && kimi.name === 'Kimi-K2.6' && kimi.credits_multiplier === 0.59);
  const hunyuan = models.find((model) => model.id === 'hunyuan-2.0-thinking');
  assert.ok(hunyuan !== undefined && hunyuan.credits_multiplier === 0.04);
});
// --- parseUpstreamChunk ---

// A single chunk may yield several canonical events; their emission order
// (text, reasoning, function-call name/args, finish, usage) is part of the
// contract this test pins down.
test('parseUpstreamChunk extracts content, reasoning, finish reason, and usage', () => {
const events = parseUpstreamChunk({
id: 'up_1',
model: 'ep-model',
object: 'chat.completion.chunk',
created: 456,
choices: [
{
index: 0,
delta: {
role: 'assistant',
content: 'hello',
reasoning_content: 'thinking',
function_call: { name: 'lookup', arguments: '{"q":"x"}' },
},
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: 10,
completion_tokens: 20,
total_tokens: 30,
},
});
assert.deepEqual(events, [
{ type: 'text_delta', text: 'hello' },
{ type: 'reasoning_delta', text: 'thinking' },
{ type: 'function_call_name', name: 'lookup' },
{ type: 'function_call_arguments_delta', text: '{"q":"x"}' },
{ type: 'finish', finishReason: 'stop' },
{
type: 'usage',
usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
},
]);
});
// Streaming tool_calls deltas collapse into a single canonical
// tool_call_delta event carrying index/id/name/arguments.
test('parseUpstreamChunk extracts tool call deltas', () => {
const events = parseUpstreamChunk({
choices: [
{
delta: {
tool_calls: [{
index: 0,
id: 'call_1',
type: 'function',
function: {
name: 'fake_lookup_weather',
arguments: '{"city":"Shanghai"}',
},
}],
},
finish_reason: 'tool_calls',
},
],
});
assert.deepEqual(events, [
{
type: 'tool_call_delta',
index: 0,
id: 'call_1',
name: 'fake_lookup_weather',
arguments: '{"city":"Shanghai"}',
},
{ type: 'finish', finishReason: 'tool_calls' },
]);
});
// An empty-string finish_reason is an upstream placeholder, not a real
// finish — no 'finish' event should be emitted for it.
test('parseUpstreamChunk ignores empty upstream finish reason placeholders', () => {
const events = parseUpstreamChunk({
choices: [
{
delta: { reasoning_content: 'thinking' },
finish_reason: '',
},
],
});
assert.deepEqual(events, [{ type: 'reasoning_delta', text: 'thinking' }]);
});
// --- direct-adapter-chat (OpenAI chat-completions output shape) ---

// Non-streaming: accumulated state maps to a chat.completion object with
// reasoning_content preserved alongside regular content.
test('buildChatCompletionResponse maps canonical state to non-stream OpenAI response', () => {
const state = accumulate([
{ type: 'reasoning_delta', text: 'thinking' },
{ type: 'text_delta', text: 'hello world' },
{ type: 'finish', finishReason: 'stop' },
{ type: 'usage', usage: { prompt_tokens: 2, completion_tokens: 3, total_tokens: 5 } },
]);
const response = buildChatCompletionResponse(state);
assert.equal(response.object, 'chat.completion');
assert.equal(response.choices[0]?.message?.content, 'hello world');
assert.equal(response.choices[0]?.message?.reasoning_content, 'thinking');
assert.equal(response.usage?.total_tokens, 5);
});
// Accumulated tool_call deltas surface as message.tool_calls with the
// matching 'tool_calls' finish reason.
test('buildChatCompletionResponse maps tool calls to OpenAI message tool_calls', () => {
const state = accumulate([
{
type: 'tool_call_delta',
index: 0,
id: 'call_1',
name: 'fake_lookup_weather',
arguments: '{"city":"Shanghai","unit":"celsius"}',
},
{ type: 'finish', finishReason: 'tool_calls' },
]);
const response = buildChatCompletionResponse(state);
assert.equal(response.choices[0]?.finish_reason, 'tool_calls');
assert.deepEqual(response.choices[0]?.message?.tool_calls, [{
id: 'call_1',
type: 'function',
function: {
name: 'fake_lookup_weather',
arguments: '{"city":"Shanghai","unit":"celsius"}',
},
}]);
});
// Streaming: SSE frames carry reasoning and content deltas and always end
// with the OpenAI 'data: [DONE]' sentinel.
test('collectChatCompletionStreamFrames emits OpenAI SSE chunks and DONE', () => {
const frames = collectChatCompletionStreamFrames(
{ id: 'resp_1', model: 'minimax-m2.7', created: 123 },
[
{ type: 'reasoning_delta', text: 'thinking' },
{ type: 'text_delta', text: 'hello' },
{ type: 'finish', finishReason: 'stop' },
{ type: 'usage', usage: { prompt_tokens: 1, completion_tokens: 2, total_tokens: 3 } },
],
);
assert.equal(frames.at(-1), 'data: [DONE]');
assert.ok(frames.some((frame) => frame.includes('reasoning_content')));
assert.ok(frames.some((frame) => frame.includes('"content":"hello"')));
});
// Streaming tool calls: frames include tool_calls deltas, the function name,
// and the 'tool_calls' finish reason before the DONE sentinel.
test('collectChatCompletionStreamFrames emits tool call deltas', () => {
const frames = collectChatCompletionStreamFrames(
{ id: 'resp_1', model: 'minimax-m2.7', created: 123 },
[
{
type: 'tool_call_delta',
index: 0,
id: 'call_1',
name: 'fake_lookup_weather',
arguments: '{"city":"Shanghai"}',
},
{ type: 'finish', finishReason: 'tool_calls' },
],
);
assert.ok(frames.some((frame) => frame.includes('"tool_calls"')));
assert.ok(frames.some((frame) => frame.includes('"name":"fake_lookup_weather"')));
assert.ok(frames.some((frame) => frame.includes('"finish_reason":"tool_calls"')));
assert.equal(frames.at(-1), 'data: [DONE]');
});
// --- direct-adapter-responses (OpenAI Responses-API output shape) ---

// Non-streaming: reasoning becomes output[0] ('reasoning' item) and the text
// becomes a 'message' item with the text in its first content entry.
test('buildResponsesResponse maps canonical state to OpenAI responses payload', () => {
const state = accumulate([
{ type: 'reasoning_delta', text: 'thinking' },
{ type: 'text_delta', text: 'hello world' },
{ type: 'finish', finishReason: 'stop' },
{ type: 'usage', usage: { prompt_tokens: 4, completion_tokens: 5, total_tokens: 9 } },
]);
const response = buildResponsesResponse(state);
const messageItem = response.output[1] as { type?: string; content?: Array<{ text?: string }> };
assert.equal(response.object, 'response');
assert.equal(response.status, 'completed');
assert.equal(response.output[0]?.type, 'reasoning');
assert.equal(messageItem.type, 'message');
assert.equal(messageItem.content?.[0]?.text, 'hello world');
});
// Verifies tool calls surface as 'function_call' output items. The item's
// `id` is adapter-generated, so assert its type separately instead of the
// original self-referential `id: (...at(-1) as { id: string }).id`, which
// never checked that the id is actually a string.
test('buildResponsesResponse maps tool calls to function_call output items', () => {
  const state = accumulate([
    {
      type: 'tool_call_delta',
      index: 0,
      id: 'call_1',
      name: 'fake_lookup_weather',
      arguments: '{"city":"Shanghai"}',
    },
    { type: 'finish', finishReason: 'tool_calls' },
  ]);
  const response = buildResponsesResponse(state);
  const { id, ...item } = response.output.at(-1) as { id?: unknown } & Record<string, unknown>;
  // Pin down that the adapter assigns a string id at all.
  assert.equal(typeof id, 'string');
  assert.deepEqual(item, {
    type: 'function_call',
    call_id: 'call_1',
    name: 'fake_lookup_weather',
    arguments: '{"city":"Shanghai"}',
    status: 'completed',
  });
});
// Streaming: the event sequence starts with response.created, carries
// reasoning-summary and output-text deltas, and ends with response.completed.
test('collectResponsesStreamEvents emits sub2api-style response events', () => {
const events = collectResponsesStreamEvents(
{ id: 'resp_1', model: 'minimax-m2.7', created: 123 },
[
{ type: 'reasoning_delta', text: 'thinking' },
{ type: 'text_delta', text: 'hello' },
{ type: 'finish', finishReason: 'stop' },
{ type: 'usage', usage: { prompt_tokens: 1, completion_tokens: 2, total_tokens: 3 } },
],
);
assert.equal(events[0]?.type, 'response.created');
assert.ok(events.some((event) => event.type === 'response.reasoning_summary_text.delta'));
assert.ok(events.some((event) => event.type === 'response.output_text.delta'));
assert.equal(events.at(-1)?.type, 'response.completed');
});
// With no reasoning present, the text output item must be indexed 0 (not 1)
// across added/delta/done events.
test('collectResponsesStreamEvents starts text-only output at index zero', () => {
const events = collectResponsesStreamEvents(
{ id: 'resp_1', model: 'minimax-m2.7', created: 123 },
[
{ type: 'text_delta', text: 'hello' },
{ type: 'finish', finishReason: 'stop' },
],
);
const added = events.find((event) => event.type === 'response.output_item.added') as { output_index?: number } | undefined;
const delta = events.find((event) => event.type === 'response.output_text.delta') as { output_index?: number } | undefined;
const done = events.find((event) => event.type === 'response.output_text.done') as { output_index?: number } | undefined;
assert.equal(added?.output_index, 0);
assert.equal(delta?.output_index, 0);
assert.equal(done?.output_index, 0);
});
// Tool calls stream as output_item.added (carrying the function name),
// function_call_arguments deltas, and a function_call output_item.done.
test('collectResponsesStreamEvents emits function call stream events', () => {
const events = collectResponsesStreamEvents(
{ id: 'resp_1', model: 'minimax-m2.7', created: 123 },
[
{
type: 'tool_call_delta',
index: 0,
id: 'call_1',
name: 'fake_lookup_weather',
arguments: '{"city":"Shanghai"}',
},
{ type: 'finish', finishReason: 'tool_calls' },
],
);
assert.ok(events.some((event) => event.type === 'response.output_item.added' && JSON.stringify(event).includes('fake_lookup_weather')));
assert.ok(events.some((event) => event.type === 'response.function_call_arguments.delta' && JSON.stringify(event).includes('Shanghai')));
assert.ok(events.some((event) => event.type === 'response.output_item.done' && JSON.stringify(event).includes('function_call')));
});
// As with text-only output, a tool-only stream must also index from zero.
test('collectResponsesStreamEvents starts tool-only output at index zero', () => {
const events = collectResponsesStreamEvents(
{ id: 'resp_1', model: 'minimax-m2.7', created: 123 },
[
{
type: 'tool_call_delta',
index: 0,
id: 'call_1',
name: 'fake_lookup_weather',
arguments: '{"city":"Shanghai"}',
},
{ type: 'finish', finishReason: 'tool_calls' },
],
);
const added = events.find((event) => event.type === 'response.output_item.added') as { output_index?: number } | undefined;
const delta = events.find((event) => event.type === 'response.function_call_arguments.delta') as { output_index?: number } | undefined;
const done = events.find((event) => event.type === 'response.function_call_arguments.done') as { output_index?: number } | undefined;
assert.equal(added?.output_index, 0);
assert.equal(delta?.output_index, 0);
assert.equal(done?.output_index, 0);
});
// --- direct-adapter-messages (Anthropic Messages-API output shape) ---

// Non-streaming: reasoning maps to a leading 'thinking' block, text follows,
// and finishReason 'stop' maps to Anthropic's 'end_turn'.
test('buildAnthropicMessageResponse includes thinking and text blocks', () => {
const state = accumulate([
{ type: 'reasoning_delta', text: 'thinking' },
{ type: 'text_delta', text: 'hello world' },
{ type: 'finish', finishReason: 'stop' },
{ type: 'usage', usage: { prompt_tokens: 7, completion_tokens: 8, total_tokens: 15 } },
]);
const response = buildAnthropicMessageResponse(state);
assert.equal(response.type, 'message');
assert.equal(response.content[0]?.type, 'thinking');
assert.equal(response.content[1]?.type, 'text');
assert.equal(response.content[1]?.text, 'hello world');
assert.equal(response.stop_reason, 'end_turn');
});
// Tool calls map to a trailing tool_use block with JSON-parsed input, and
// finishReason 'tool_calls' maps to stop_reason 'tool_use'.
test('buildAnthropicMessageResponse maps tool calls to tool_use blocks', () => {
const state = accumulate([
{
type: 'tool_call_delta',
index: 0,
id: 'call_1',
name: 'fake_lookup_weather',
arguments: '{"city":"Shanghai"}',
},
{ type: 'finish', finishReason: 'tool_calls' },
]);
const response = buildAnthropicMessageResponse(state);
assert.equal(response.stop_reason, 'tool_use');
assert.deepEqual(response.content.at(-1), {
type: 'tool_use',
id: 'call_1',
name: 'fake_lookup_weather',
input: { city: 'Shanghai' },
});
});
// Streaming: events open with message_start, carry thinking_delta and
// text_delta content_block_delta events, and close with message_stop.
test('collectAnthropicStreamEvents emits thinking and text SSE events', () => {
const events = collectAnthropicStreamEvents(
{ id: 'resp_1', model: 'minimax-m2.7', created: 123 },
[
{ type: 'reasoning_delta', text: 'thinking' },
{ type: 'text_delta', text: 'hello' },
{ type: 'finish', finishReason: 'stop' },
{ type: 'usage', usage: { prompt_tokens: 1, completion_tokens: 2, total_tokens: 3 } },
],
);
assert.equal(events[0]?.event, 'message_start');
assert.ok(events.some((event) => event.event === 'content_block_delta' && JSON.stringify(event.data).includes('thinking_delta')));
assert.ok(events.some((event) => event.event === 'content_block_delta' && JSON.stringify(event.data).includes('text_delta')));
assert.equal(events.at(-1)?.event, 'message_stop');
});
// Streaming tool use: a tool_use content_block_start, input_json_delta
// deltas, and a message_delta announcing the tool_use stop reason.
test('collectAnthropicStreamEvents emits tool_use stream events', () => {
const events = collectAnthropicStreamEvents(
{ id: 'resp_1', model: 'minimax-m2.7', created: 123 },
[
{
type: 'tool_call_delta',
index: 0,
id: 'call_1',
name: 'fake_lookup_weather',
arguments: '{"city":"Shanghai"}',
},
{ type: 'finish', finishReason: 'tool_calls' },
],
);
assert.ok(events.some((event) => event.event === 'content_block_start' && JSON.stringify(event.data).includes('tool_use')));
assert.ok(events.some((event) => event.event === 'content_block_delta' && JSON.stringify(event.data).includes('input_json_delta')));
assert.ok(events.some((event) => event.event === 'message_delta' && JSON.stringify(event.data).includes('tool_use')));
});