
Tiny AI Client

A minimalist, zero-dependency OpenAI-compatible streaming client with automatic tool calling.

Features

  • ~1.3KB gzipped - Zero dependencies
  • Dual API support - Responses (default) and Completions modes
  • Streaming - Async generator with typed chunks
  • Tool calling - Inline handlers or callback dispatch

Usage

import ai from 'tiny-ai-client';

// Stream text
for await (const chunk of ai({
  apiKey: 'sk-...',
  baseURL: 'https://api.openai.com/v1',
  model: 'gpt-4o-mini',
  input: 'Hello!',
})) {
  if (chunk.type === 'text') console.log(chunk.text);
}

Tool Calling

Tools can have inline call handlers that run automatically:

const tools = [
  {
    type: 'function',
    name: 'get_weather',
    description: 'Get weather for a city',
    parameters: {
      type: 'object',
      properties: {location: {type: 'string'}},
      required: ['location'],
    },
    call: ({location}) => ({temp: 72, location}),
  },
];

for await (const chunk of ai({...config, input: 'Weather in Tokyo?', tools})) {
  if (chunk.type === 'text') console.log(chunk.text);
  if (chunk.type === 'tool_result') console.log('Tool returned:', chunk.result);
}
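
When an inline call handler resolves, the client feeds the result back to the model and keeps streaming the follow-up turn, so one loop covers the whole tool cycle. If a handler throws, the stream is not aborted; the tool_result chunk carries {error: message} instead. A typical chunk sequence for the example above (exact order and text depend on the model):

text → tool_call → tool_result → text → done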

Or use onToolCall for dynamic dispatch:

for await (const chunk of ai({
  ...config,
  input: 'Calculate 5 + 3',
  tools: [{
    type: 'function',
    name: 'calc',
    description: 'Evaluate an arithmetic expression',
    parameters: {type: 'object', properties: {expr: {type: 'string'}}, required: ['expr']},
  }],
  // eval is for illustration only; validate expressions before evaluating in real code
  onToolCall: (name, args) => (name === 'calc' ? eval(args.expr) : {error: 'unknown tool'}),
})) { ... }

API Modes

ai({ mode: 'responses', ... })  // Modern Responses API (default)
ai({ mode: 'completions', ... }) // Legacy Completions API
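
The mode only changes the wire format; a sketch of the mapping, as implemented in ai.ts below:

// responses (default): POST {baseURL}/responses
//   {model, input, instructions, max_output_tokens, tools, stream: true}
// completions:          POST {baseURL}/chat/completions
//   {model, messages: [{role: 'system' | 'user', content}], max_tokens,
//    tools: [{type: 'function', function: {name, description, parameters}}], stream: true}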

API

export default function ai(options: Options): AsyncIterableIterator<StreamChunk>

interface Options {
  apiKey: string;
  baseURL: string;
  model?: string;
  input: string;
  instructions?: string;        // System prompt
  tools?: Tool[];
  onToolCall?: (name: string, args: object) => unknown;
  mode?: 'responses' | 'completions';
  temperature?: number;
  max_output_tokens?: number;
  headers?: Record<string, string>;
}

type StreamChunk =
  | { type: 'text'; text: string }
  | { type: 'tool_call'; id: string; function: { name: string; arguments: string } }
  | { type: 'tool_result'; id: string; function: { name: string; arguments: string }; result: unknown }
  | { type: 'done' };
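
StreamChunk is a discriminated union, so switching on type narrows each case; a minimal consumer sketch (options as configured above):

for await (const chunk of ai(options)) {
  switch (chunk.type) {
    case 'text': process.stdout.write(chunk.text); break;            // streamed text delta
    case 'tool_call': console.log('\ncall:', chunk.function.name); break; // model requested a tool
    case 'tool_result': console.log('result:', chunk.result); break; // handler/onToolCall output
    case 'done': break;                                              // stream complete
  }
}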

Size

Metric    Size
Minified  2.5 KB
Gzipped   1.26 KB

License

MIT

Tests

import {test, describe} from 'node:test';
import assert from 'node:assert/strict';
import ai from './dist/ai.js';

const TEST_BASE_URL = 'https://proxy-shopify-ai.local.shop.dev/v1';
const TEST_API_KEY = '1'; // Ignored by proxy

describe('AI Client Tests', () => {
  describe('Dual-mode Client', () => {
    test('should generate text in responses mode', async () => {
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'responses',
        model: 'gpt-4o-mini',
        input: 'Reply with "Responses mode works" and nothing else',
        max_output_tokens: 50,
        temperature: 0,
      }));
      const result = chunks.at(-1)!;
      const content = chunks.filter(c => c.type === 'text').map(c => c.text).join('');
      assert.ok(content);
      console.log('Responses mode result:', content, '| final chunk:', result.type);
    });

    test('should generate text in completions mode', async () => {
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'completions',
        model: 'gpt-4o-mini',
        input: 'Reply with "Completions mode works" and nothing else',
        max_output_tokens: 50,
        temperature: 0,
      }));
      const result = chunks.at(-1)!;
      const content = chunks.filter(c => c.type === 'text').map(c => c.text).join('');
      assert.ok(content);
      console.log('Completions mode result:', content, '| final chunk:', result.type);
    });

    test('should handle system messages', async () => {
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        model: 'gpt-4o-mini',
        instructions: 'You are a helpful assistant that speaks like a pirate.',
        input: 'Say hello',
        max_output_tokens: 50,
        temperature: 0.5,
      }));
      const result = chunks.at(-1)!;
      const content = chunks.filter(c => c.type === 'text').map(c => c.text).join('');
      assert.ok(content);
      console.log('System message result:', content, '| final chunk:', result.type);
    });

    test('should stream in both modes', async () => {
      for (const mode of ['responses', 'completions'] as const) {
        const chunks = [];
        for await (const chunk of ai({
          apiKey: TEST_API_KEY,
          baseURL: TEST_BASE_URL,
          mode,
          model: 'gpt-4o-mini',
          input: `Say "Mode ${mode} streaming works"`,
          max_output_tokens: 50,
          temperature: 0,
        })) {
          chunks.push(chunk);
        }
        assert.ok(chunks.length > 0, `No chunks received in ${mode} mode`);
        const hasText = chunks.some((c) => c.type === 'text');
        assert.ok(hasText, `No text chunks in ${mode} mode`);
        console.log(`${mode} mode streamed ${chunks.length} chunks`);
      }
    });

    test('should handle tool calls with inline handlers (responses mode)', async () => {
      const tools = [
        {
          type: 'function' as const,
          name: 'get_current_weather',
          description: 'Get the current weather in a location',
          parameters: {
            type: 'object',
            properties: {
              location: { type: 'string', description: 'City name' }
            },
            required: ['location']
          },
          call: async ({location}: any) => ({
            temperature: 72,
            conditions: 'sunny',
            location
          })
        }
      ];
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'responses',
        model: 'gpt-4o-mini',
        input: 'What is the weather in Paris? Use the get_current_weather tool.',
        tools,
        max_output_tokens: 150,
      }));
      console.log('Responses mode chunks:', chunks.map(c => c.type).join(', '));
      const hasToolCall = chunks.some(c => c.type === 'tool_call');
      const hasToolResult = chunks.some(c => c.type === 'tool_result');
      const hasText = chunks.some(c => c.type === 'text');
      assert.ok(hasToolCall, 'Should have tool_call chunk');
      assert.ok(hasToolResult, 'Should have tool_result chunk');
      assert.ok(hasText, 'Should have text chunk');
      console.log('Tool calling test (responses) passed with', chunks.length, 'chunks');
    });

    test('should handle tool calls with onToolCall callback (completions mode)', async () => {
      const tools = [
        {
          type: 'function' as const,
          name: 'add',
          description: 'Add two numbers together',
          parameters: {
            type: 'object',
            properties: {
              a: { type: 'number', description: 'First number' },
              b: { type: 'number', description: 'Second number' }
            },
            required: ['a', 'b']
          }
        }
      ];
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'completions',
        model: 'gpt-4o-mini',
        input: 'Calculate 5 + 3 using the add tool',
        tools,
        max_output_tokens: 150,
        onToolCall: async (name, args: any) => {
          if (name === 'add') {
            return { result: args.a + args.b };
          }
          return { error: 'unknown tool' };
        }
      }));
      console.log('Completions mode chunks:', chunks.map(c => c.type).join(', '));
      const hasToolCall = chunks.some(c => c.type === 'tool_call');
      const hasToolResult = chunks.some(c => c.type === 'tool_result');
      assert.ok(hasToolCall, 'Should have tool_call chunk');
      assert.ok(hasToolResult, 'Should have tool_result chunk');
      console.log('onToolCall test (completions) passed with', chunks.length, 'chunks');
    });

    test('should handle multi-turn tool calling (responses mode)', async () => {
      const tools = [
        {
          type: 'function' as const,
          name: 'get_temperature',
          description: 'Get temperature for a city',
          parameters: {
            type: 'object',
            properties: {
              city: { type: 'string' }
            },
            required: ['city']
          },
          call: async ({city}: any) => ({temperature: city === 'Tokyo' ? 22 : 18})
        },
        {
          type: 'function' as const,
          name: 'compare',
          description: 'Compare two numbers',
          parameters: {
            type: 'object',
            properties: {
              a: { type: 'number' },
              b: { type: 'number' }
            },
            required: ['a', 'b']
          },
          call: async ({a, b}: any) => ({comparison: a > b ? 'greater' : a < b ? 'less' : 'equal'})
        }
      ];
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'responses',
        model: 'gpt-4o-mini',
        input: 'Get temperature for Tokyo and Paris, then tell me which is warmer',
        tools,
        max_output_tokens: 200,
      }));
      const toolCalls = chunks.filter(c => c.type === 'tool_call');
      const toolResults = chunks.filter(c => c.type === 'tool_result');
      assert.ok(toolCalls.length >= 2, `Should have at least 2 tool calls, got ${toolCalls.length}`);
      assert.ok(toolResults.length >= 2, `Should have at least 2 tool results, got ${toolResults.length}`);
      console.log(`Multi-turn test (responses) passed with ${toolCalls.length} tool calls`);
    });

    test('should handle multi-turn tool calling (completions mode)', async () => {
      let callCount = 0;
      const tools = [
        {
          type: 'function' as const,
          name: 'get_number',
          description: 'Get a number (returns 10)',
          parameters: {
            type: 'object',
            properties: {}
          },
          call: async () => {
            callCount++;
            return {value: 10};
          }
        },
        {
          type: 'function' as const,
          name: 'multiply',
          description: 'Multiply two numbers',
          parameters: {
            type: 'object',
            properties: {
              a: { type: 'number' },
              b: { type: 'number' }
            },
            required: ['a', 'b']
          },
          call: async ({a, b}: any) => {
            callCount++;
            return {result: a * b};
          }
        }
      ];
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'completions',
        model: 'gpt-4o-mini',
        input: 'Use get_number to get two numbers, then multiply them together',
        tools,
        max_output_tokens: 200,
      }));
      const toolCalls = chunks.filter(c => c.type === 'tool_call');
      assert.ok(callCount >= 2, `Should call tools at least 2 times, got ${callCount}`);
      assert.ok(toolCalls.length >= 2, `Should have at least 2 tool calls, got ${toolCalls.length}`);
      console.log(`Multi-turn test (completions) passed with ${callCount} tool calls`);
    });
  });

  describe('Error handling', () => {
    test('should handle API errors gracefully', async () => {
      await assert.rejects(async () => {
        await Array.fromAsync(ai({
          apiKey: 'invalid',
          baseURL: 'https://invalid-url.example.com/v1',
          input: 'test',
        }));
      });
    });

    test('should handle missing tool handler gracefully', async () => {
      // Tool without inline call function and no onToolCall callback
      const tools = [
        {
          type: 'function' as const,
          name: 'unhandled_tool',
          description: 'A tool with no handler',
          parameters: { type: 'object', properties: {} }
        }
      ];
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'completions',
        model: 'gpt-4o-mini',
        input: 'Call the unhandled_tool function',
        tools,
        max_output_tokens: 100,
      }));
      // Should get tool_result with error
      const toolResult = chunks.find(c => c.type === 'tool_result') as any;
      assert.ok(toolResult, 'Should have tool_result chunk');
      assert.ok(toolResult.result?.error, 'Tool result should contain error');
      console.log('Missing tool handler error:', toolResult.result?.error);
    });

    test('should handle tool that throws an Error object', async () => {
      const tools = [
        {
          type: 'function' as const,
          name: 'failing_tool',
          description: 'A tool that throws an error',
          parameters: { type: 'object', properties: {} },
          call: async () => {
            throw new Error('Tool execution failed');
          }
        }
      ];
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'responses',
        model: 'gpt-4o-mini',
        input: 'Call the failing_tool function',
        tools,
        max_output_tokens: 100,
      }));
      const toolResult = chunks.find(c => c.type === 'tool_result') as any;
      assert.ok(toolResult, 'Should have tool_result chunk');
      assert.equal(toolResult.result?.error, 'Tool execution failed');
      console.log('Error handling test passed:', toolResult.result?.error);
    });
  });

  describe('Edge cases', () => {
    test('should use default mode (responses) when not specified', async () => {
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        model: 'gpt-4o-mini',
        input: 'Say "default mode"',
        max_output_tokens: 30,
      }));
      const content = chunks.filter(c => c.type === 'text').map(c => c.text).join('');
      assert.ok(content, 'Should receive text content');
      assert.ok(chunks.at(-1)?.type === 'done', 'Should end with done chunk');
      console.log('Default mode test passed:', content);
    });

    test('should handle custom headers', async () => {
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'responses',
        model: 'gpt-4o-mini',
        input: 'Say "headers work"',
        max_output_tokens: 30,
        headers: {
          'X-Custom-Header': 'test-value',
          'X-Another-Header': 'another-value',
        },
      }));
      const content = chunks.filter(c => c.type === 'text').map(c => c.text).join('');
      assert.ok(content, 'Should receive text content with custom headers');
      console.log('Custom headers test passed:', content);
    });

    test('should handle completions mode with instructions (system prompt)', async () => {
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'completions',
        model: 'gpt-4o-mini',
        instructions: 'You always respond with exactly: "Hello from instructions"',
        input: 'Hi',
        max_output_tokens: 30,
      }));
      const content = chunks.filter(c => c.type === 'text').map(c => c.text).join('');
      assert.ok(content, 'Should receive text content');
      console.log('Instructions test passed:', content);
    });

    test('should handle completions mode without tools (text only)', async () => {
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'completions',
        model: 'gpt-4o-mini',
        input: 'What is 2+2? Reply with just the number.',
        max_output_tokens: 20,
        temperature: 0,
      }));
      const textChunks = chunks.filter(c => c.type === 'text');
      const doneChunk = chunks.find(c => c.type === 'done');
      assert.ok(textChunks.length > 0, 'Should have text chunks');
      assert.ok(doneChunk, 'Should have done chunk');
      assert.ok(!chunks.some(c => c.type === 'tool_call'), 'Should not have tool calls');
      console.log('Completions text-only test passed with', textChunks.length, 'text chunks');
    });

    test('should handle responses mode without tools (text only)', async () => {
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'responses',
        model: 'gpt-4o-mini',
        input: 'What is 3+3? Reply with just the number.',
        max_output_tokens: 20,
        temperature: 0,
      }));
      const textChunks = chunks.filter(c => c.type === 'text');
      const doneChunk = chunks.find(c => c.type === 'done');
      assert.ok(textChunks.length > 0, 'Should have text chunks');
      assert.ok(doneChunk, 'Should have done chunk');
      assert.ok(!chunks.some(c => c.type === 'tool_call'), 'Should not have tool calls');
      console.log('Responses text-only test passed with', textChunks.length, 'text chunks');
    });

    test('should handle tool with synchronous call function', async () => {
      let syncCalled = false;
      const tools = [
        {
          type: 'function' as const,
          name: 'sync_tool',
          description: 'A synchronous tool that returns a fixed result',
          parameters: {
            type: 'object',
            properties: {},
          },
          call: () => {
            syncCalled = true;
            return { success: true, data: 'sync result' }; // Synchronous, not async
          }
        }
      ];
      const chunks = await Array.fromAsync(ai({
        apiKey: TEST_API_KEY,
        baseURL: TEST_BASE_URL,
        mode: 'responses',
        model: 'gpt-4o-mini',
        input: 'Call the sync_tool function',
        tools,
        max_output_tokens: 100,
      }));
      const toolResult = chunks.find(c => c.type === 'tool_result') as any;
      assert.ok(toolResult, 'Should have tool_result chunk');
      assert.ok(syncCalled, 'Sync function should have been called');
      assert.equal(toolResult.result?.success, true, 'Should return sync result');
      console.log('Sync tool test passed:', toolResult.result);
    });
  });
});

ai.ts

export type Message = {
  role: 'system' | 'user' | 'assistant';
  content: string;
};

export type Tool = {
  type: 'function';
  name: string;
  description: string;
  parameters: Record<string, unknown>;
  call?: (args: Record<string, unknown>) => unknown | Promise<unknown>;
};

export type ToolCall = {
  id: string;
  function: {
    name: string;
    arguments: string;
  };
};

export type StreamChunk =
  | {type: 'text'; text: string}
  | ({type: 'tool_call'} & ToolCall)
  | ({type: 'tool_result'; result: unknown} & ToolCall)
  | {type: 'done'};

export type ApiMode = 'completions' | 'responses';

export interface CompleteOptions {
  apiKey: string;
  baseURL: string;
  headers?: Record<string, string>;
  mode?: ApiMode;
  model?: string;
  input: string;
  instructions?: string;
  tools?: Tool[];
  onToolCall?: (
    name: string,
    args: Record<string, unknown>,
  ) => unknown | Promise<unknown>;
  temperature?: number;
  max_output_tokens?: number;
}

export default async function* ai(
  options: CompleteOptions,
): AsyncIterableIterator<StreamChunk> {
  const {
    apiKey,
    baseURL,
    headers = {},
    mode = 'responses',
    tools,
    onToolCall,
    ...rest
  } = options;
  const c = mode === 'completions';

  // Inline tool dispatch with error handling
  const callTool = async (name: string, args: Record<string, unknown>) => {
    try {
      return await (tools?.find((t) => t.name === name)?.call?.(args) ??
        onToolCall?.(name, args) ??
        (() => {
          throw name;
        })());
    } catch (e: any) {
      return {error: e.message ?? e};
    }
  };

  // Build request once, mutate body for continuations
  const endpoint = baseURL + (c ? '/chat/completions' : '/responses');
  let body: any = {
    ...rest,
    tools,
    stream: true,
  };
  if (c) {
    const {max_output_tokens, instructions, input, ...r} = body;
    body = {...r, max_tokens: max_output_tokens, messages: []};
    if (instructions)
      body.messages.push({role: 'system', content: instructions});
    if (input) body.messages.push({role: 'user', content: input});
    // Wrap tools in {function: {...}} for completions API
    if (body.tools)
      body.tools = body.tools.map(({call, type, ...fn}: Tool) => ({
        type,
        function: fn,
      }));
  }

  // Track conversation for responses mode (normalize to array)
  let conversationInput: any =
    !c && body.input
      ? [{type: 'message', role: 'user', content: body.input}]
      : null;

  // Outer loop for tool continuation
  while (true) {
    const pendingCalls: ToolCall[] = [];
    const toolCallMap: Record<number, ToolCall> = {};
    const outputItems: any[] = [];
    let assistantContent = '';

    // For responses mode continuation, use the built conversation array
    if (conversationInput) body.input = conversationInput;

    const response = await fetch(endpoint, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
        ...headers,
      },
      body: JSON.stringify(body),
    });
    if (!response.ok) throw Error(await response.text());

    const reader = response.body!.getReader();
    const decoder = new TextDecoder();
    let buffer = '';
    try {
      while (true) {
        const {done, value} = await reader.read();
        if (done) break;
        buffer += decoder.decode(value, {stream: true});
        const lines = buffer.split('\n');
        buffer = lines.pop() || '';
        for (const line of lines) {
          if (!line.startsWith('data: ')) continue;
          const dataLine = line.slice(6);
          if (dataLine === '[DONE]') {
            // Completions: collect accumulated tool calls at stream end
            if (c)
              for (const tc of Object.values(toolCallMap)) {
                pendingCalls.push(tc);
                yield {...tc, type: 'tool_call' as const};
              }
            if (!pendingCalls.length) {
              yield {type: 'done'};
              return;
            }
            continue;
          }
          let chunk: StreamChunk | null = null;
          try {
            const data = JSON.parse(dataLine);
            const choice = data.choices?.[0];
            if (choice) {
              // Completions API
              const delta = choice.delta;
              if (delta?.content) {
                chunk = {type: 'text', text: delta.content};
              } else if (delta?.tool_calls) {
                for (const tc of delta.tool_calls) {
                  const idx = tc.index ?? 0;
                  toolCallMap[idx] ||= {
                    id: '',
                    function: {name: '', arguments: ''},
                  };
                  const call = toolCallMap[idx];
                  if (tc.id) call.id = tc.id;
                  if (tc.function?.name) call.function.name += tc.function.name;
                  if (tc.function?.arguments)
                    call.function.arguments += tc.function.arguments;
                }
              }
              // finish_reason signals end of content - [DONE] will handle cleanup
            } else if (data.delta) {
              // Responses API text delta
              chunk = {type: 'text', text: data.delta};
            } else if (data.item?.type === 'function_call') {
              // Responses API tool call
              chunk = {
                type: 'tool_call',
                id: data.item.call_id || data.item.id || '',
                function: {
                  name: data.item.name || '',
                  arguments: data.item.arguments || '{}',
                },
              };
            } else if (data.response?.status === 'completed') {
              // Responses API completed
              if (data.response.output)
                outputItems.push(...data.response.output);
              if (!pendingCalls.length) {
                yield {type: 'done'};
                return;
              }
              continue;
            }
          } catch {}
          if (chunk) {
            if (chunk.type === 'text') assistantContent += chunk.text;
            else if (!c) pendingCalls.push(chunk);
            yield chunk;
          }
        }
      }
    } finally {
      reader.releaseLock();
    }

    if (!pendingCalls.length) return;
    const results = await Promise.all(
      pendingCalls.map(async (tc) => {
        const args = JSON.parse(tc.function.arguments);
        const result = await callTool(tc.function.name, args);
        return {...tc, type: 'tool_result' as const, result};
      }),
    );
    yield* results;

    if (c) {
      body.messages.push(
        {
          role: 'assistant',
          content: assistantContent || null,
          tool_calls: pendingCalls.map((tc) => ({...tc, type: 'function'})),
        },
        ...pendingCalls.map((tc, i) => ({
          role: 'tool',
          tool_call_id: tc.id,
          content: JSON.stringify(results[i].result),
        })),
      );
    } else {
      conversationInput = [
        ...conversationInput,
        ...outputItems,
        ...pendingCalls.map((tc, i) => ({
          type: 'function_call_output',
          call_id: tc.id,
          output: JSON.stringify(results[i].result),
        })),
      ];
    }
  }
}

Demo

import ai from './dist/ai.js';

const config = {
  apiKey: '1',
  baseURL: 'https://proxy-shopify-ai.local.shop.dev/v1',
  model: 'gpt-4o-mini',
};

// Basic streaming
async function streamingDemo() {
  let text = '';
  for await (const chunk of ai({
    ...config,
    input: 'Count from 1 to 5',
    max_output_tokens: 50,
  })) {
    if (chunk.type === 'text') text += chunk.text;
  }
  console.log('Streaming:', text);
}

// Tool calling with inline handlers
async function toolDemo() {
  const tools = [
    {
      type: 'function',
      name: 'get_weather',
      description: 'Get weather for a city',
      parameters: {
        type: 'object',
        properties: {location: {type: 'string'}},
        required: ['location'],
      },
      call: ({location}) => ({temp: 72, conditions: 'sunny', location}),
    },
  ];
  let text = '';
  for await (const chunk of ai({...config, input: 'Weather in Tokyo?', tools})) {
    if (chunk.type === 'text') text += chunk.text;
  }
  console.log('Tool call:', text);
}

await streamingDemo();
await toolDemo();

package.json

{
  "name": "@experiments/tiny-ai-client",
  "version": "0.1.0",
  "type": "module",
  "main": "./dist/ai.js",
  "types": "./dist/ai.d.ts",
  "exports": {
    "types": "./dist/ai.d.ts",
    "import": "./dist/ai.js"
  },
  "files": [
    "dist"
  ],
  "scripts": {
    "build": "tsup --clean",
    "dev": "tsup --watch",
    "test": "node --test *.test.ts",
    "test:watch": "node --test --watch *.test.ts",
    "pretest": "pnpm run build"
  },
  "devDependencies": {
    "tsup": "^8.0.0",
    "typescript": "^5.3.0"
  }
}

tsconfig.json

{
  "compilerOptions": {
    "target": "ES2022",
    "module": "ESNext",
    "moduleResolution": "node",
    "lib": ["ES2022", "DOM"],
    "declaration": true,
    "declarationMap": true,
    "strict": true,
    "esModuleInterop": true,
    "skipLibCheck": true,
    "forceConsistentCasingInFileNames": true,
    "outDir": "dist",
    "rootDir": "."
  },
  "include": ["ai.ts"],
  "exclude": ["node_modules", "dist"]
}

tsup.config.ts

import {defineConfig} from 'tsup';

export default defineConfig({
  entry: ['ai.ts'],
  format: 'esm',
  dts: true,
  minify: true,
  treeshake: true,
  target: 'es2022',
  esbuildOptions(options, context) {
    options.keepNames = false;
    options.legalComments = 'none';
    options.mangleProps = /^_/;
  },
});