Function createHeliconeResponse
- createHeliconeResponse(config): {
choices: {
finish_reason: string;
index: number;
logprobs: null;
message: {
annotations: never[];
content: string;
refusal: null;
role: string;
};
}[];
created: number;
id: string;
model: string;
object: string;
service_tier: string;
system_fingerprint: string;
usage: {
completion_tokens: number;
completion_tokens_details: {
accepted_prediction_tokens: number;
audio_tokens: number;
reasoning_tokens: number;
rejected_prediction_tokens: number;
};
prompt_tokens: number;
prompt_tokens_details: {
audio_tokens: number;
cached_tokens: number;
};
total_tokens: number;
};
}

Returns {
choices: {
finish_reason: string;
index: number;
logprobs: null;
message: {
annotations: never[];
content: string;
refusal: null;
role: string;
};
}[];
created: number;
id: string;
model: string;
object: string;
service_tier: string;
system_fingerprint: string;
usage: {
completion_tokens: number;
completion_tokens_details: {
accepted_prediction_tokens: number;
audio_tokens: number;
reasoning_tokens: number;
rejected_prediction_tokens: number;
};
prompt_tokens: number;
prompt_tokens_details: {
audio_tokens: number;
cached_tokens: number;
};
total_tokens: number;
};
}
choices: {
finish_reason: string;
index: number;
logprobs: null;
message: {
annotations: never[];
content: string;
refusal: null;
role: string;
};
}[]
created: number
id: string
model: string
object: string
service_tier: string
system_fingerprint: string
usage: {
completion_tokens: number;
completion_tokens_details: {
accepted_prediction_tokens: number;
audio_tokens: number;
reasoning_tokens: number;
rejected_prediction_tokens: number;
};
prompt_tokens: number;
prompt_tokens_details: {
audio_tokens: number;
cached_tokens: number;
};
total_tokens: number;
}
completion_tokens: number
completion_tokens_details: {
accepted_prediction_tokens: number;
audio_tokens: number;
reasoning_tokens: number;
rejected_prediction_tokens: number;
}
accepted_prediction_tokens: number
audio_tokens: number
reasoning_tokens: number
rejected_prediction_tokens: number
prompt_tokens: number
prompt_tokens_details: {
audio_tokens: number;
cached_tokens: number;
}
audio_tokens: number
cached_tokens: number
total_tokens: number
Creates a standardized Helicone response for API logging.