...
Code Block | ||
---|---|---|
| ||
models: { "My Custom Open Source Model": { endpoint: "http://127.0.0.1:11434/v1", model: "llama2", apiKey: "secret", // required, even if unused stream: true } } |
The configuration above assumes that the model's API follows the format commonly used by the OpenAI Chat Completion API. This applies, for example, if you're hosting an open-source large language model locally with a tool like Ollama.
Using a custom provider
If the model's API requires uniquely formatted input and output, you can create a custom provider. For example:
Code Block | ||
---|---|---|
| ||
// Example custom provider. Registers under profound.ai.providers so a model
// config can reference it via `provider: "myCustomProvider"`.
profound.ai.providers.myCustomProvider = {
  // Builds the function that will be invoked in place of a real model API.
  getAPIFunction: function(data) {
    // The returned async function always yields the same hardcoded reply,
    // which makes it handy for testing the provider wiring end to end.
    return async function() {
      return {
        message: {
          type: "hardcoded test response",
          content: "Hi there. I am a custom model."
        }
      };
    };
  },
  // Assembles the parameters handed to the API function.
  getAPIParms: function(data) {
    // Forward every piece of available data unchanged.
    return data;
  },
  // Normalizes the raw API response into the shape Profound AI expects:
  // { responseMessage, content, isToolCall }.
  processResponse: function(response) {
    const { message } = response;
    return {
      responseMessage: message,
      content: message.content,
      isToolCall: false
    };
  }
};
// Export the Profound AI configuration, registering a model entry that
// routes through the custom provider defined above.
module.exports = {
  // misc configuration entries
  models: {
    "Custom Test": {
      provider: "myCustomProvider",
      model: "custom-test"
    }
  }
} |
...