1
0
mirror of https://github.com/Bayselonarrend/OpenIntegrations.git synced 2025-11-25 22:12:29 +02:00
Files
OpenIntegrations/docs/en/md/Ollama/Models-management/Get-model-information.mdx
Vitaly the Alpaca (bot) aa10e4c564 Main build (Jenkins)
2025-10-21 11:36:43 +03:00

229 lines
5.5 KiB
Plaintext
Vendored

---
sidebar_position: 3
description: Get model information and other functions to work with Ollama in the Open Integration Package, a free open-source integration library for 1C:Enterprise 8, OneScript and CLI
keywords: [1C, 1С, 1С:Enterprise, 1С:Enterprise 8.3, API, Integration, Services, Exchange, OneScript, CLI, Ollama]
---
import Tabs from '@theme/Tabs';
import TabItem from '@theme/TabItem';
# Get model information
Gets information about the model
`Function GetModelInformation(Val URL, Val Model, Val Detailed = True, Val AdditionalHeaders = "") Export`
| Parameter | CLI option | Type | Required | Description |
|-|-|-|-|-|
| URL | --url | String | ✔ | Ollama server URL |
| Model | --model | String | ✔ | Model name |
| Detailed | --verbose | Boolean | ✖ | Return full model information |
| AdditionalHeaders | --headers | Map Of KeyAndValue | ✖ | Additional request headers, if necessary |
Returns: Map Of KeyAndValue - Processing result
<br/>
:::tip
Method at API documentation: [Show Model Information](https://github.com/ollama/ollama/blob/main/docs/api.md#show-model-information)
:::
<br/>
```bsl title="1C:Enterprise/OneScript code example"
URL = "https://hut.openintegrations.dev/ollama";
Token = "12We34..."; // Authorization header - not part of the Ollama API
Model = "mario";
AdditionalHeaders = New Map;
AdditionalHeaders.Insert("Authorization", StrTemplate("Bearer %1", Token));
Result = OPI_Ollama.GetModelInformation(URL, Model, False, AdditionalHeaders);
```
<Tabs>
<TabItem value="bash" label="Bash" default>
```bash
# JSON data can also be passed as a path to a .json file
oint ollama GetModelInformation \
--url "https://hut.openintegrations.dev/ollama" \
--model "mario" \
--verbose false \
--headers "{'Authorization':'***'}"
```
</TabItem>
<TabItem value="bat" label="CMD/Bat" default>
```batch
:: JSON data can also be passed as a path to a .json file
oint ollama GetModelInformation ^
--url "https://hut.openintegrations.dev/ollama" ^
--model "mario" ^
--verbose false ^
--headers "{'Authorization':'***'}"
```
</TabItem>
</Tabs>
```json title="Result"
{
"modelfile": "# Modelfile generated by \"ollama show\"\n# To build a new Modelfile based on this, replace FROM with:\n# FROM mario:latest\n\nFROM /root/.ollama/models/blobs/sha256-2af3b81862c6be03c769683af18efdadb2c33f60ff32ab6f83e42c043d6c7816\nTEMPLATE \"<|system|>\n{{ .System }}</s>\n<|user|>\n{{ .Prompt }}</s>\n<|assistant|>\n\"\nSYSTEM You are Mario from Super Mario Bros.\nPARAMETER stop <|system|>\nPARAMETER stop <|user|>\nPARAMETER stop <|assistant|>\nPARAMETER stop </s>\n",
"parameters": "stop \"<|system|>\"\nstop \"<|user|>\"\nstop \"<|assistant|>\"\nstop \"</s>\"",
"template": "<|system|>\n{{ .System }}</s>\n<|user|>\n{{ .Prompt }}</s>\n<|assistant|>\n",
"system": "You are Mario from Super Mario Bros.",
"details": {
"parent_model": "tinyllama:latest",
"format": "gguf",
"family": "llama",
"families": [
"llama"
],
"parameter_size": "1.1B",
"quantization_level": "Q4_0"
},
"model_info": {
"general.architecture": "llama",
"general.file_type": 2,
"general.parameter_count": 1100048384,
"general.quantization_version": 2,
"llama.attention.head_count": 32,
"llama.attention.head_count_kv": 4,
"llama.attention.layer_norm_rms_epsilon": 0.00001,
"llama.block_count": 22,
"llama.context_length": 2048,
"llama.embedding_length": 2048,
"llama.feed_forward_length": 5632,
"llama.rope.dimension_count": 64,
"llama.rope.freq_base": 10000,
"tokenizer.ggml.bos_token_id": 1,
"tokenizer.ggml.eos_token_id": 2,
"tokenizer.ggml.merges": null,
"tokenizer.ggml.model": "llama",
"tokenizer.ggml.padding_token_id": 2,
"tokenizer.ggml.scores": null,
"tokenizer.ggml.token_type": null,
"tokenizer.ggml.tokens": null,
"tokenizer.ggml.unknown_token_id": 0
},
"tensors": [
{
"name": "output.weight",
"type": "Q6_K",
"shape": [
2048,
32000
]
},
{
"name": "token_embd.weight",
"type": "Q4_0",
"shape": [
2048,
32000
]
},
{
"name": "blk.0.attn_norm.weight",
"type": "F32",
"shape": [
2048
]
},
{
"name": "blk.0.ffn_down.weight",
"type": "Q4_0",
"shape": [
5632,
2048
]
},
{
"name": "blk.0.ffn_gate.weight",
"type": "Q4_0",
"shape": [
2048,
5632
]
},
{
"name": "blk.0.ffn_up.weight",
"type": "Q4_0",
"shape": [
2048,
5632
]
},
{
"name": "blk.0.ffn_norm.weight",
"type": "F32",
"shape": [
2048
]
},
{
"name": "blk.0.attn_k.weight",
"type": "Q4_0",
"shape": [
2048,
256
]
},
{
"name": "blk.0.attn_output.weight",
"type": "Q4_0",
"shape": [
2048,
2048
]
},
{
"name": "blk.0.attn_q.weight",
"type": "Q4_0",
"shape": [
2048,
2048
]
},
{
"name": "blk.0.attn_v.weight",
"type": "Q4_0",
"shape": [
2048,
256
]
},
{
"name": "blk.1.attn_norm.weight",
"type": "F32",
"shape": [
2048
]
},
{
"name": "blk.1.ffn_down.weight",
"type": "Q4_0",
"shape": [
5632,
2048
]
},
{
"name": "blk.1.ffn_gate.weight",
"type": "Q4_0",
"shape": [
2048,
5632
]
},
{
...
```