google-generative-ai-rs
Add the new model versions as configuration options
The new Gemini models (1.0) were released on 02/15/2024; these include gemini-1.0-pro-latest. The current model listing (obtained using get_model) is shown below, and a sketch of how the new identifiers could be exposed as configuration options follows the listing.
ModelInformationList {
    models: [
        ModelInformation {
            name: "models/gemini-1.0-pro",
            version: "001",
            display_name: "Gemini 1.0 Pro",
            description: "The best model for scaling across a wide range of tasks",
            input_token_limit: 30720,
            output_token_limit: 2048,
            supported_generation_methods: [
                "generateContent",
                "countTokens",
            ],
            temperature: Some(
                0.9,
            ),
            top_p: Some(
                1.0,
            ),
            top_k: Some(
                1,
            ),
        },
        ModelInformation {
            name: "models/gemini-1.0-pro-001",
            version: "001",
            display_name: "Gemini 1.0 Pro 001",
            description: "The best model for scaling across a wide range of tasks. This is a stable model.",
            input_token_limit: 30720,
            output_token_limit: 2048,
            supported_generation_methods: [
                "generateContent",
                "countTokens",
            ],
            temperature: Some(
                0.9,
            ),
            top_p: Some(
                1.0,
            ),
            top_k: Some(
                1,
            ),
        },
        ModelInformation {
            name: "models/gemini-1.0-pro-latest",
            version: "001",
            display_name: "Gemini 1.0 Pro Latest",
            description: "The best model for scaling across a wide range of tasks. This is the latest model.",
            input_token_limit: 30720,
            output_token_limit: 2048,
            supported_generation_methods: [
                "generateContent",
                "countTokens",
            ],
            temperature: Some(
                0.9,
            ),
            top_p: Some(
                1.0,
            ),
            top_k: Some(
                1,
            ),
        },
        ModelInformation {
            name: "models/gemini-1.0-pro-vision-latest",
            version: "001",
            display_name: "Gemini 1.0 Pro Vision",
            description: "The best image understanding model to handle a broad range of applications",
            input_token_limit: 12288,
            output_token_limit: 4096,
            supported_generation_methods: [
                "generateContent",
                "countTokens",
            ],
            temperature: Some(
                0.4,
            ),
            top_p: Some(
                1.0,
            ),
            top_k: Some(
                32,
            ),
        },
        ModelInformation {
            name: "models/gemini-pro",
            version: "001",
            display_name: "Gemini 1.0 Pro",
            description: "The best model for scaling across a wide range of tasks",
            input_token_limit: 30720,
            output_token_limit: 2048,
            supported_generation_methods: [
                "generateContent",
                "countTokens",
            ],
            temperature: Some(
                0.9,
            ),
            top_p: Some(
                1.0,
            ),
            top_k: Some(
                1,
            ),
        },
        ModelInformation {
            name: "models/gemini-pro-vision",
            version: "001",
            display_name: "Gemini 1.0 Pro Vision",
            description: "The best image understanding model to handle a broad range of applications",
            input_token_limit: 12288,
            output_token_limit: 4096,
            supported_generation_methods: [
                "generateContent",
                "countTokens",
            ],
            temperature: Some(
                0.4,
            ),
            top_p: Some(
                1.0,
            ),
            top_k: Some(
                32,
            ),
        },
        ModelInformation {
            name: "models/embedding-001",
            version: "001",
            display_name: "Embedding 001",
            description: "Obtain a distributed representation of a text.",
            input_token_limit: 2048,
            output_token_limit: 1,
            supported_generation_methods: [
                "embedContent",
                "countTextTokens",
            ],
            temperature: None,
            top_p: None,
            top_k: None,
        },
    ],
}
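Supporting these models is mostly a matter of accepting the new identifiers wherever the model is selected. Below is a minimal sketch of what the requested configuration options could look like, assuming the crate models the choice as an enum; the type name, variant names, and helper method here are illustrative, not the crate's actual API.

```rust
/// Illustrative model selector covering the identifiers returned above.
/// The real crate may name this type and its variants differently.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum GeminiModel {
    /// "models/gemini-1.0-pro" (the existing "models/gemini-pro" is an alias)
    Gemini1_0Pro,
    /// "models/gemini-1.0-pro-001" (stable snapshot)
    Gemini1_0Pro001,
    /// "models/gemini-1.0-pro-latest"
    Gemini1_0ProLatest,
    /// "models/gemini-1.0-pro-vision-latest" ("models/gemini-pro-vision" is an alias)
    Gemini1_0ProVisionLatest,
    /// Escape hatch so newly published versions can be used before a variant exists.
    Custom(String),
}

impl GeminiModel {
    /// Resource name as it appears in the listing above.
    pub fn as_resource_name(&self) -> &str {
        match self {
            Self::Gemini1_0Pro => "models/gemini-1.0-pro",
            Self::Gemini1_0Pro001 => "models/gemini-1.0-pro-001",
            Self::Gemini1_0ProLatest => "models/gemini-1.0-pro-latest",
            Self::Gemini1_0ProVisionLatest => "models/gemini-1.0-pro-vision-latest",
            Self::Custom(name) => name,
        }
    }
}

fn main() {
    // Example: build the public REST path for the newest 1.0 text model.
    let model = GeminiModel::Gemini1_0ProLatest;
    println!(
        "https://generativelanguage.googleapis.com/v1/{}:generateContent",
        model.as_resource_name()
    );
}
```

A string-typed escape hatch such as the `Custom` variant would also keep the crate usable whenever Google publishes new model versions between crate releases.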