Gemini API 使用指南
本指南面向开发者,介绍如何通过 API 调用 Gemini 模型。
环境准备
安装 SDK
bash
pip install google-generativeai

配置 API 密钥
python
import google.generativeai as genai
genai.configure(api_key="YOUR_API_KEY")

获取 API 密钥
可以通过 Google AI Studio 获取免费的 API 密钥,或使用镜像站提供的 API 服务。
基础用法
文本生成
python
import google.generativeai as genai
genai.configure(api_key="YOUR_API_KEY")
# 创建模型实例
model = genai.GenerativeModel('gemini-3-pro')
# 生成内容
response = model.generate_content("请介绍一下人工智能的发展历史")
print(response.text)

多轮对话
python
# 创建聊天会话
chat = model.start_chat(history=[])
# 第一轮对话
response = chat.send_message("Python 和 JavaScript 有什么区别?")
print(response.text)
# 第二轮对话(保持上下文)
response = chat.send_message("哪个更适合初学者?")
print(response.text)
# 查看对话历史
for message in chat.history:
    print(f"{message.role}: {message.parts[0].text}")

多模态输入
图片理解
python
import PIL.Image
# 加载图片
img = PIL.Image.open("photo.jpg")
# 发送图片和文本
response = model.generate_content([
"请详细描述这张图片的内容",
img
])
print(response.text)

多图片分析
python
img1 = PIL.Image.open("image1.jpg")
img2 = PIL.Image.open("image2.jpg")
response = model.generate_content([
"比较这两张图片的异同",
img1,
img2
])

文件上传
python
# 上传文件
file = genai.upload_file("document.pdf")
# 分析文件内容
response = model.generate_content([
"总结这份文档的主要内容",
file
])

视频分析
python
# 上传视频
video = genai.upload_file("video.mp4")
# 等待处理完成
import time
while video.state.name == "PROCESSING":
    time.sleep(10)
    video = genai.get_file(video.name)
# 分析视频
response = model.generate_content([
"描述这个视频的内容",
video
])

流式输出
python
# 启用流式输出
response = model.generate_content(
"写一篇关于人工智能的文章",
stream=True
)
# 逐块打印
for chunk in response:
    print(chunk.text, end="", flush=True)

参数配置
生成参数
python
generation_config = {
"temperature": 0.7, # 创造性(0-2)
"top_p": 0.9, # 核采样
"top_k": 40, # Top-K 采样
"max_output_tokens": 2048, # 最大输出长度
}
model = genai.GenerativeModel(
'gemini-3-pro',
generation_config=generation_config
)

安全设置
python
safety_settings = [
{
"category": "HARM_CATEGORY_HARASSMENT",
"threshold": "BLOCK_MEDIUM_AND_ABOVE"
},
{
"category": "HARM_CATEGORY_HATE_SPEECH",
"threshold": "BLOCK_MEDIUM_AND_ABOVE"
},
]
model = genai.GenerativeModel(
'gemini-3-pro',
safety_settings=safety_settings
)

系统指令
python
model = genai.GenerativeModel(
'gemini-3-pro',
system_instruction="""你是一位专业的Python编程助手。
请用简洁清晰的语言回答问题,并提供代码示例。
回答时使用中文。"""
)
response = model.generate_content("如何读取 CSV 文件?")

函数调用
定义函数
python
def get_weather(city: str) -> dict:
    """Return a mocked weather report for *city*.

    Stand-in for a real weather API; always reports 25 degrees and sunny.
    """
    report = {"city": city, "temperature": 25, "condition": "晴天"}
    return report
# 声明函数
tools = [get_weather]
model = genai.GenerativeModel(
'gemini-3-pro',
tools=tools
)

使用函数调用
python
response = model.generate_content("北京今天天气怎么样?")
# 处理函数调用
for part in response.parts:
if hasattr(part, 'function_call'):
function_name = part.function_call.name
args = part.function_call.args
# 执行函数
if function_name == "get_weather":
result = get_weather(**args)
print(f"天气查询结果: {result}")

错误处理
python
from google.api_core import exceptions
try:
response = model.generate_content("你好")
print(response.text)
except exceptions.InvalidArgument as e:
print(f"参数错误: {e}")
except exceptions.ResourceExhausted as e:
print(f"配额耗尽: {e}")
except exceptions.GoogleAPIError as e:
print(f"API 错误: {e}")

Token 计算
python
# 计算输入 tokens
model = genai.GenerativeModel('gemini-3-pro')
token_count = model.count_tokens("要计算的文本内容")
print(f"Token 数量: {token_count.total_tokens}")
# 包含图片的 token 计算
img = PIL.Image.open("photo.jpg")
token_count = model.count_tokens(["描述这张图片", img])
print(f"Token 数量: {token_count.total_tokens}")

使用镜像站 API
部分镜像站提供兼容 OpenAI 格式的 API:
python
import openai
client = openai.OpenAI(
api_key="YOUR_API_KEY",
base_url="https://api.xsimplechat.com/v1"
)
response = client.chat.completions.create(
model="gemini-3-pro",
messages=[
{"role": "user", "content": "你好"}
]
)
print(response.choices[0].message.content)

最佳实践
1. 重试机制
python
import time
from functools import wraps
def retry(max_attempts=3, delay=1):
    """Decorator factory that retries a function when it raises.

    Waits ``delay * attempt_number`` seconds between attempts (linear
    backoff). Once all ``max_attempts`` calls have failed, the last
    exception is re-raised unchanged.

    Args:
        max_attempts: Total number of calls to attempt (at least 1).
        delay: Base wait in seconds; the pause grows linearly per attempt.

    Returns:
        A decorator wrapping the target function with retry behavior.
    """
    def decorator(func):
        @wraps(func)
        def wrapper(*args, **kwargs):
            for attempt in range(max_attempts):
                try:
                    return func(*args, **kwargs)
                except Exception:
                    if attempt == max_attempts - 1:
                        # Out of attempts: bare raise preserves the original
                        # exception and its traceback (unlike `raise e`).
                        raise
                    time.sleep(delay * (attempt + 1))
        return wrapper
    return decorator
@retry(max_attempts=3)
def generate_with_retry(prompt):
    return model.generate_content(prompt)

2. 缓存结果
python
from functools import lru_cache
@lru_cache(maxsize=100)
def cached_generate(prompt):
    return model.generate_content(prompt).text

3. 异步调用
python
import asyncio
import google.generativeai as genai
async def async_generate(prompt):
    """Asynchronously generate a response for *prompt* and return its text."""
    generator = genai.GenerativeModel('gemini-3-pro')
    result = await generator.generate_content_async(prompt)
    return result.text


# Fan several prompts out concurrently and collect results in input order.
async def batch_generate(prompts):
    """Run ``async_generate`` for every prompt concurrently."""
    pending = [async_generate(prompt) for prompt in prompts]
    return await asyncio.gather(*pending)