Background
Calling the 智谱清言 (ZhipuAI) API from Python.
Prerequisites
- Python 3.11.13 【Conda - Creating a Python Environment】
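If you manage Python with Conda, an environment along these lines will do (the environment name zhipu-demo is just an example):
conda create -n zhipu-demo python=3.11
conda activate zhipu-demo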
Details
Dependencies
zhipuai==2.1.4
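Install the SDK with pip, pinned to the version above:
pip install zhipuai==2.1.4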
Code
- Configuration
config.py
LLMINFO = {
    "APIKEY": "YOUR_PRIVATE_KEY",
    "MODEL": "glm-4-flash"
}
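Hard-coding the key is fine for a quick demo, but to keep it out of version control you can read it from an environment variable instead. A minimal sketch of that variant of config.py, assuming the key is exported as ZHIPUAI_API_KEY (the variable name is illustrative):
# config.py (environment-variable variant)
import os

LLMINFO = {
    # Falls back to an empty string, so the Chatbot class below fails fast
    # with a ValueError if the variable is not set
    "APIKEY": os.environ.get("ZHIPUAI_API_KEY", ""),
    "MODEL": "glm-4-flash"
}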
- Create the file
zhipu.py
# app/core/zhipu.py
import logging
from typing import List, Dict, Any, Union, Iterator, Optional

import httpx  # For detailed HTTP error handling
from zhipuai import ZhipuAI  # Correct import for the latest SDK

import config

# Configure logging for easier debugging
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class Chatbot:
    """
    Optimized Chatbot class for interacting with ZhipuAI's chat models.

    Features:
    - Supports both synchronous and streaming responses.
    - Robust error handling using httpx.
    - Flexible parameter configuration.
    - Returns structured data, not raw JSON strings.
    """

    # Class variables (shared across all instances)
    APIKEY = config.LLMINFO["APIKEY"]
    MODEL = config.LLMINFO["MODEL"]

    def __init__(self):
        """Initialize the Chatbot instance and create a ZhipuAI client."""
        if not self.APIKEY:
            raise ValueError("APIKEY is not set in config.LLMINFO.")
        self.client = ZhipuAI(api_key=self.APIKEY)

    def chat(
        self,
        messages: List[Dict[str, str]],
        *,
        stream: bool = False,
        temperature: Optional[float] = None,
        top_p: Optional[float] = None,
        max_tokens: Optional[int] = None,
        **kwargs: Any
    ) -> Union[Dict[str, Any], Iterator[Dict[str, Any]]]:
        """
        Send messages to ZhipuAI and get the response.

        Args:
            messages (List[Dict[str, str]]): List of chat messages.
            stream (bool): Whether to enable streaming responses. Default is False.
            temperature (Optional[float]): Controls randomness.
            top_p (Optional[float]): Nucleus sampling.
            max_tokens (Optional[int]): Maximum number of tokens to generate.
            **kwargs: Additional parameters to pass to the API.

        Returns:
            Union[Dict[str, Any], Iterator[Dict[str, Any]]]:
            - If stream=False, returns a dict with response content and metadata.
            - If stream=True, returns an iterator yielding streaming chunks.
            - If an error occurs, returns a dict with error details.
        """
        try:
            params = {
                "model": self.MODEL,
                "messages": messages,
                "stream": stream,
                **kwargs
            }
            # Only forward optional sampling parameters that were explicitly set
            if temperature is not None:
                params["temperature"] = temperature
            if top_p is not None:
                params["top_p"] = top_p
            if max_tokens is not None:
                params["max_tokens"] = max_tokens

            response = self.client.chat.completions.create(**params)

            if stream:
                return self._handle_stream_response(response)
            else:
                return self._handle_sync_response(response)
        except httpx.HTTPStatusError as e:
            # Extract a readable message from the error response body if possible
            error_detail = "Unknown error"
            try:
                error_detail = e.response.json().get("error", {}).get("message", "No error message in response body.")
            except Exception:
                error_detail = e.response.text
            logging.error(f"ZhipuAI API HTTP Error: {e.response.status_code} - {error_detail}")
            return {
                "error": True,
                "type": "HTTPStatusError",
                "status_code": e.response.status_code,
                "message": error_detail,
            }
        except Exception as e:
            logging.error(f"An unexpected error occurred: {e}", exc_info=True)
            return {"error": True, "type": "UnexpectedError", "message": str(e)}

    def _handle_sync_response(self, response: Any) -> Dict[str, Any]:
        """Process a synchronous response and extract key information."""
        choice = response.choices[0]
        message = choice.message
        return {
            "content": message.content,
            "role": message.role,
            "finish_reason": choice.finish_reason,
            "usage": response.usage.model_dump() if response.usage else None,
            "id": response.id,
            "created": response.created,
        }

    def _handle_stream_response(self, response: Iterator) -> Iterator[Dict[str, Any]]:
        """Process a streaming response and yield data chunks."""
        for chunk in response:
            if chunk.choices[0].delta.content is not None:
                yield {
                    "id": chunk.id,
                    "content": chunk.choices[0].delta.content,
                    "finish_reason": chunk.choices[0].finish_reason,
                    "created": chunk.created,
                }


if __name__ == "__main__":
    # Assumes config.py has been filled in with a valid API key
    chatbot = Chatbot()

    # Synchronous call
    messages = [
        {"role": "user", "content": "Hello, please introduce yourself."}
    ]
    response = chatbot.chat(messages, temperature=0.8)
    if response.get("error"):
        print(f"Call failed: [{response['type']}] {response['message']}")
    else:
        print(f"Model reply: {response['content']}")
        print(f"Token usage: {response['usage']}")

    # Streaming call
    print("Starting streaming call")
    messages = [
        {"role": "user", "content": "Describe the changing of Earth's four seasons in vivid language."}
    ]
    print("Model replying (streaming):")
    stream_response = chatbot.chat(messages, stream=True)
    # On failure, chat() returns an error dict instead of an iterator
    if isinstance(stream_response, dict) and stream_response.get("error"):
        print(f"Call failed: [{stream_response['type']}] {stream_response['message']}")
    else:
        full_content = ""
        for chunk in stream_response:
            content_chunk = chunk.get("content", "")
            print(content_chunk, end="", flush=True)
            full_content += content_chunk
        print("\n--- Streaming complete ---")
Verification
- Run the script
python zhipu.py
2025-10-29 20:33:47,754 - INFO - HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions "HTTP/1.1 200 OK"
Model reply: Hello! I am an AI assistant named ChatGLM. I was developed from GLM-4, a language model jointly trained in 2024 by Tsinghua University's KEG Lab and Zhipu AI, and my job is to provide appropriate answers and support for users' questions and requests.
Token usage: {'prompt_tokens': 11, 'completion_tokens': 54, 'total_tokens': 65}
Starting streaming call
Model replying (streaming):
2025-10-29 20:33:48,046 - INFO - HTTP Request: POST https://open.bigmodel.cn/api/paas/v4/chat/completions "HTTP/1.1 200 OK"
The changing of Earth's four seasons is like a flowing scroll, full of vitality and transformation.
Spring returns and all things awaken. At the gentle touch of the spring breeze, trees that slept through the winter put on fresh green garments, and branches burst into colorful blossoms. Grass breaks through the soil, lush and green, as if announcing spring's arrival to the world. The sunshine is bright and warm, and people step outdoors to bathe in it and feel the vitality of life.
At the height of summer, the blazing sun hangs high and scorches the earth. Cicadas sing, birds call among fragrant flowers, and everything brims with life. Lakes ripple, trees cast deep shade, and people head for the seaside and the mountains to enjoy the cool of summer. On summer nights the sky fills with stars while fireflies dance in the grass, adding a touch of romance to the evening.
Autumn is crisp and clear, heavy with harvest. As the cool autumn wind arrives, the land changes into gold. In this season of plenty, branches hang heavy with fruit, and farmers celebrate the rewards of their labor with laughter. Maple leaves turn red; sweeps of gold, crimson, and orange are as lovely as a painting. In the gentle breeze, people savor the joy of the harvest and lose themselves in the beauty of autumn.
Winter brings bitter cold and a silver mantle. With the biting winter wind, the land dons pure white snow, as if entering a fairy-tale world. People bundle up in thick coats and walk across the snow, feeling winter's quiet serenity. Snowflakes drift down like a dream, bringing endless delight. Ice sculptures, snowmen, and skiing let people forget the cold and immerse themselves in a sea of joy.
The seasons turn and the years sing on. The changing of Earth's four seasons is like nature's symphony, playing the music of life. In these ever-shifting seasons, we witness the miracle of life and feel the magical charm of nature.
--- Streaming complete ---
That concludes this article. Thank you for reading, and if you run into any problems, feel free to leave a comment below.