diff --git a/README.md b/README.md
index 9dafa4c..d49bc30 100644
--- a/README.md
+++ b/README.md
@@ -322,6 +322,8 @@ Using the API is also simple. See the example below:
 import openai
 openai.api_base = "http://localhost:8000/v1"
 openai.api_key = "none"
+
+# create a request activating streaming response
 for chunk in openai.ChatCompletion.create(
     model="Qwen-7B",
     messages=[
@@ -331,6 +333,16 @@ for chunk in openai.ChatCompletion.create(
 ):
     if hasattr(chunk.choices[0].delta, "content"):
         print(chunk.choices[0].delta.content, end="", flush=True)
+
+# create a request not activating streaming response
+response = openai.ChatCompletion.create(
+    model="Qwen-7B",
+    messages=[
+        {"role": "user", "content": "你好"}
+    ],
+    stream=False
+)
+print(response.choices[0].message.content)
 ```
diff --git a/README_CN.md b/README_CN.md
index f6a0539..aea803b 100644
--- a/README_CN.md
+++ b/README_CN.md
@@ -325,6 +325,8 @@ python openai_api.py
 import openai
 openai.api_base = "http://localhost:8000/v1"
 openai.api_key = "none"
+
+# 使用流式回复的请求
 for chunk in openai.ChatCompletion.create(
     model="Qwen-7B",
     messages=[
@@ -334,6 +336,16 @@ for chunk in openai.ChatCompletion.create(
 ):
     if hasattr(chunk.choices[0].delta, "content"):
         print(chunk.choices[0].delta.content, end="", flush=True)
+
+# 不使用流式回复的请求
+response = openai.ChatCompletion.create(
+    model="Qwen-7B",
+    messages=[
+        {"role": "user", "content": "你好"}
+    ],
+    stream=False
+)
+print(response.choices[0].message.content)
 ```
diff --git a/README_JA.md b/README_JA.md
index 7ae5bbc..e1431d3 100644
--- a/README_JA.md
+++ b/README_JA.md
@@ -328,6 +328,8 @@ APIの使い方も簡単だ。以下の例をご覧ください:
 import openai
 openai.api_base = "http://localhost:8000/v1"
 openai.api_key = "none"
+
+# create a request activating streaming response
 for chunk in openai.ChatCompletion.create(
     model="Qwen-7B",
     messages=[
@@ -337,6 +339,16 @@ for chunk in openai.ChatCompletion.create(
 ):
     if hasattr(chunk.choices[0].delta, "content"):
         print(chunk.choices[0].delta.content, end="", flush=True)
+
+# create a request not activating streaming response
+response = openai.ChatCompletion.create(
+    model="Qwen-7B",
+    messages=[
+        {"role": "user", "content": "你好"}
+    ],
+    stream=False
+)
+print(response.choices[0].message.content)
 ```

## ツールの使用