From 1247af9143ac2a363854a8e6e64297cd8c8ad6c1 Mon Sep 17 00:00:00 2001
From: Jianxin Ma
Date: Thu, 10 Aug 2023 15:32:31 +0800
Subject: [PATCH] Add files via upload

---
 examples/langchain_tooluse.ipynb | 105 ++++++++++++++++++-------------
 1 file changed, 60 insertions(+), 45 deletions(-)

diff --git a/examples/langchain_tooluse.ipynb b/examples/langchain_tooluse.ipynb
index 0efc608..de91bce 100644
--- a/examples/langchain_tooluse.ipynb
+++ b/examples/langchain_tooluse.ipynb
@@ -24,7 +24,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": null,
+ "execution_count": 1,
  "id": "e21c6728",
  "metadata": {},
  "outputs": [],
@@ -65,7 +65,7 @@
 },
 {
  "cell_type": "code",
- "execution_count": 10,
+ "execution_count": 2,
  "id": "07e49b98-9d6c-41f2-9b18-f043f2d13e1a",
  "metadata": {},
  "outputs": [],
@@ -124,7 +124,7 @@
  "    'parameters': [{\n",
  "        \"name\": \"query\",\n",
  "        \"type\": \"string\",\n",
- "        \"description\": \"search query of Wolfram Alpha\",\n",
+ "        \"description\": \"the problem to be solved by Wolfram Alpha\",\n",
  "        'required': True\n",
  "    }], \n",
  "    'tool_api': tool_wrapper_for_qwen(WolframAlpha)\n",