code (string, 141–79.4k chars) | apis (list, 1–23 items) | extract_api (string, 126–73.2k chars)
|---|---|---|
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import Ba... | [
"langchain_community.chat_models.ChatOpenAI",
"langchain_core.prompts.ChatPromptTemplate.from_template",
"langchain_core.output_parsers.StrOutputParser",
"langchain_community.embeddings.OpenAIEmbeddings",
"langchain_core.runnables.RunnableParallel"
] | [((1516, 1558), 'langchain_core.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['template'], {}), '(template)\n', (1548, 1558), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((1574, 1586), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n',... |
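The API list above describes the canonical LCEL RAG wiring (prompt | model | parser, fed by a `RunnableParallel`). A minimal runnable sketch of that pattern, with a stand-in retriever lambda in place of the row's truncated Chroma setup:

```python
from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough

template = """Answer the question based only on the following context:
{context}

Question: {question}
"""
prompt = ChatPromptTemplate.from_template(template)

# Map the incoming question into the two variables the prompt expects.
chain = (
    RunnableParallel(
        context=lambda _: "stand-in for retrieved documents",  # assumed retriever
        question=RunnablePassthrough(),
    )
    | prompt
    | ChatOpenAI()
    | StrOutputParser()
)
print(chain.invoke("What does the context say?"))
```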
"""
AI Module
This module provides an AI class that interfaces with language models to perform various tasks such as
starting a conversation, advancing the conversation, and handling message serialization. It also includes
backoff strategies for handling rate limit errors from the OpenAI API.
Classes:
AI: A class... | [
"langchain.schema.messages_to_dict",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.schema.messages_from_dict",
"langchain.schema.HumanMessage",
"langchain.schema.AIMessage",
"langchain.schema.SystemMessage"
] | [((1266, 1293), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1283, 1293), False, 'import logging\n'), ((7101, 7188), 'backoff.on_exception', 'backoff.on_exception', (['backoff.expo', 'openai.RateLimitError'], {'max_tries': '(7)', 'max_time': '(45)'}), '(backoff.expo, openai.RateLimitEr... |
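The extract shows `backoff.on_exception(backoff.expo, openai.RateLimitError, max_tries=7, max_time=45)` in use. A minimal sketch of that retry strategy (the `ask` helper and model name are assumptions, not this module's actual methods):

```python
import backoff
import openai

client = openai.OpenAI()

# Retry with exponential backoff on rate-limit errors; give up after
# 7 attempts or 45 seconds of total waiting, whichever comes first.
@backoff.on_exception(backoff.expo, openai.RateLimitError, max_tries=7, max_time=45)
def ask(prompt: str) -> str:
    response = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": prompt}],
    )
    return response.choices[0].message.content
```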
import os
import csv
from datetime import datetime
from constants import EMBEDDING_MODEL_NAME
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.embeddings import HuggingFaceBgeEmbeddings
from langchain.embeddings import HuggingFaceEmbeddings
def log_to_csv(question, answer):
log_dir, ... | [
"langchain.embeddings.HuggingFaceInstructEmbeddings",
"langchain.embeddings.HuggingFaceEmbeddings",
"langchain.embeddings.HuggingFaceBgeEmbeddings"
] | [((531, 562), 'os.path.join', 'os.path.join', (['log_dir', 'log_file'], {}), '(log_dir, log_file)\n', (543, 562), False, 'import os\n'), ((426, 449), 'os.path.exists', 'os.path.exists', (['log_dir'], {}), '(log_dir)\n', (440, 449), False, 'import os\n'), ((459, 479), 'os.makedirs', 'os.makedirs', (['log_dir'], {}), '(l... |
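The function body is truncated above; a plausible completion consistent with the `os.path.exists` / `os.makedirs` / `os.path.join` calls in the extract (the directory and file names are assumptions):

```python
import os
import csv
from datetime import datetime

def log_to_csv(question, answer):
    log_dir, log_file = "local_chat_history", "qa_log.csv"  # assumed names
    if not os.path.exists(log_dir):
        os.makedirs(log_dir)
    log_path = os.path.join(log_dir, log_file)
    first_write = not os.path.exists(log_path)
    with open(log_path, "a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        if first_write:
            writer.writerow(["timestamp", "question", "answer"])
        writer.writerow([datetime.now().isoformat(), question, answer])
```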
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template",
"langchain.callbacks.AsyncIteratorCallbackHandler"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
# -*- coding: utf-8 -*-
import openai
import json
import logging
import sys
import argparse
from langchain.chat_models import ChatOpenAI
from langchain.prompts import (
ChatPromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
HumanMessagePromptTemplate
)
from langchain import LLMCh... | [
"langchain.LLMChain",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.prompts.SystemMessagePromptTemplate.from_template"
] | [((717, 746), 'os.path.exists', 'os.path.exists', (['progress_file'], {}), '(progress_file)\n', (731, 746), False, 'import os\n'), ((1210, 1243), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name'}), '(model_name=model_name)\n', (1220, 1243), False, 'from langchain.chat_models import Cha... |
from langchain.llms import Ollama
input = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(input)
print (res)
| [
"langchain.llms.Ollama"
] | [((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')] |
import os
from pathlib import Path
from typing import Union
import cloudpickle
import yaml
from mlflow.exceptions import MlflowException
from mlflow.langchain.utils import (
_BASE_LOAD_KEY,
_CONFIG_LOAD_KEY,
_MODEL_DATA_FOLDER_NAME,
_MODEL_DATA_KEY,
_MODEL_DATA_PKL_FILE_NAME,
_MODEL_DATA_YAML_... | [
"langchain.chains.loading.load_chain",
"langchain.prompts.loading.load_prompt",
"langchain.schema.runnable.RunnableSequence",
"langchain.schema.runnable.RunnableParallel",
"langchain.schema.runnable.passthrough.RunnableAssign",
"langchain.schema.runnable.RunnableBranch",
"langchain.llms.get_type_to_cls_... | [((2386, 2443), 'mlflow.exceptions.MlflowException', 'MlflowException', (['f"""Unsupported type {_type} for loading."""'], {}), "(f'Unsupported type {_type} for loading.')\n", (2401, 2443), False, 'from mlflow.exceptions import MlflowException\n'), ((2853, 2915), 'mlflow.exceptions.MlflowException', 'MlflowException', ... |
import os
import tempfile
from typing import List, Union
import streamlit as st
import tiktoken
from langchain.text_splitter import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain.text_splitter import (
TextSplitter as LCSplitter,
)
from langchain.text_splitter import TokenTextSpl... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.TokenTextSplitter"
] | [((718, 772), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter text"""'], {'value': 'DEFAULT_TEXT'}), "('Enter text', value=DEFAULT_TEXT)\n", (738, 772), True, 'import streamlit as st\n'), ((790, 857), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload file"""'], {'accept_mult... |
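The row compares token-aware splitters in a Streamlit app; a minimal sketch of the three constructions named in its API list (chunk sizes are assumptions):

```python
from langchain.text_splitter import (
    CharacterTextSplitter,
    RecursiveCharacterTextSplitter,
    TokenTextSplitter,
)

text = "Some long document text..." * 100

# Each splitter measures chunk length in tiktoken tokens rather than characters.
recursive = RecursiveCharacterTextSplitter.from_tiktoken_encoder(chunk_size=512, chunk_overlap=0)
character = CharacterTextSplitter.from_tiktoken_encoder(chunk_size=512, chunk_overlap=0)
token = TokenTextSplitter(chunk_size=512, chunk_overlap=0)

for splitter in (recursive, character, token):
    print(type(splitter).__name__, len(splitter.split_text(text)))
```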
import json
from langchain.schema import OutputParserException
def parse_json_markdown(json_string: str) -> dict:
# Remove the triple backticks if present
json_string = json_string.strip()
start_index = json_string.find("```json")
end_index = json_string.find("```", start_index + len("```json"))
... | [
"langchain.schema.OutputParserException"
] | [((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException'... |
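The body is cut off above; a plausible completion consistent with the visible logic and the two `json.loads` calls plus `OutputParserException` in the extract:

```python
import json
from langchain.schema import OutputParserException

def parse_json_markdown(json_string: str) -> dict:
    # Remove the triple backticks if present
    json_string = json_string.strip()
    start_index = json_string.find("```json")
    end_index = json_string.find("```", start_index + len("```json"))
    if start_index != -1 and end_index != -1:
        extracted_content = json_string[start_index + len("```json"):end_index].strip()
        return json.loads(extracted_content)
    # No fenced block: try the whole string as raw JSON.
    try:
        extracted_content = json_string
        return json.loads(extracted_content)
    except json.JSONDecodeError as e:
        raise OutputParserException(f"Got invalid JSON object. Error: {e}")
```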
# From project chatglm-langchain
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List, Optional
class ChineseTextSplitter(CharacterTextSplitter):
    def __init__(self, pdf: bool = False, sentence_size: Optional[int] = None, **kwargs):
... | [
"langchain.document_loaders.UnstructuredFileLoader"
] | [((3017, 3066), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['filepath'], {'mode': '"""elements"""'}), "(filepath, mode='elements')\n", (3039, 3066), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((657, 714), 're.compile', 're.compile', (['"""([﹒﹔﹖﹗.。!?][... |
import os
import uuid
from typing import Any, Dict, List, Optional, Tuple
from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import tool as LangChainTool
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.agent... | [
"langchain.memory.ConversationSummaryMemory",
"langchain.agents.agent.RunnableAgent",
"langchain.tools.render.render_text_description"
] | [((2392, 2405), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2403, 2405), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2443, 2468), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (24... |
import os
import logging
import hashlib
import PyPDF2
from tqdm import tqdm
from modules.presets import *
from modules.utils import *
from modules.config import local_embedding
def get_documents(file_src):
from langchain.schema import Document
from langchain.text_splitter import TokenTextSplitter
text_s... | [
"langchain.document_loaders.UnstructuredWordDocumentLoader",
"langchain.embeddings.huggingface.HuggingFaceEmbeddings",
"langchain.vectorstores.FAISS.load_local",
"langchain.document_loaders.TextLoader",
"langchain.document_loaders.UnstructuredPowerPointLoader",
"langchain.document_loaders.UnstructuredEPub... | [((330, 381), 'langchain.text_splitter.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(500)', 'chunk_overlap': '(30)'}), '(chunk_size=500, chunk_overlap=30)\n', (347, 381), False, 'from langchain.text_splitter import TokenTextSplitter\n'), ((406, 443), 'logging.debug', 'logging.debug', (['"""Loading docu... |
import re
from typing import Union
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FORMAT_INSTRUCTIONS0 = """Use the following format and be sure to use new lines after each task.
Question: the input question you must answe... | [
"langchain.schema.AgentAction",
"langchain.schema.OutputParserException"
] | [((3055, 3088), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (3064, 3088), False, 'import re\n'), ((3689, 3749), 're.search', 're.search', (['"""Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)"""', 'text', 're.DOTALL'], {}), "('Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)', text, re.DO... |
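The visible code and the `re.search` calls in the extract suggest the standard MRKL `Action:` / `Action Input:` parsing. A minimal sketch of that parse step (the exact fallback behavior of this custom parser is an assumption):

```python
import re
from langchain.schema import AgentAction, AgentFinish, OutputParserException

def parse(text: str):
    # A final answer takes precedence over any tool invocation.
    if "Final Answer:" in text:
        return AgentFinish({"output": text.split("Final Answer:")[-1].strip()}, text)
    # Otherwise look for an "Action: <tool>" / "Action Input: <input>" pair.
    regex = r"Action\s*\d*\s*:[\s]*(.*?)[\s]*Action\s*\d*\s*Input\s*\d*\s*:[\s]*(.*)"
    match = re.search(regex, text, re.DOTALL)
    if not match:
        raise OutputParserException(f"Could not parse LLM output: `{text}`")
    action = match.group(1).strip()
    action_input = match.group(2).strip(" ").strip('"')
    return AgentAction(action, action_input, text)
```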
import os
import re
import uuid
import cv2
import torch
import requests
import io, base64
import numpy as np
import gradio as gr
from PIL import Image
from omegaconf import OmegaConf
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
from transformers import AutoMod... | [
"langchain.llms.openai.OpenAI",
"langchain.agents.tools.Tool",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.initialize.initialize_agent"
] | [((3812, 3837), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3835, 3837), False, 'import torch\n'), ((3891, 3907), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3901, 3907), False, 'import cv2\n'), ((3929, 3954), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'img'], {}), "('.jpg... |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# coding: utf-8
import os
import gradio as gr
import random
import torch
import cv2
import re
import uuid
from PIL import Image, ImageDraw, ImageOps, ImageFont
import math
import numpy as np
import argparse
import inspect
import tempfile
from tra... | [
"langchain.llms.openai.OpenAI",
"langchain.agents.tools.Tool",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.initialize.initialize_agent"
] | [((6155, 6190), 'os.makedirs', 'os.makedirs', (['"""image"""'], {'exist_ok': '(True)'}), "('image', exist_ok=True)\n", (6166, 6190), False, 'import os\n'), ((6224, 6241), 'random.seed', 'random.seed', (['seed'], {}), '(seed)\n', (6235, 6241), False, 'import random\n'), ((6246, 6266), 'numpy.random.seed', 'np.random.see... |
from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
... | [
"langchain.LLMChain"
] | [((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BaseP... |
import json
import os.path
import logging
import time
from langchain.vectorstores import FAISS
from langchain import PromptTemplate
from utils.references import References
from utils.knowledge import Knowledge
from utils.file_operations import make_archive, copy_templates
from utils.tex_processing import create_copies... | [
"langchain.vectorstores.FAISS.load_local",
"langchain.PromptTemplate"
] | [((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GP... |
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__))... | [
"langchain.llms.openai.OpenAI",
"langchain.agents.tools.Tool",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.initialize.initialize_agent"
] | [((3966, 3992), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_1'], {}), '(audio_path_1)\n', (3978, 3992), True, 'import scipy.io.wavfile as wavfile\n'), ((4014, 4040), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_2'], {}), '(audio_path_2)\n', (4026, 4040), True, 'import scipy.io.wavfile as wavfile\n'... |
from typing import Optional
import typer
from typing_extensions import Annotated
from langchain_cli.namespaces import app as app_namespace
from langchain_cli.namespaces import integration as integration_namespace
from langchain_cli.namespaces import template as template_namespace
from langchain_cli.utils.packages imp... | [
"langchain_cli.namespaces.template.serve",
"langchain_cli.utils.packages.get_package_root",
"langchain_cli.namespaces.app.serve",
"langchain_cli.utils.packages.get_langserve_export"
] | [((394, 449), 'typer.Typer', 'typer.Typer', ([], {'no_args_is_help': '(True)', 'add_completion': '(False)'}), '(no_args_is_help=True, add_completion=False)\n', (405, 449), False, 'import typer\n'), ((952, 1076), 'typer.Option', 'typer.Option', (['(False)', '"""--version"""', '"""-v"""'], {'help': '"""Print the current ... |
import os
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Redis
from langchain_text_splitters import RecursiveCharacterTextSplitter
from rag_redis.config import EMBED_MODEL, INDEX_NAME,... | [
"langchain_community.vectorstores.Redis.from_texts",
"langchain_community.document_loaders.UnstructuredFileLoader",
"langchain_community.embeddings.HuggingFaceEmbeddings",
"langchain_text_splitters.RecursiveCharacterTextSplitter"
] | [((726, 818), 'langchain_text_splitters.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(100)', 'add_start_index': '(True)'}), '(chunk_size=1500, chunk_overlap=100,\n add_start_index=True)\n', (756, 818), False, 'from langchain_text_splitters import ... |
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
inde... | [
"langchain_community.document_loaders.CSVLoader",
"langchain.indexes.VectorstoreIndexCreator"
] | [((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreInde... |
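The row is cut off right after the index creator; a plausible continuation (the question is hypothetical, and `query` assumes a default OpenAI LLM, as in older LangChain versions):

```python
# Build a FAISS-backed index over the loaded CSV rows and query it.
index = index_creator.from_documents(docs)
print(index.query("How many passengers survived?"))  # hypothetical question
```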
from langchain_core.prompts.prompt import PromptTemplate
# There are a few different templates to choose from
# These are just different ways to generate hypothetical documents
web_search_template = """Please write a passage to answer the question
Question: {question}
Passage:"""
sci_fact_template = """Please write a... | [
"langchain_core.prompts.prompt.PromptTemplate.from_template"
] | [((716, 765), 'langchain_core.prompts.prompt.PromptTemplate.from_template', 'PromptTemplate.from_template', (['web_search_template'], {}), '(web_search_template)\n', (744, 765), False, 'from langchain_core.prompts.prompt import PromptTemplate\n')] |
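A minimal usage sketch for the templates above, matching the `PromptTemplate.from_template` call in the extract:

```python
from langchain_core.prompts.prompt import PromptTemplate

web_search = PromptTemplate.from_template(web_search_template)
print(web_search.format(question="What causes tides?"))
# -> "Please write a passage to answer the question\nQuestion: What causes tides?\nPassage:"
```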
"""Interface with the LangChain Hub."""
from __future__ import annotations
import json
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
from langchain_core.prompts import BasePromptTemplate
if TYPE_CHECKING:
from langchainhub i... | [
"langchain_core.load.load.loads",
"langchain_core.load.dump.dumps",
"langchainhub.Client"
] | [((746, 778), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (752, 778), False, 'from langchainhub import Client\n'), ((1979, 1992), 'langchain_core.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1984, 1992), False, 'from langchain_core.load.dump imp... |
from pathlib import Path
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.graphs import Neo4jGraph
from langchain_community.vectorstores import Neo4jVector
from langchain_text_splitters import TokenTextSplitter
txt_... | [
"langchain_community.embeddings.openai.OpenAIEmbeddings",
"langchain_community.graphs.Neo4jGraph",
"langchain_text_splitters.TokenTextSplitter"
] | [((371, 383), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (381, 383), False, 'from langchain_community.graphs import Neo4jGraph\n'), ((513, 564), 'langchain_text_splitters.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(512)', 'chunk_overlap': '(24)'}), '(chunk_size=512, chun... |
from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
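A hypothetical read query against the data merged above, reusing the same `graph` connection:

```python
result = graph.query(
    """
    MATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name:"Top Gun"})
    RETURN a.name AS actor
    """
)
print(result)  # e.g. [{'actor': 'Tom Cruise'}, {'actor': 'Val Kilmer'}, ...]
```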
from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
# Import sample data
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Person {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
# Create full text index ... | [
"langchain_community.graphs.Neo4jGraph"
] | [((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
from importlib import metadata
from langchain_core._api import (
surface_langchain_beta_warnings,
surface_langchain_deprecation_warnings,
)
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Case where package metadata is not available.
__version__ = ""
surfa... | [
"langchain_core._api.surface_langchain_beta_warnings",
"langchain_core._api.surface_langchain_deprecation_warnings"
] | [((315, 355), 'langchain_core._api.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (353, 355), False, 'from langchain_core._api import surface_langchain_beta_warnings, surface_langchain_deprecation_warnings\n'), ((356, 389), 'langchain_core._api.surface_langchain_bet... |
# ruff: noqa: E402
"""Main entrypoint into package."""
import warnings
from importlib import metadata
from typing import Any, Optional
from langchain_core._api.deprecation import surface_langchain_deprecation_warnings
try:
__version__ = metadata.version(__package__)
except metadata.PackageNotFoundError:
# Cas... | [
"langchain.utils.interactive_env.is_interactive_env",
"langchain_core._api.deprecation.surface_langchain_deprecation_warnings"
] | [((1348, 1388), 'langchain_core._api.deprecation.surface_langchain_deprecation_warnings', 'surface_langchain_deprecation_warnings', ([], {}), '()\n', (1386, 1388), False, 'from langchain_core._api.deprecation import surface_langchain_deprecation_warnings\n'), ((243, 272), 'importlib.metadata.version', 'metadata.version... |
import os
from langchain_community.document_loaders import JSONLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_elasticsearch import ElasticsearchStore
from langchain_text_splitters import RecursiveCharacterTextSplitter
ELASTIC_CLOUD_ID = os.getenv("ELASTIC_CLOUD_ID")
ELASTIC_USE... | [
"langchain_community.embeddings.HuggingFaceEmbeddings",
"langchain_community.document_loaders.JSONLoader",
"langchain_text_splitters.RecursiveCharacterTextSplitter"
] | [((279, 308), 'os.getenv', 'os.getenv', (['"""ELASTIC_CLOUD_ID"""'], {}), "('ELASTIC_CLOUD_ID')\n", (288, 308), False, 'import os\n'), ((328, 368), 'os.getenv', 'os.getenv', (['"""ELASTIC_USERNAME"""', '"""elastic"""'], {}), "('ELASTIC_USERNAME', 'elastic')\n", (337, 368), False, 'import os\n'), ((388, 417), 'os.getenv... |
import importlib
import json
import os
from typing import Any, Dict, List, Optional
from langchain_core._api import beta
from langchain_core.load.mapping import (
_JS_SERIALIZABLE_MAPPING,
_OG_SERIALIZABLE_MAPPING,
OLD_CORE_NAMESPACES_MAPPING,
SERIALIZABLE_MAPPING,
)
from langchain_core.load.serializab... | [
"langchain_core._api.beta"
] | [((3922, 3928), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (3926, 3928), False, 'from langchain_core._api import beta\n'), ((4509, 4515), 'langchain_core._api.beta', 'beta', ([], {}), '()\n', (4513, 4515), False, 'from langchain_core._api import beta\n')] |
from typing import Any, Dict, List, Type, Union
from langchain_community.graphs import NetworkxEntityGraph
from langchain_community.graphs.networkx_graph import (
KnowledgeTriple,
get_entities,
parse_triples,
)
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import... | [
"langchain_community.graphs.networkx_graph.get_entities",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_community.graphs.networkx_graph.parse_triples"
] | [((1062, 1104), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'NetworkxEntityGraph'}), '(default_factory=NetworkxEntityGraph)\n', (1067, 1104), False, 'from langchain_core.pydantic_v1 import Field\n'), ((3163, 3223), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.llm', 'prompt':... |
"""**Retriever** class returns Documents given a text **query**.
It is more general than a vector store. A retriever does not need to be able to
store documents, only to return (or retrieve) them. Vector stores can be used as
the backbone of a retriever, but there are other types of retrievers as well.
**Class hierarch... | [
"langchain_core.runnables.ensure_config",
"langchain_core.load.dump.dumpd"
] | [((5405, 5426), 'langchain_core.runnables.ensure_config', 'ensure_config', (['config'], {}), '(config)\n', (5418, 5426), False, 'from langchain_core.runnables import Runnable, RunnableConfig, RunnableSerializable, ensure_config\n'), ((5868, 5889), 'langchain_core.runnables.ensure_config', 'ensure_config', (['config'], ... |
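The docstring's point — a vector store serving as the backbone of a retriever — in a minimal self-contained sketch (`FakeEmbeddings` and FAISS are illustrative choices; the `faiss` package must be installed):

```python
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.documents import Document

docs = [Document(page_content="Retrievers return documents for a query.")]
store = FAISS.from_documents(docs, FakeEmbeddings(size=32))

# The vector store exposes the retriever interface directly.
retriever = store.as_retriever(search_kwargs={"k": 1})
print(retriever.invoke("What do retrievers do?"))
```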
# Ingest Documents into a Zep Collection
import os
from langchain_community.document_loaders import WebBaseLoader
from langchain_community.embeddings import FakeEmbeddings
from langchain_community.vectorstores.zep import CollectionConfig, ZepVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitt... | [
"langchain_community.document_loaders.WebBaseLoader",
"langchain_community.embeddings.FakeEmbeddings",
"langchain_community.vectorstores.zep.CollectionConfig",
"langchain_text_splitters.RecursiveCharacterTextSplitter"
] | [((338, 392), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_URL"""', '"""http://localhost:8000"""'], {}), "('ZEP_API_URL', 'http://localhost:8000')\n", (352, 392), False, 'import os\n'), ((407, 442), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_KEY"""', 'None'], {}), "('ZEP_API_KEY', None)\n", (421, 442), Fals... |
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
template = """You are a helpful assistant. Help the user answer any questions.
You have access to the following tools:
{tools}
In order to use a tool, you can use <tool></tool> and <... | [
"langchain_core.prompts.MessagesPlaceholder",
"langchain_core.agents.AgentAction",
"langchain_core.agents.AgentFinish"
] | [((1068, 1117), 'langchain_core.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""chat_history"""'}), "(variable_name='chat_history')\n", (1087, 1117), False, 'from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder\n'), ((1548, 1605), 'langchain_core.agents.AgentAction'... |
from langchain_community.graphs import Neo4jGraph
# Instantiate connection to Neo4j
graph = Neo4jGraph()
# Define unique constraints
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (m:Movie) REQUIRE m.id IS UNIQUE;")
graph.query("CREATE CONSTRAINT IF NOT EXISTS FOR (u:User) REQUIRE u.id IS UNIQUE;")
graph.query("CRE... | [
"langchain_community.graphs.Neo4jGraph"
] | [((93, 105), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (103, 105), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
"""Tool for the Exa Search API."""
from typing import Dict, List, Optional, Union
from exa_py import Exa # type: ignore
from exa_py.api import HighlightsContentsOptions, TextContentsOptions # type: ignore
from langchain_core.callbacks import (
CallbackManagerForToolRun,
)
from langchain_core.pydantic_v1 import ... | [
"langchain_exa._utilities.initialize_client",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((796, 815), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (801, 815), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((845, 864), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (850, 864), ... |
from typing import Any, List, Literal
from langchain_core.messages.base import (
BaseMessage,
BaseMessageChunk,
merge_content,
)
from langchain_core.utils._merge import merge_dicts
class ChatMessage(BaseMessage):
"""Message that can be assigned an arbitrary speaker (i.e. role)."""
role: str
... | [
"langchain_core.messages.base.merge_content",
"langchain_core.utils._merge.merge_dicts"
] | [((1490, 1532), 'langchain_core.messages.base.merge_content', 'merge_content', (['self.content', 'other.content'], {}), '(self.content, other.content)\n', (1503, 1532), False, 'from langchain_core.messages.base import BaseMessage, BaseMessageChunk, merge_content\n'), ((1568, 1628), 'langchain_core.utils._merge.merge_di... |
from typing import Any, List
from langchain_core.prompt_values import ImagePromptValue, ImageURL, PromptValue
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.pydantic_v1 import Field
from langchain_core.utils import image as image_utils
class ImagePromptTemplate(BasePromptTemplate[Imag... | [
"langchain_core.pydantic_v1.Field",
"langchain_core.utils.image.image_to_data_url"
] | [((409, 436), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (414, 436), False, 'from langchain_core.pydantic_v1 import Field\n'), ((2391, 2426), 'langchain_core.utils.image.image_to_data_url', 'image_utils.image_to_data_url', (['path'], {}), '(path)\n', (242... |
"""
**LLM** classes provide
access to the large language model (**LLM**) APIs and services.
**Class hierarchy:**
.. code-block::
BaseLanguageModel --> BaseLLM --> LLM --> <name> # Examples: AI21, HuggingFaceHub, OpenAI
**Main helpers:**
.. code-block::
LLMResult, PromptValue,
CallbackManagerForLLMRun... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((11338, 11358), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (11356, 11358), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((11368, 11729), 'warnings.warn', 'warnings.warn', (['f"""Importing LLMs from langchain is deprecated. Importing fro... |
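The hierarchy in the docstring bottoms out at `LLM`, where a subclass only needs `_llm_type` and `_call`. A toy sketch of such a leaf (not a real provider):

```python
from typing import Any, List, Optional
from langchain_core.language_models.llms import LLM

class EchoLLM(LLM):
    """Toy leaf of BaseLanguageModel --> BaseLLM --> LLM."""

    @property
    def _llm_type(self) -> str:
        return "echo"

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        return prompt[::-1]  # stand-in for a real model call

print(EchoLLM().invoke("hello"))
```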
import logging
from abc import ABC, abstractmethod
from itertools import islice
from typing import Any, Dict, Iterable, List, Optional
from langchain_community.utilities.redis import get_client
from langchain_core.language_models import BaseLanguageModel
from langchain_core.messages import BaseMessage, get_buffer_stri... | [
"langchain_community.utilities.redis.get_client",
"langchain.chains.llm.LLMChain",
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string"
] | [((701, 728), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (718, 728), False, 'import logging\n'), ((10994, 11036), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'InMemoryEntityStore'}), '(default_factory=InMemoryEntityStore)\n', (10999, 11036), False, 'from lang... |
from typing import Any, Dict, List, Optional
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.pydantic_v1 import root_validator
from langchain.memory.chat_memory import BaseChatMemory, BaseMemory
from langchain.memory.utils import get_prompt_input_key
class ConversationBufferMe... | [
"langchain.memory.utils.get_prompt_input_key",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator"
] | [((2888, 2904), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (2902, 2904), False, 'from langchain_core.pydantic_v1 import root_validator\n'), ((983, 1073), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefi... |
"""**Prompt values** for language model prompts.
Prompt values are used to represent different pieces of prompts.
They can be used to represent text, images, or chat message pieces.
"""
from __future__ import annotations
from abc import ABC, abstractmethod
from typing import List, Literal, Sequence, cast
from typing... | [
"langchain_core.messages.HumanMessage",
"langchain_core.messages.get_buffer_string"
] | [((2116, 2148), 'langchain_core.messages.get_buffer_string', 'get_buffer_string', (['self.messages'], {}), '(self.messages)\n', (2133, 2148), False, 'from langchain_core.messages import AnyMessage, BaseMessage, HumanMessage, get_buffer_string\n'), ((1800, 1831), 'langchain_core.messages.HumanMessage', 'HumanMessage', (... |
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, Union
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Extra, Field
from langchain_core.utils import get_bolded_text
from langchain_core.utils._merge import merge_d... | [
"langchain_core.utils.get_bolded_text",
"langchain_core.utils.interactive_env.is_interactive_env",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.chat.ChatPromptTemplate",
"langchain_core.utils._merge.merge_dicts"
] | [((736, 763), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (741, 763), False, 'from langchain_core.pydantic_v1 import Extra, Field\n'), ((950, 977), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', ... |
"""Utilities for loading configurations from langchain_core-hub."""
import os
import re
import tempfile
from pathlib import Path, PurePosixPath
from typing import Any, Callable, Optional, Set, TypeVar, Union
from urllib.parse import urljoin
import requests
from langchain_core._api.deprecation import deprecated
DEFA... | [
"langchain_core._api.deprecation.deprecated"
] | [((330, 383), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_DEFAULT_REF"""', '"""master"""'], {}), "('LANGCHAIN_HUB_DEFAULT_REF', 'master')\n", (344, 383), False, 'import os\n'), ((476, 546), 'os.environ.get', 'os.environ.get', (['"""LANGCHAIN_HUB_URL_BASE"""', "(LANGCHAINHUB_REPO + '{ref}/')"], {}), "('LANGC... |
"""Functionality for loading agents."""
import json
import logging
from pathlib import Path
from typing import Any, List, Optional, Union
import yaml
from langchain_core._api import deprecated
from langchain_core.language_models import BaseLanguageModel
from langchain_core.tools import Tool
from langchain_core.utils.l... | [
"langchain_core._api.deprecated",
"langchain_core.utils.loading.try_load_from_hub"
] | [((564, 591), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (581, 591), False, 'import logging\n'), ((1154, 1190), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'removal': '"""0.2.0"""'}), "('0.1.0', removal='0.2.0')\n", (1164, 1190), False, 'from langchain_core._api... |
"""BasePrompt schema definition."""
from __future__ import annotations
import warnings
from abc import ABC
from string import Formatter
from typing import Any, Callable, Dict, List, Set
from langchain_core.prompt_values import PromptValue, StringPromptValue
from langchain_core.prompts.base import BasePromptTemplate
... | [
"langchain_core.utils.get_colored_text",
"langchain_core.utils.interactive_env.is_interactive_env"
] | [((3179, 3192), 'jinja2.Environment', 'Environment', ([], {}), '()\n', (3190, 3192), False, 'from jinja2 import Environment, meta\n'), ((3239, 3274), 'jinja2.meta.find_undeclared_variables', 'meta.find_undeclared_variables', (['ast'], {}), '(ast)\n', (3269, 3274), False, 'from jinja2 import Environment, meta\n'), ((607... |
"""**Tools** are classes that an Agent uses to interact with the world.
Each tool has a **description**. Agent uses the description to choose the right
tool for the job.
**Class hierarchy:**
.. code-block::
ToolMetaclass --> BaseTool --> <name>Tool # Examples: AIPluginTool, BaseGraphQLTool
... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((2151, 2171), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (2169, 2171), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((2185, 2548), 'warnings.warn', 'warnings.warn', (['f"""Importing tools from langchain is deprecated. Importing from lan... |
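The docstring notes that the agent chooses tools by their descriptions. A minimal sketch of a described tool via the `@tool` decorator (the function itself is hypothetical):

```python
from langchain_core.tools import tool

@tool
def word_count(text: str) -> int:
    """Count the number of words in a piece of text."""
    # The docstring above becomes the description the agent reads.
    return len(text.split())

print(word_count.name, "->", word_count.description)
print(word_count.invoke({"text": "how many words is this"}))
```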
from __future__ import annotations
from typing import Any, List, Literal
from langchain_core.load.serializable import Serializable
from langchain_core.pydantic_v1 import Field
class Document(Serializable):
"""Class for storing a piece of text and associated metadata."""
page_content: str
"""String text... | [
"langchain_core.pydantic_v1.Field"
] | [((346, 373), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (351, 373), False, 'from langchain_core.pydantic_v1 import Field\n')] |
"""Schemas for tracers."""
from __future__ import annotations
import datetime
import warnings
from typing import Any, Dict, List, Optional, Type
from uuid import UUID
from langsmith.schemas import RunBase as BaseRunV2
from langsmith.schemas import RunTypeEnum as RunTypeEnumDep
from langchain_core._api import depreca... | [
"langchain_core._api.deprecated",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((444, 515), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""Use string instead."""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='Use string instead.', removal='0.2.0')\n", (454, 515), False, 'from langchain_core._api import deprecated\n'), ((781, 817), 'langchain_core._ap... |
"""Load prompts."""
import json
import logging
from pathlib import Path
from typing import Callable, Dict, Union
import yaml
from langchain_core.output_parsers.string import StrOutputParser
from langchain_core.prompts.base import BasePromptTemplate
from langchain_core.prompts.chat import ChatPromptTemplate
from langc... | [
"langchain_core.utils.try_load_from_hub",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain_core.output_parsers.string.StrOutputParser",
"langchain_core.prompts.few_shot.FewShotPromptTemplate",
"langchain_core.prompts.prompt.PromptTemplate"
] | [((581, 608), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (598, 608), False, 'import logging\n'), ((3962, 3993), 'langchain_core.prompts.few_shot.FewShotPromptTemplate', 'FewShotPromptTemplate', ([], {}), '(**config)\n', (3983, 3993), False, 'from langchain_core.prompts.few_shot import... |
from functools import partial
from typing import Optional
from langchain_core.callbacks.manager import (
Callbacks,
)
from langchain_core.prompts import BasePromptTemplate, PromptTemplate, format_document
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.retrievers import BaseRetriever
f... | [
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.format_document",
"langchain.tools.Tool",
"langchain_core.prompts.PromptTemplate.from_template"
] | [((439, 489), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'description': '"""query to look up in retriever"""'}), "(description='query to look up in retriever')\n", (444, 489), False, 'from langchain_core.pydantic_v1 import BaseModel, Field\n'), ((1996, 2126), 'functools.partial', 'partial', (['_get_relevant_doc... |
from typing import Any, List, Sequence, Tuple, Union
from langchain_core._api import deprecated
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import Callbacks
from langchain_core.language_models import BaseLanguageModel
from langchain_core.prompts.base import BasePromptTempla... | [
"langchain_core.prompts.chat.AIMessagePromptTemplate.from_template",
"langchain_core.prompts.chat.ChatPromptTemplate.from_template",
"langchain.agents.output_parsers.XMLAgentOutputParser",
"langchain.agents.format_scratchpad.format_xml",
"langchain_core._api.deprecated"
] | [((875, 943), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_xml_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_xml_agent', removal='0.2.0')\n", (885, 943), False, 'from langchain_core._api import deprecated\n'), ((1644, 1696), 'langchain_core.prompts... |
"""**Graphs** provide a natural language interface to graph databases."""
import warnings
from typing import Any
from langchain_core._api import LangChainDeprecationWarning
from langchain.utils.interactive_env import is_interactive_env
def __getattr__(name: str) -> Any:
from langchain_community import graphs
... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((378, 398), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (396, 398), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((408, 773), 'warnings.warn', 'warnings.warn', (['f"""Importing graphs from langchain is deprecated. Importing from langchai... |
"""Chain that makes API calls and summarizes the responses to answer a question."""
from __future__ import annotations
from typing import Any, Dict, List, Optional, Sequence, Tuple
from urllib.parse import urlparse
from langchain_community.utilities.requests import TextRequestsWrapper
from langchain_core.callbacks im... | [
"langchain_core.callbacks.AsyncCallbackManagerForChainRun.get_noop_manager",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain",
"langchain_community.utilities.requests.TextRequestsWrapper",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v... | [((979, 992), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (987, 992), False, 'from urllib.parse import urlparse\n'), ((2555, 2574), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'exclude': '(True)'}), '(exclude=True)\n', (2560, 2574), False, 'from langchain_core.pydantic_v1 import Field, root_va... |
"""Prompt template that contains few shot examples."""
from __future__ import annotations
from pathlib import Path
from typing import Any, Dict, List, Literal, Optional, Union
from langchain_core.messages import BaseMessage, get_buffer_string
from langchain_core.prompts.chat import (
BaseChatPromptTemplate,
... | [
"langchain_core.pydantic_v1.Field",
"langchain_core.messages.get_buffer_string",
"langchain_core.pydantic_v1.root_validator",
"langchain_core.prompts.string.get_template_variables"
] | [((1200, 1224), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1214, 1224), False, 'from langchain_core.pydantic_v1 import BaseModel, Extra, Field, root_validator\n'), ((3468, 3484), 'langchain_core.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n'... |
"""Hypothetical Document Embeddings.
https://arxiv.org/abs/2212.10496
"""
from __future__ import annotations
from typing import Any, Dict, List, Optional
import numpy as np
from langchain_core.callbacks import CallbackManagerForChainRun
from langchain_core.embeddings import Embeddings
from langchain_core.language_mo... | [
"langchain.chains.hyde.prompts.PROMPT_MAP.keys",
"langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager",
"langchain.chains.llm.LLMChain"
] | [((3148, 3180), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (3156, 3180), False, 'from langchain.chains.llm import LLMChain\n'), ((2258, 2303), 'langchain_core.callbacks.CallbackManagerForChainRun.get_noop_manager', 'CallbackManagerForChainRun.get... |
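This module's chain is typically built with `from_llm`, picking one of `PROMPT_MAP`'s keys; a minimal sketch (the LLM and embedding choices are assumptions):

```python
from langchain.chains import HypotheticalDocumentEmbedder
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.llms import OpenAI

# The LLM writes a hypothetical passage for the query; that passage,
# not the raw query, is what gets embedded for retrieval.
embedder = HypotheticalDocumentEmbedder.from_llm(
    llm=OpenAI(),
    base_embeddings=OpenAIEmbeddings(),
    prompt_key="web_search",  # one of PROMPT_MAP's keys
)
vector = embedder.embed_query("What did the study find about sleep?")
```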
"""Attempt to implement MRKL systems as described in arxiv.org/pdf/2205.00445.pdf."""
from __future__ import annotations
from typing import Any, Callable, List, NamedTuple, Optional, Sequence
from langchain_core._api import deprecated
from langchain_core.callbacks import BaseCallbackManager
from langchain_core.langua... | [
"langchain.agents.mrkl.output_parser.MRKLOutputParser",
"langchain.agents.utils.validate_tools_single_input",
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.PromptTemplate",
"langchain_core._api.deprecated",
"langchain.chains.LLMChain",
"langchain.agents.tools.Tool",
"langchain_core.promp... | [((1278, 1348), 'langchain_core._api.deprecated', 'deprecated', (['"""0.1.0"""'], {'alternative': '"""create_react_agent"""', 'removal': '"""0.2.0"""'}), "('0.1.0', alternative='create_react_agent', removal='0.2.0')\n", (1288, 1348), False, 'from langchain_core._api import deprecated\n'), ((5068, 5104), 'langchain_core... |
import base64
import io
import os
import uuid
from io import BytesIO
from pathlib import Path
from langchain.retrievers.multi_vector import MultiVectorRetriever
from langchain.storage import LocalFileStore
from langchain_community.chat_models import ChatOllama
from langchain_community.embeddings import OllamaEmbedding... | [
"langchain_core.documents.Document",
"langchain_community.embeddings.OllamaEmbeddings",
"langchain_community.chat_models.ChatOllama",
"langchain_core.messages.HumanMessage",
"langchain.retrievers.multi_vector.MultiVectorRetriever"
] | [((731, 774), 'langchain_community.chat_models.ChatOllama', 'ChatOllama', ([], {'model': '"""bakllava"""', 'temperature': '(0)'}), "(model='bakllava', temperature=0)\n", (741, 774), False, 'from langchain_community.chat_models import ChatOllama\n'), ((2494, 2525), 'base64.b64decode', 'base64.b64decode', (['base64_strin... |
from langchain_community.chat_models import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import Ba... | [
"langchain_community.chat_models.ChatOpenAI",
"langchain_core.prompts.ChatPromptTemplate.from_template",
"langchain_core.output_parsers.StrOutputParser",
"langchain_community.embeddings.OpenAIEmbeddings",
"langchain_core.runnables.RunnableParallel"
] | [((1516, 1558), 'langchain_core.prompts.ChatPromptTemplate.from_template', 'ChatPromptTemplate.from_template', (['template'], {}), '(template)\n', (1548, 1558), False, 'from langchain_core.prompts import ChatPromptTemplate\n'), ((1574, 1586), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {}), '()\n',... |
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template",
"langchain.callbacks.AsyncIteratorCallbackHandler"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template",
"langchain.callbacks.AsyncIteratorCallbackHandler"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template",
"langchain.callbacks.AsyncIteratorCallbackHandler"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
from fastapi import Body
from sse_starlette.sse import EventSourceResponse
from configs import LLM_MODELS, TEMPERATURE
from server.utils import wrap_done, get_OpenAI
from langchain.chains import LLMChain
from langchain.callbacks import AsyncIteratorCallbackHandler
from typing import AsyncIterable, Optional
import async... | [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate.from_template",
"langchain.callbacks.AsyncIteratorCallbackHandler"
] | [((450, 498), 'fastapi.Body', 'Body', (['...'], {'description': '"""用户输入"""', 'examples': "['恼羞成怒']"}), "(..., description='用户输入', examples=['恼羞成怒'])\n", (454, 498), False, 'from fastapi import Body\n'), ((536, 567), 'fastapi.Body', 'Body', (['(False)'], {'description': '"""流式输出"""'}), "(False, description='流式输出')\n", ... |
from langchain.llms import Ollama
input = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(input)
print (res)
| [
"langchain.llms.Ollama"
] | [((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')] |
from langchain.llms import Ollama
input = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(input)
print (res)
| [
"langchain.llms.Ollama"
] | [((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')] |
from langchain.llms import Ollama
input = input("What is your question?")
llm = Ollama(model="llama2")
res = llm.predict(input)
print (res)
| [
"langchain.llms.Ollama"
] | [((81, 103), 'langchain.llms.Ollama', 'Ollama', ([], {'model': '"""llama2"""'}), "(model='llama2')\n", (87, 103), False, 'from langchain.llms import Ollama\n')] |
import os
import tempfile
from typing import List, Union
import streamlit as st
import tiktoken
from langchain.text_splitter import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain.text_splitter import (
TextSplitter as LCSplitter,
)
from langchain.text_splitter import TokenTextSpl... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.TokenTextSplitter"
] | [((718, 772), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter text"""'], {'value': 'DEFAULT_TEXT'}), "('Enter text', value=DEFAULT_TEXT)\n", (738, 772), True, 'import streamlit as st\n'), ((790, 857), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload file"""'], {'accept_mult... |
import os
import tempfile
from typing import List, Union
import streamlit as st
import tiktoken
from langchain.text_splitter import (
CharacterTextSplitter,
RecursiveCharacterTextSplitter,
)
from langchain.text_splitter import (
TextSplitter as LCSplitter,
)
from langchain.text_splitter import TokenTextSpl... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.CharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.TokenTextSplitter"
] | [((718, 772), 'streamlit.sidebar.text_area', 'st.sidebar.text_area', (['"""Enter text"""'], {'value': 'DEFAULT_TEXT'}), "('Enter text', value=DEFAULT_TEXT)\n", (738, 772), True, 'import streamlit as st\n'), ((790, 857), 'streamlit.sidebar.file_uploader', 'st.sidebar.file_uploader', (['"""Upload file"""'], {'accept_mult... |
import json
from langchain.schema import OutputParserException
def parse_json_markdown(json_string: str) -> dict:
# Remove the triple backticks if present
json_string = json_string.strip()
start_index = json_string.find("```json")
end_index = json_string.find("```", start_index + len("```json"))
... | [
"langchain.schema.OutputParserException"
] | [((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException'... |
import json
from langchain.schema import OutputParserException
def parse_json_markdown(json_string: str) -> dict:
# Remove the triple backticks if present
json_string = json_string.strip()
start_index = json_string.find("```json")
end_index = json_string.find("```", start_index + len("```json"))
... | [
"langchain.schema.OutputParserException"
] | [((526, 555), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (536, 555), False, 'import json\n'), ((871, 900), 'json.loads', 'json.loads', (['extracted_content'], {}), '(extracted_content)\n', (881, 900), False, 'import json\n'), ((1322, 1383), 'langchain.schema.OutputParserException'... |
# From project chatglm-langchain
from langchain.document_loaders import UnstructuredFileLoader
from langchain.text_splitter import CharacterTextSplitter
import re
from typing import List
class ChineseTextSplitter(CharacterTextSplitter):
def __init__(self, pdf: bool = False, sentence_size: int = None, **kwargs):
... | [
"langchain.document_loaders.UnstructuredFileLoader"
] | [((3017, 3066), 'langchain.document_loaders.UnstructuredFileLoader', 'UnstructuredFileLoader', (['filepath'], {'mode': '"""elements"""'}), "(filepath, mode='elements')\n", (3039, 3066), False, 'from langchain.document_loaders import UnstructuredFileLoader\n'), ((657, 714), 're.compile', 're.compile', (['"""([﹒﹔﹖﹗.。!?][... |
import os
import uuid
from typing import Any, Dict, List, Optional, Tuple
from langchain.agents.agent import RunnableAgent
from langchain.agents.tools import tool as LangChainTool
from langchain.memory import ConversationSummaryMemory
from langchain.tools.render import render_text_description
from langchain_core.agent... | [
"langchain.memory.ConversationSummaryMemory",
"langchain.agents.agent.RunnableAgent",
"langchain.tools.render.render_text_description"
] | [((2392, 2405), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2403, 2405), False, 'from pydantic import UUID4, BaseModel, ConfigDict, Field, InstanceOf, PrivateAttr, field_validator, model_validator\n'), ((2443, 2468), 'pydantic.PrivateAttr', 'PrivateAttr', ([], {'default': 'None'}), '(default=None)\n', (24... |
import re
from typing import Union
from langchain.agents.mrkl.output_parser import MRKLOutputParser
from langchain.schema import AgentAction, AgentFinish, OutputParserException
FORMAT_INSTRUCTIONS0 = """Use the following format and be sure to use new lines after each task.
Question: the input question you must answe... | [
"langchain.schema.AgentAction",
"langchain.schema.OutputParserException"
] | [((3055, 3088), 're.search', 're.search', (['regex', 'text', 're.DOTALL'], {}), '(regex, text, re.DOTALL)\n', (3064, 3088), False, 'import re\n'), ((3689, 3749), 're.search', 're.search', (['"""Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)"""', 'text', 're.DOTALL'], {}), "('Action\\\\s*\\\\d*\\\\s*:[\\\\s]*(.*?)', text, re.DO... |
import os
import re
import uuid
import cv2
import torch
import requests
import io, base64
import numpy as np
import gradio as gr
from PIL import Image
from omegaconf import OmegaConf
from transformers import pipeline, BlipProcessor, BlipForConditionalGeneration, BlipForQuestionAnswering
from transformers import AutoMod... | [
"langchain.llms.openai.OpenAI",
"langchain.agents.tools.Tool",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.initialize.initialize_agent"
] | [((3812, 3837), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (3835, 3837), False, 'import torch\n'), ((3891, 3907), 'cv2.imread', 'cv2.imread', (['path'], {}), '(path)\n', (3901, 3907), False, 'import cv2\n'), ((3929, 3954), 'cv2.imencode', 'cv2.imencode', (['""".jpg"""', 'img'], {}), "('.jpg... |
from typing import Any, Callable, Dict, TypeVar
from langchain import BasePromptTemplate, LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.schema import BaseOutputParser, OutputParserException
from openai.error import (
AuthenticationError,
InvalidRequestError,
RateLimitError,
... | [
"langchain.LLMChain"
] | [((469, 481), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (476, 481), False, 'from typing import Any, Callable, Dict, TypeVar\n'), ((2486, 2520), 'langchain.LLMChain', 'LLMChain', ([], {'llm': 'model', 'prompt': 'prompt'}), '(llm=model, prompt=prompt)\n', (2494, 2520), False, 'from langchain import BaseP... |
import json
import os.path
import logging
import time
from langchain.vectorstores import FAISS
from langchain import PromptTemplate
from utils.references import References
from utils.knowledge import Knowledge
from utils.file_operations import make_archive, copy_templates
from utils.tex_processing import create_copies... | [
"langchain.vectorstores.FAISS.load_local",
"langchain.PromptTemplate"
] | [((1271, 1292), 'logging.info', 'logging.info', (['message'], {}), '(message)\n', (1283, 1292), False, 'import logging\n'), ((1552, 1587), 'utils.gpt_interaction.GPTModel', 'GPTModel', ([], {'model': '"""gpt-3.5-turbo-16k"""'}), "(model='gpt-3.5-turbo-16k')\n", (1560, 1587), False, 'from utils.gpt_interaction import GP... |
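This row reloads a persisted FAISS index and fills a `PromptTemplate` for paper drafting. A minimal sketch of that retrieval-plus-template pattern; the index folder, embedding choice, and template text are hypothetical, and `FAISS.load_local` requires `OPENAI_API_KEY` here only because of the embedding choice:

```python
from langchain import PromptTemplate
from langchain.embeddings import OpenAIEmbeddings  # hypothetical embedding choice
from langchain.vectorstores import FAISS

# Reload a previously persisted index; "faiss_index" is a hypothetical folder.
db = FAISS.load_local("faiss_index", OpenAIEmbeddings())

template = PromptTemplate(
    input_variables=["title", "context"],
    template="Write a related-work paragraph for '{title}' using:\n{context}",
)
docs = db.similarity_search("diffusion models", k=3)
context = "\n".join(d.page_content for d in docs)
print(template.format(title="My Paper", context=context))
```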
import sys
import os
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'NeuralSeq'))
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__))... | [
"langchain.llms.openai.OpenAI",
"langchain.agents.tools.Tool",
"langchain.chains.conversation.memory.ConversationBufferMemory",
"langchain.agents.initialize.initialize_agent"
] | [((3966, 3992), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_1'], {}), '(audio_path_1)\n', (3978, 3992), True, 'import scipy.io.wavfile as wavfile\n'), ((4014, 4040), 'scipy.io.wavfile.read', 'wavfile.read', (['audio_path_2'], {}), '(audio_path_2)\n', (4026, 4040), True, 'import scipy.io.wavfile as wavfile\n'... |
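The `wavfile.read` calls in this row's tuples belong to an audio tool exposed to the same `initialize_agent` wiring shown earlier. A sketch of just the tool body, assuming int16 PCM WAV inputs with matching sample rates (the function name is hypothetical):

```python
import numpy as np
import scipy.io.wavfile as wavfile

def mix_audio(audio_path_1: str, audio_path_2: str, out_path: str) -> str:
    """Overlay two WAV files of equal sample rate into one track."""
    sr1, wav1 = wavfile.read(audio_path_1)
    sr2, wav2 = wavfile.read(audio_path_2)
    if sr1 != sr2:
        raise ValueError("expected matching sample rates")
    # Average the overlapping region in int32 to avoid int16 overflow.
    n = min(len(wav1), len(wav2))
    mixed = (wav1[:n].astype(np.int32) + wav2[:n].astype(np.int32)) // 2
    wavfile.write(out_path, sr1, mixed.astype(np.int16))
    return out_path
```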
from typing import Optional
import typer
from typing_extensions import Annotated
from langchain_cli.namespaces import app as app_namespace
from langchain_cli.namespaces import integration as integration_namespace
from langchain_cli.namespaces import template as template_namespace
from langchain_cli.utils.packages imp... | [
"langchain_cli.namespaces.template.serve",
"langchain_cli.utils.packages.get_package_root",
"langchain_cli.namespaces.app.serve",
"langchain_cli.utils.packages.get_langserve_export"
] | [((394, 449), 'typer.Typer', 'typer.Typer', ([], {'no_args_is_help': '(True)', 'add_completion': '(False)'}), '(no_args_is_help=True, add_completion=False)\n', (405, 449), False, 'import typer\n'), ((952, 1076), 'typer.Option', 'typer.Option', (['(False)', '"""--version"""', '"""-v"""'], {'help': '"""Print the current ... |
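This row is the `langchain_cli` entry point: a Typer app with a `--version` flag and serve commands. A self-contained sketch of that CLI shape; the version string and `serve` behavior are hypothetical placeholders:

```python
import typer

app = typer.Typer(no_args_is_help=True, add_completion=False)

@app.callback()
def main(
    version: bool = typer.Option(
        False, "--version", "-v", help="Print the current version."
    ),
):
    if version:
        typer.echo("0.0.0")  # hypothetical version string
        raise typer.Exit()

@app.command()
def serve(port: int = typer.Option(8000, help="Port to bind.")):
    typer.echo(f"would serve on port {port}")

if __name__ == "__main__":
    app()
```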
import os
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Redis
from langchain_text_splitters import RecursiveCharacterTextSplitter
from rag_redis.config import EMBED_MODEL, INDEX_NAME,... | [
"langchain_community.vectorstores.Redis.from_texts",
"langchain_community.document_loaders.UnstructuredFileLoader",
"langchain_community.embeddings.HuggingFaceEmbeddings",
"langchain_text_splitters.RecursiveCharacterTextSplitter"
] | [((726, 818), 'langchain_text_splitters.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1500)', 'chunk_overlap': '(100)', 'add_start_index': '(True)'}), '(chunk_size=1500, chunk_overlap=100,\n add_start_index=True)\n', (756, 818), False, 'from langchain_text_splitters import ... |
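This row is a Redis ingest pipeline with the exact splitter settings shown in its extract tuple. A minimal sketch, assuming a local Redis at the default port; the source file and index name are hypothetical:

```python
from langchain_community.document_loaders import UnstructuredFileLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import Redis
from langchain_text_splitters import RecursiveCharacterTextSplitter

loader = UnstructuredFileLoader("company_report.pdf")  # hypothetical file
splitter = RecursiveCharacterTextSplitter(
    chunk_size=1500, chunk_overlap=100, add_start_index=True
)
chunks = splitter.split_documents(loader.load())

_ = Redis.from_texts(
    texts=[c.page_content for c in chunks],
    embedding=HuggingFaceEmbeddings(),  # defaults to a sentence-transformers model
    index_name="rag-redis",  # hypothetical index name
    redis_url="redis://localhost:6379",
)
```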
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.vectorstores import FAISS
loader = CSVLoader("/Users/harrisonchase/Downloads/titanic.csv")
docs = loader.load()
index_creator = VectorstoreIndexCreator(vectorstore_cls=FAISS)
inde... | [
"langchain_community.document_loaders.CSVLoader",
"langchain.indexes.VectorstoreIndexCreator"
] | [((174, 229), 'langchain_community.document_loaders.CSVLoader', 'CSVLoader', (['"""/Users/harrisonchase/Downloads/titanic.csv"""'], {}), "('/Users/harrisonchase/Downloads/titanic.csv')\n", (183, 229), False, 'from langchain_community.document_loaders import CSVLoader\n'), ((268, 314), 'langchain.indexes.VectorstoreInde... |
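The row above builds the index but truncates before querying it. A usage sketch following the same constructor call; the CSV path and question are hypothetical, the model choice is an assumption, and querying requires `OPENAI_API_KEY`:

```python
from langchain.indexes import VectorstoreIndexCreator
from langchain_community.document_loaders import CSVLoader
from langchain_community.llms import OpenAI  # hypothetical model choice
from langchain_community.vectorstores import FAISS

loader = CSVLoader("titanic.csv")  # hypothetical local copy
index = VectorstoreIndexCreator(vectorstore_cls=FAISS).from_loaders([loader])

# Ask a natural-language question over the indexed rows.
print(index.query("How many rows mention Southampton?", llm=OpenAI(temperature=0)))
```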
from langchain_core.prompts.prompt import PromptTemplate
# There are a few different templates to choose from
# These are just different ways to generate hypothetical documents
web_search_template = """Please write a passage to answer the question
Question: {question}
Passage:"""
sci_fact_template = """Please write a... | [
"langchain_core.prompts.prompt.PromptTemplate.from_template"
] | [((716, 765), 'langchain_core.prompts.prompt.PromptTemplate.from_template', 'PromptTemplate.from_template', (['web_search_template'], {}), '(web_search_template)\n', (744, 765), False, 'from langchain_core.prompts.prompt import PromptTemplate\n')] |
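These templates implement HyDE: generate a hypothetical passage and embed that instead of the raw question. A sketch of the generation half as an LCEL chain; the model choice is a hypothetical example and assumes `langchain-openai` is installed with `OPENAI_API_KEY` set:

```python
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts.prompt import PromptTemplate
from langchain_openai import ChatOpenAI  # hypothetical model choice

web_search_template = """Please write a passage to answer the question
Question: {question}
Passage:"""
prompt = PromptTemplate.from_template(web_search_template)

# The generated passage would then be embedded for retrieval in place of
# the raw question.
hyde_chain = prompt | ChatOpenAI(temperature=0) | StrOutputParser()
print(hyde_chain.invoke({"question": "What are the side effects of ibuprofen?"}))
```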
from pathlib import Path
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.graphs import Neo4jGraph
from langchain_community.vectorstores import Neo4jVector
from langchain_text_splitters import TokenTextSplitter
txt_... | [
"langchain_community.embeddings.openai.OpenAIEmbeddings",
"langchain_community.graphs.Neo4jGraph",
"langchain_text_splitters.TokenTextSplitter"
] | [((371, 383), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (381, 383), False, 'from langchain_community.graphs import Neo4jGraph\n'), ((513, 564), 'langchain_text_splitters.TokenTextSplitter', 'TokenTextSplitter', ([], {'chunk_size': '(512)', 'chunk_overlap': '(24)'}), '(chunk_size=512, chun... |
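This row loads text, splits it with the token settings in its extract tuple, and indexes it into Neo4j. A minimal sketch; the source file and index name are hypothetical, and it assumes `NEO4J_URI`/`NEO4J_USERNAME`/`NEO4J_PASSWORD` plus `OPENAI_API_KEY` in the environment:

```python
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings.openai import OpenAIEmbeddings
from langchain_community.vectorstores import Neo4jVector
from langchain_text_splitters import TokenTextSplitter

docs = TextLoader("dune.txt").load()  # hypothetical source file
chunks = TokenTextSplitter(chunk_size=512, chunk_overlap=24).split_documents(docs)

store = Neo4jVector.from_documents(
    chunks,
    OpenAIEmbeddings(),
    index_name="dune_chunks",  # hypothetical index name
)
print(store.similarity_search("Who is Paul Atreides?", k=2)[0].page_content)
```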
from langchain_community.graphs import Neo4jGraph
graph = Neo4jGraph()
graph.query(
"""
MERGE (m:Movie {name:"Top Gun"})
WITH m
UNWIND ["Tom Cruise", "Val Kilmer", "Anthony Edwards", "Meg Ryan"] AS actor
MERGE (a:Actor {name:actor})
MERGE (a)-[:ACTED_IN]->(m)
"""
)
| [
"langchain_community.graphs.Neo4jGraph"
] | [((59, 71), 'langchain_community.graphs.Neo4jGraph', 'Neo4jGraph', ([], {}), '()\n', (69, 71), False, 'from langchain_community.graphs import Neo4jGraph\n')] |
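The row above writes the movie graph; a matching read query over the same schema, using the parameterized form of `Neo4jGraph.query` (connection settings are assumed to come from the usual `NEO4J_*` environment variables):

```python
from langchain_community.graphs import Neo4jGraph

graph = Neo4jGraph()
rows = graph.query(
    """
    MATCH (a:Actor)-[:ACTED_IN]->(m:Movie {name: $name})
    RETURN a.name AS actor
    """,
    params={"name": "Top Gun"},
)
print([r["actor"] for r in rows])  # expected: the four actors merged above
```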