Loading the Models
ChatGPT
import os
from langchain_openai import ChatOpenAI
# Load the model
model_name = "gpt-4o-mini"
openai_api_key = os.getenv("OPENAI_API_KEY")
openai = ChatOpenAI(model=model_name, api_key=openai_api_key)
prompt = "日本の首都はどこですか?"
# Generate a response
response = openai.invoke(prompt)
print(response.content)
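Because all three clients implement LangChain's shared chat-model interface, invoke also accepts a list of role-tagged messages instead of a bare string. A minimal sketch (the system-prompt wording here is illustrative, not from the original):

from langchain_core.messages import HumanMessage, SystemMessage

# Build a conversation with an explicit system role
messages = [
    SystemMessage(content="You are a concise assistant."),
    HumanMessage(content="What is the capital of Japan?"),
]
response = openai.invoke(messages)
print(response.content)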
Gemini
import os
from langchain_google_genai import ChatGoogleGenerativeAI
# Load the model
model_name = "gemini-1.5-flash"
gemini_api_key = os.getenv("GOOGLEAI_API_KEY")
gemini = ChatGoogleGenerativeAI(model=model_name, google_api_key=gemini_api_key)
prompt = "日本の首都はどこですか?"
# Generate a response
response = gemini.invoke(prompt)
print(response.content)
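The same runnable interface also supports token-by-token streaming, which is useful for long answers. A minimal sketch using the standard stream method (it works identically for the other two models):

# Stream the response chunk by chunk instead of waiting for the full message
for chunk in gemini.stream(prompt):
    print(chunk.content, end="", flush=True)
print()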
Local LLM
from langchain_openai import ChatOpenAI

# Load the model
# Specify the model name
model_name = "llama-3.3-70b-instruct"

# Create a ChatOpenAI instance pointed at the local server
llm = ChatOpenAI(
    base_url="http://localhost:1234/v1",  # OpenAI-compatible endpoint (default port for LM Studio)
    model=model_name,
    api_key="not-needed",  # local servers generally ignore the key, but the client requires a value
    temperature=0,  # low temperature for reproducibility
)
prompt = "日本の首都はどこですか?"
# Generate a response
response = llm.invoke(prompt)
print(response.content)
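Before invoking the local model, it can help to confirm the server is actually running. A hedged sketch, assuming the local runtime exposes the OpenAI-compatible /v1/models route (LM Studio and most OpenAI-compatible servers do):

import requests

# List the models the local server currently serves
resp = requests.get("http://localhost:1234/v1/models", timeout=5)
resp.raise_for_status()
print([m["id"] for m in resp.json()["data"]])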