Learn how to combine LangChain with Llama 3 — running locally via Ollama or through a hosted API — to build intelligent agents, RAG chatbots, and production-ready AI tools.
# 1. Install the Ollama runtime, which serves Llama 3 locally:
# https://ollama.com/download
# 2. Pull the Llama 3 8B model weights (one-time, several GB download)
ollama pull llama3:8b
# 3. Install LangChain plus the Ollama integration packages
pip install langchain langchain-community langchain-ollama langchain-core
pip install chromadb sentence-transformers # vector store + embeddings, needed only for the RAG example
# --- Minimal example: one-shot completion against local Llama 3 ---
from langchain_ollama import OllamaLLM

# Point LangChain at the Llama 3 8B model served by the local Ollama daemon.
model = OllamaLLM(model="llama3:8b")

# invoke() sends a single prompt and blocks until the full reply is generated.
answer = model.invoke("Explain quantum computing in simple terms for a 10-year-old.")
print(answer)
from langchain_ollama import OllamaEmbeddings, ChatOllama
from langchain_community.vectorstores import Chroma
from langchain_core.documents import Document
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate
# --- Retrieval-Augmented Generation over a small in-memory corpus ---

# Toy corpus; swap in your own loaded PDFs / text files here.
docs = [
    Document(page_content="Python 3.14 introduces JIT compilation..."),
    Document(page_content="LangChain is a framework for building LLM applications..."),
]

# Embed the documents and index them in an in-memory Chroma store.
# NOTE(review): llama3:8b does work as an embedder via Ollama, but a dedicated
# embedding model (e.g. nomic-embed-text) usually retrieves better — confirm.
embedder = OllamaEmbeddings(model="llama3:8b")
store = Chroma.from_documents(docs, embedder)

# Retrieve the 3 most similar chunks for each query.
retriever = store.as_retriever(search_kwargs={"k": 3})

# Chat-tuned Llama 3 answers strictly from the retrieved context.
chat_model = ChatOllama(model="llama3:8b")
qa_prompt = ChatPromptTemplate.from_template(
    """Answer the question based only on the following context:
{context}
Question: {input}"""
)

# "Stuff" every retrieved document into the prompt, then wire
# retrieval -> answer generation into a single runnable chain.
combine_chain = create_stuff_documents_chain(chat_model, qa_prompt)
rag_chain = create_retrieval_chain(retriever, combine_chain)

# Run one query end-to-end; the chain's result dict carries an "answer" key.
response = rag_chain.invoke({"input": "What is new in Python 3.14?"})
print(response["answer"])
from langgraph.graph import StateGraph, END
from typing import TypedDict, Annotated
import operator
class AgentState(TypedDict):
    """Shared state flowing through the graph.

    ``Annotated[list, operator.add]`` marks ``messages`` with a reducer:
    lists returned by successive nodes are concatenated onto the state
    rather than replacing it.
    """

    messages: Annotated[list, operator.add]
def researcher(state):
    """Stub research node: ignores the incoming state and contributes a
    single research-summary message to the shared ``messages`` list."""
    summary = "Research done: AI in 2026 is advanced."
    return {"messages": [summary]}
def writer(state):
    """Stub writer node: contributes a completion notice to ``messages``,
    regardless of the incoming state."""
    notice = "Article written."
    return {"messages": [notice]}
# Assemble the two-step agent pipeline: researcher -> writer -> END.
workflow = StateGraph(AgentState)
workflow.add_node("researcher", researcher)
workflow.add_node("writer", writer)

# BUG FIX: LangGraph requires an explicit entry point; without one,
# workflow.compile() raises "Graph must have an entrypoint".
workflow.set_entry_point("researcher")
workflow.add_edge("researcher", "writer")
workflow.add_edge("writer", END)

# Compile into a runnable graph and execute once. The operator.add reducer
# on AgentState.messages appends each node's output onto the seed message.
graph = workflow.compile()
result = graph.invoke({"messages": ["Write article on AI 2026"]})
print(result)