Integrate Mixedbread's powerful embedding and reranking capabilities into your LlamaIndex projects. This guide covers installation, quick start examples for both Python and TypeScript, advanced usage scenarios, and links to detailed documentation for seamless integration with your natural language processing workflows.
Generate text embeddings for queries and documents.
"""Quick start: embed and query a document with Mixedbread AI embeddings."""
from llama_index.core import Document, Settings, VectorStoreIndex
from llama_index.embeddings.mixedbreadai import MixedbreadAIEmbedding

# Configure Mixedbread as the global embedding model.
Settings.embed_model = MixedbreadAIEmbedding(
    api_key="your_api_key_here",
    model_name="mixedbread-ai/mxbai-embed-large-v1",
)

# Create and index a single document.
document = Document(text="The true source of happiness.", id_="bread")
index = VectorStoreIndex.from_documents([document])

# Query the index. The mxbai-embed-large-v1 model expects retrieval queries
# to carry this instruction prefix, so it is kept verbatim in the query text.
query_engine = index.as_query_engine()
query = "Represent this sentence for searching relevant passages: What is bread?"
results = query_engine.query(query)
print(results)
"""Example: retrieval with Mixedbread AI reranking and an OpenAI LLM."""
from llama_index.core import Document, Settings, VectorStoreIndex
from llama_index.embeddings.mixedbreadai import MixedbreadAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.postprocessor.mixedbreadai_rerank import MixedbreadAIRerank

# Set up the OpenAI LLM used for response synthesis.
Settings.llm = OpenAI(model="gpt-3.5-turbo", temperature=0.1)

# Use Mixedbread embeddings for indexing and retrieval (without this, the
# example would fall back to the default embedding model).
Settings.embed_model = MixedbreadAIEmbedding(
    api_key="your_api_key_here",
    model_name="mixedbread-ai/mxbai-embed-large-v1",
)

# Create and index a document.
document = Document(text="This is a sample document.", id_="sampleDoc")
index = VectorStoreIndex.from_documents([document])

# Set up the reranker: keep the best 4 of the retrieved candidates.
node_postprocessor = MixedbreadAIRerank(
    api_key="your_api_key_here",
    top_n=4,
)

# Build a query engine that retrieves 5 candidates and reranks them down to 4.
# NOTE: as_query_engine builds its own retriever from similarity_top_k; a
# prebuilt retriever is composed via RetrieverQueryEngine instead.
query_engine = index.as_query_engine(
    similarity_top_k=5,
    node_postprocessors=[node_postprocessor],
)

response = query_engine.query("Where did the author grow up?")
print(response)