Skip to content

Typesense

Typesense is an open-source, typo-tolerant search engine optimized for instant search experiences. It is designed to be simple to set up and use, making it a great choice for developers looking to add powerful search capabilities to their applications.

RAG capabilities

We use Typesense to power our Retrieval-Augmented Generation (RAG) capabilities. By indexing relevant documents and data in Typesense, we can efficiently retrieve information to enhance the responses generated by our AI models. This allows us to provide more accurate and contextually relevant answers to user queries.

Create chat history collection

from typesense import Client

# Collection that persists chat-history messages for RAG conversations.
conversation_store_collection_name = "conversation_store"

# Client pointed at a local single-node Typesense deployment.
_client_config = {
    "api_key": "abcd",
    "nodes": [{"host": "localhost", "port": "8108", "protocol": "http"}],
    "connection_timeout_seconds": 2,
}
client = Client(_client_config)

# role/message are stored verbatim but excluded from the search index
# ("index": False); timestamp allows chronological retrieval of messages.
_fields = [
    dict(name="conversation_id", type="string"),
    dict(name="model_id", type="string"),
    dict(name="role", type="string", index=False),
    dict(name="message", type="string", index=False),
    dict(name="timestamp", type="int32"),
]
conversation_store_schema = {
    "name": conversation_store_collection_name,
    "fields": _fields,
}

result = client.collections.create(conversation_store_schema)
print(result)

Create embedding collection

from typesense import Client

# Client pointed at a local single-node Typesense deployment.
client = Client(
    dict(
        api_key="abcd",
        nodes=[dict(host="localhost", port="8108", protocol="http")],
        connection_timeout_seconds=2,
    )
)

# The "embedding" field is auto-populated by Typesense: vectors are
# generated from the title and text fields via the built-in
# ts/all-MiniLM-L12-v2 model declared in model_config.
_embedding_field = {
    "name": "embedding",
    "type": "float[]",
    "embed": {
        "from": ["title", "text"],
        "model_config": {"model_name": "ts/all-MiniLM-L12-v2"},
    },
}

embedding_schema = dict(
    name="CollectiveAgreement",
    fields=[
        {"name": "id", "type": "string"},
        {"name": "title", "type": "string", "facet": False},
        {"name": "text", "type": "string", "facet": False},
        _embedding_field,
    ],
)

result = client.collections.create(embedding_schema)
print(result)