October 19, 2024

Member Variables in ServiceContext

@dataclass
class ServiceContext:
    # The LLM used to generate natural language responses to queries.
    # If not provided, defaults to OpenAI's gpt-3.5-turbo.
    # If your OpenAI key is not set, falls back to llama2-chat-13B from Llama.cpp.
    llm: LLM

    # The PromptHelper object that helps with truncating and repacking text chunks to fit in the LLM's context window.
    prompt_helper: PromptHelper

    # The embedding model used to generate vector representations of text.
    # If not provided, defaults to OpenAI's text-embedding-ada-002.
    # If your OpenAI key is not set, falls back to BAAI/bge-small-en.
    embed_model: BaseEmbedding

    # The parser that converts documents into nodes.
    node_parser: NodeParser

    # The callback manager that calls its handlers on events; provides basic logging and tracing capabilities.
    callback_manager: CallbackManager

    @classmethod
    def from_defaults(cls, ...) -> "ServiceContext":
        ...
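
In practice you rarely construct the dataclass directly; from_defaults fills in any field you leave out with the defaults described above. A minimal sketch of a partial override, assuming a pre-0.10 llama_index release and an OPENAI_API_KEY in the environment:

from llama_index import ServiceContext
from llama_index.llms import OpenAI

# Override only the LLM; the prompt helper, embedding model, node parser,
# and callback manager all fall back to their defaults.
service_context = ServiceContext.from_defaults(
    llm=OpenAI(model='gpt-3.5-turbo', temperature=0)
)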

A helper function that builds and returns a fully customized ServiceContext:

# Imports assume a pre-0.10 llama_index release, where ServiceContext
# is exported at the top level.
from llama_index import PromptHelper, ServiceContext
from llama_index.callbacks import CallbackManager, LlamaDebugHandler
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI
from llama_index.node_parser import SimpleNodeParser
from llama_index.text_splitter import TokenTextSplitter


def get_service_context() -> ServiceContext:
    # LLM used for response synthesis. The original 'text-davinci-003'
    # has been retired by OpenAI, so a current chat model is used instead.
    llm = OpenAI(
        model='gpt-3.5-turbo',
        temperature=0,
        max_tokens=256
    )

    # Embedding model; OpenAIEmbedding defaults to text-embedding-ada-002.
    embed_model = OpenAIEmbedding()

    # Parse documents into nodes of 1024 tokens with a 20-token overlap.
    node_parser = SimpleNodeParser.from_defaults(
        text_splitter=TokenTextSplitter(chunk_size=1024, chunk_overlap=20)
    )

    # Repack retrieved chunks to fit a 4096-token context window,
    # reserving 256 tokens for the generated output.
    prompt_helper = PromptHelper(
        context_window=4096,
        num_output=256,
        chunk_overlap_ratio=0.1,
        chunk_size_limit=None
    )

    # Debug handler prints a trace of callback events when each trace ends.
    llama_debug = LlamaDebugHandler(print_trace_on_end=True)
    callback_manager = CallbackManager([llama_debug])

    service_context = ServiceContext.from_defaults(
        llm=llm,
        embed_model=embed_model,
        node_parser=node_parser,
        callback_manager=callback_manager,
        prompt_helper=prompt_helper
    )
    return service_context
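
To put the context to work, pass it anywhere LlamaIndex accepts a service_context. A short usage sketch, assuming a pre-0.10 llama_index release and a local 'data' directory of documents (both are illustrative assumptions):

from llama_index import SimpleDirectoryReader, VectorStoreIndex

documents = SimpleDirectoryReader('data').load_data()
index = VectorStoreIndex.from_documents(
    documents,
    service_context=get_service_context()
)
response = index.as_query_engine().query('What do these documents cover?')
print(response)

Because the LlamaDebugHandler was created with print_trace_on_end=True, a trace of the callback events (node parsing, embedding, LLM calls) prints after the query completes.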