Hi! I’ve been trying to use the LangChain library in Palantir. However, the only method I’ve found requires a personal OpenAI API key, using the external resource method in Palantir.
Is there a way to utilize our company’s Palantir AIP tokens instead?
3 Likes
To use langchain in a transform, you could use a Palantir-provided model in a transform and wrap that in a small wrapper in langchain: https://python.langchain.com/v0.2/docs/how_to/custom_llm/
Thank you for your feedback.
I tried my best, but the only feasible option I could get working is the palantir_llm.invoke(question) call shown below.
My goal in applying LangChain is to use its various features, including pipelines, but those do not work with this small wrapper.
from transforms.api import transform, Input, Output
from palantir_models.transforms import OpenAiGptChatLanguageModelInput
from palantir_models.models import OpenAiGptChatLanguageModel
from language_model_service_api.languagemodelservice_api_completion_v3 import GptChatCompletionRequest
from language_model_service_api.languagemodelservice_api import ChatMessage, ChatMessageRole
import pandas as pd
import logging
from typing import Optional, List, Any, Tuple
# Configure logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class LLM:
    """Minimal interface that concrete language-model wrappers implement.

    Subclasses must override both members; the base implementations
    deliberately raise ``NotImplementedError``.
    """

    @property
    def _llm_type(self) -> str:
        """Short identifier of the underlying model type (logging only)."""
        raise NotImplementedError

    def _call(self, prompt: str, stop: Optional[List[str]] = None, **kwargs: Any) -> str:
        """Produce a completion for ``prompt``; must be overridden."""
        raise NotImplementedError
class PalantirLLM(LLM):
    """A custom LLM wrapper for Palantir-provided language models.

    Fix: the ``LLM`` base contract is ``_call``, but this class previously
    implemented only ``invoke`` and never ``_call`` — so langchain-style
    pipelines, which dispatch through ``_call``, could not use it. The real
    logic now lives in ``_call`` and ``invoke`` remains as a backward-
    compatible delegate for existing callers.
    """

    def __init__(self, model: OpenAiGptChatLanguageModel):
        # Model handle injected by the transform runtime.
        self.model = model
        logger.info("PalantirLLM initialized with model: %s", model)

    def _call(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> str:
        """Run the LLM on the given input.

        Args:
            prompt: User question forwarded to the chat model.
            stop: Accepted for interface compatibility; currently not
                forwarded to the service (TODO: confirm whether the
                completion request supports stop sequences).

        Returns:
            The model's reply text, or the sentinel string
            ``"Error generating response"`` if the service call fails
            (best-effort behavior preserved from the original code).
        """
        logger.info("Generating response for prompt: %s", prompt)
        system_prompt = "Answer the following question"
        request = GptChatCompletionRequest(
            [ChatMessage(ChatMessageRole.SYSTEM, system_prompt), ChatMessage(ChatMessageRole.USER, prompt)]
        )
        try:
            resp = self.model.create_chat_completion(request)
            content = resp.choices[0].message.content
            logger.info("Response received: %s", content)
            return content
        except Exception:
            # logger.exception records the full traceback, not just str(e).
            logger.exception("Error during API call")
            return "Error generating response"

    def invoke(
        self,
        prompt: str,
        stop: Optional[List[str]] = None,
        **kwargs: Any,
    ) -> str:
        """Run the LLM on the given input (delegates to ``_call``)."""
        return self._call(prompt, stop, **kwargs)

    @property
    def _llm_type(self) -> str:
        """Get the type of language model used by this chat model. Used for logging purposes only."""
        return "palantir"
@transform(
    model=OpenAiGptChatLanguageModelInput("ri.language-model-service..language-model.gpt-4-o"),
    output=Output("ri.foundry.main.dataset.83708b87-7551-44f3-8579-bce00edb67e5"),
)
def compute_answer(ctx, model: OpenAiGptChatLanguageModel, output):
    """Ask the Palantir-provided GPT model a fixed question and persist
    the (question, answer) pair as a one-row dataset.

    Fix: the previous version swallowed any failure while writing the
    output dataset (logged it and returned normally), so the transform
    looked successful while producing no data. The exception is now
    logged with its traceback and re-raised so the job fails visibly.
    """
    logger.info("Starting compute_answer transform")
    question = "ChatGPT에 대해 설명 해 주세요"
    palantir_llm = PalantirLLM(model)
    answer = palantir_llm.invoke(question)
    logger.info("Generated answer: %s", answer)
    try:
        answer_df = pd.DataFrame({"question": [question], "answer": [answer]})
        spark_df = ctx.spark_session.createDataFrame(answer_df)
        output.write_dataframe(spark_df)
        logger.info("Answer saved to dataset")
    except Exception:
        # Re-raise: a silent save failure would leave the dataset empty
        # while the transform reports success.
        logger.exception("Error saving to dataset")
        raise
Are you getting an error? You could try using the debugger to inspect it and make sure it’s behaving like you expect.
There is no error, but I cannot use the full range of LangChain features with that wrapper code…