If you collect feedback from your users, such as a thumbs up or down, you can record that feedback on your trace logs.
Record feedback
To record feedback, you need the trace_id of the trace log you want to record feedback on. You can get the trace_id using the get_current_trace_id() helper function, or from the completion method's response.
from parea import Parea, get_current_trace_id, trace
from parea.schemas import FeedbackRequest

p = Parea(api_key="PAREA_API_KEY")


@trace
def argument_chain(messages: list[dict]) -> tuple[str, str]:
    # get_current_trace_id returns the trace_id of the currently active trace
    trace_id = get_current_trace_id()
    # call_llm is a placeholder for your own LLM call
    return call_llm(messages), trace_id


result, trace_id = argument_chain([{"role": "user", "content": "Hello"}])

p.record_feedback(
    FeedbackRequest(
        trace_id=trace_id,
        # insert your user score here; the score must be a float
        score=USER_SCORE,
        # Optionally, you can also provide a ground truth or
        # target answer. This could also come from your user.
        target="ground_truth",
    )
)
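For example, a thumbs up/down widget maps naturally onto the float score that record_feedback expects. Here is a minimal sketch, assuming a hypothetical thumbs_to_score helper (not part of the Parea SDK) that converts the binary signal into 1.0 or 0.0:

def thumbs_to_score(thumbs_up: bool) -> float:
    # hypothetical helper: map a binary thumbs up/down to a float score
    return 1.0 if thumbs_up else 0.0


p.record_feedback(
    FeedbackRequest(
        trace_id=trace_id,
        score=thumbs_to_score(True),  # the user gave a thumbs up
    )
)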
Alternatively, if you used p.completion for your LLM request, you can access the inference_id field on the response to get the trace_id.
from parea.schemas import Completion, CompletionResponse, FeedbackRequest, LLMInputs, Message, Role

result: CompletionResponse = p.completion(
    Completion(
        llm_configuration=LLMInputs(
            model="gpt-3.5-turbo",
            messages=[Message(role=Role.user, content="Hello")],
        )
    )
)

p.record_feedback(
    FeedbackRequest(
        # the inference_id of the completion response doubles as the trace_id
        trace_id=result.inference_id,
        score=USER_SCORE,
    )
)
import { Parea, getCurrentTraceId, trace } from "parea-ai";

const p = new Parea("PAREA_API_KEY");

const argumentChain = trace("argumentChain", async (messages) => {
  // getCurrentTraceId returns the trace_id of the currently traced function
  const traceId = getCurrentTraceId();
  // call_llm is a placeholder for your own LLM call
  const result = await call_llm(messages);
  return [result, traceId];
});

const [result, traceId] = await argumentChain([{ role: "user", content: "Hello" }]);

await p.recordFeedback({
  trace_id: traceId,
  // insert your user score here; the score must be a float
  score: USER_SCORE,
});
Alternatively, if you used p.completion for your LLM request, you can access the inference_id field on the response to get the trace_id.
const result = await p.completion({
  llm_configuration: {
    model: "gpt-3.5-turbo",
    messages: [{ role: "user", content: "Hello" }],
  },
});

await p.recordFeedback({
  trace_id: result.inference_id,
  score: USER_SCORE,
});