diff --git a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py
index d8678eab22be..eae5ae042e80 100644
--- a/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py
+++ b/sdk/evaluation/azure-ai-evaluation/azure/ai/evaluation/_evaluators/_groundedness/_groundedness.py
@@ -277,6 +277,24 @@ def _validate_context(self, context) -> bool:
 
     @override
     async def _do_eval(self, eval_input: Dict) -> Dict[str, Union[float, str]]:
+        # Import helper functions from base class module
+        from azure.ai.evaluation._evaluators._common._base_prompty_eval import (
+            _is_intermediate_response,
+            _preprocess_messages,
+        )
+
+        if _is_intermediate_response(eval_input.get("response")):
+            return self._not_applicable_result(
+                "Intermediate response. Please provide the agent's final response for evaluation.",
+                self._threshold,
+            )
+
+        # Preprocess messages if they are lists
+        if isinstance(eval_input.get("response"), list):
+            eval_input["response"] = _preprocess_messages(eval_input["response"])
+        if isinstance(eval_input.get("query"), list):
+            eval_input["query"] = _preprocess_messages(eval_input["query"])
+
         if eval_input.get("query", None) is None:
             return await super()._do_eval(eval_input)