Module: gen_ai_hub.evaluations.constants (generated documentation; source file: /home/jenkins/agent/workspace/ation_generative-ai-hub-sdk_main/gen_ai_hub/evaluations/constants.py)
Modules: (none)

Data (module-level constants; NOTE: some dict values below are truncated by the documentation generator and shown as "..."):
| ADDITIONAL_INFO_KEY = 'additional_info' AGGREGATIONS_TABLE_KEY = 'aggregation_result' AICORE_EXTRA_SUFFIX = '/lm' AICORE_LLM_COMPLETION_KEY = 'aicore_llm_completion' AICORE_LLM_GROUNDING_QUERY_KEY = 'grounding_query' AICORE_LLM_GROUNDING_RESPONSE_KEY = 'grounding_response' AICORE_LLM_PROMPT_TEMPLATE_KEY = 'prompt' AI_CORE_PREFIX = 'AICORE' AI_PROTOCOL_PREFIX = 'ai://' ALL_METRICS_COLUMN_MAPPING_KEY = 'all_metrics' AUTH_ENDPOINT_SUFFIX = '/oauth/token' AWS_ACCESS_KEY_ID = 'AWS_ACCESS_KEY_ID' AWS_OSS_BUCKET_URL_KEY = 'storage.ai.sap.com/bucket' AWS_OSS_PATH_PREFIX_URL_KEY = 'storage.ai.sap.com/pathPrefix' AWS_OSS_REGION_URL_KEY = 'storage.ai.sap.com/region' AWS_PROVIDER_KEY = 'aws' AWS_S3_OSS_TYPE_KEY = 'S3' AWS_SECRET_ACCESS_KEY = 'AWS_SECRET_ACCESS_KEY' AZURE_CONTENT_SAFETY_KEY = 'azure_content_safety' BERTSCORE_METRIC_ID = 'bert_score' BLEU_METRIC_ID = 'bleu' COLUMN_MAPPING_DEFAULT_KEYS = ['prompt', 'all_metrics'] COMPLETIONS_TABLE_KEY = 'submission_result' COMPLETION_ENDPOINT_V2 = '/v2/completion' CONFIG_FILE_ENV_VAR = 'AICORE_CONFIG' CONFIG_KEY = 'config' CONTENT_FILTER_ON_INPUT_METRIC_ID = 'content_filter_on_input' CONTENT_FILTER_ON_OUTPUT_METRIC_ID = 'content_filter_on_output' CONTENT_KEY = 'content' CONTENT_TYPE = 'application/json' CSV_FILE_TYPE = 'csv' DATASET_FOLDER_KEY = 'testdata' DEBUG_ENV_VAR_NAME = 'DEBUG' DEFAULT_HOME_PATH = '/home/node/.aicore' DEFAULT_KEY = 'default' DEFAULT_ORCHESTRATION_CONFIG_NAME = 'defaultOrchestrationConfig' DEFAULT_SECRET_SETUP_KEY = 'default_secret' DEFAULT_TIMEOUT = 3600 DEPLOYMENT_URL_KEY = 'deploymentUrl' ERROR_KEY = 'error' EVALUATIONS_ARTIFACT_DESCRIPTION = 'Artifact for Evaluations Service' EVALUATIONS_ARTIFACT_PREFIX_KEY = 'evaluation-artifact-' EVALUATIONS_CONFIG_PREFIX_KEY = 'evaluation-config-' EVALUATIONS_SCENARIO_ID = 'genai-evaluations' EVALUATION_METRICS_ENDPOINT = '/evaluationMetrics' EVAL_ORCHESTRATION_CONFIG_PREFIX_NAME = 'evalOrchestrationConfig-' EXACT_MATCH_METRIC_ID = 'exact_match' 
FILTERING_MODULE_CONFIG_KEY = 'filtering_module_config' FILTERS_KEY = 'filters' GROUNDING_MODULE_CONFIG_KEY = 'grounding_module_config' HOME_PATH_ENV_VAR = 'AICORE_HOME' ID = 'id' IMAGE_URL_KEY = 'image_url' INPUT_SECRET_SETUP_KEY = 'input_secret' INPUT_VARIABLE_REGEX_PATTERN = r'(?<=\{\{).+?(?=\}\})' JSONL_FILE_TYPE = 'jsonl' JSON_FILE_TYPE = 'json' JSON_SCHEMA_KEY = 'json_schema' JSON_SCHEMA_MATCH_METRIC_ID = 'json_schema_match' LANGUAGE_KEY = 'language' LANGUAGE_MATCH_METRIC_ID = 'f3ad2f40-8fcd-41ba-8a9a-fb82469bf99b' LATEST_MODEL_VERSION_KEY = 'latest' LLAMA_GUARD_CONTENT_SAFETY_KEY = 'llama_guard_3_8b' LLM_AS_A_JUDGE = 'llm-as-a-judge' LLM_MODULE_CONFIG_KEY = 'llm_module_config' LLM_MODULE_V2_NAME_KEY = 'name' LLM_MODULE_V2_PARAMETERS_KEY = 'parameters' LLM_MODULE_V2_VERSION_KEY = 'version' METRICS_TABLE_KEY = 'evaluation_result' METRIC_SERVER_ENDPOINT = '/lm/evaluationMetrics' METRIC_TO_DEPENDENT_VARIABLES_DICT = {'bert_score': ['reference'], 'bleu': ['reference'], 'content_filter_on_input': [], 'content_filter_on_output': [], 'exact_match': ['reference'], 'f3ad2f40-8fcd-41ba-8a9a-fb82469bf99b': ['language'], 'json_schema_match': ['json_schema'], 'pointwise_answer_relevance': [], 'pointwise_conciseness': [], 'pointwise_correctness': [], ...} MODEL_CONFIGURATION_KEY = 'model_configuration' MODEL_FILTER_LIST_KEY = 'modelFilterList' MODEL_FILTER_LIST_TYPE_KEY = 'modelFilterListType' MODEL_KEY = 'model' MODEL_NAME_KEY = 'model_name' MODEL_VERSION_KEY = 'model_version' MODULES_KEY = 'modules' MODULE_CONFIGURATIONS_KEY = 'module_configurations' NAME_KEY = 'name' OBJECT_STORE_SECRET_EXISTS_MESSAGE = 'Secret exists' ORCHESTRATION_CONFIGURATION_V2 = {'config': {'modules': {'prompt_templating': {'model': {'name': '', 'params': {}, 'version': ''}, 'prompt': {'defaults': {}, 'template': [{...}]}}}}} ORCHESTRATION_CONFIG_KEY = 'orchestration_config' ORCHESTRATION_CONFIG_TEMPLATE_V2 = {'modules': {'prompt_templating': {'model': {}, 'prompt': {}}}} 
ORCHESTRATION_GLOBAL_SCENARIO_NAME = 'orchestration' ORCHESTRATION_REGISTRY_ENDPOINT = '/registry/v2/orchestrationConfigs' ORCHESTRATION_URL_SETUP_KEY = 'orchestration_url' POINTWISE_ANSWER_RELEVANCE_METRIC_ID = 'pointwise_answer_relevance' POINTWISE_CONCISENESS_METRIC_ID = 'pointwise_conciseness' POINTWISE_CORRECTNESS_METRIC_ID = 'pointwise_correctness' POINTWISE_INSTRUCTION_FOLLOWING_METRIC_ID = 'pointwise_instruction_following' POINTWISE_RAG_COMPLETENESS_METRIC_ID = 'pointwise_rag_completeness' POINTWISE_RAG_CONTEXT_PRECISION_METRIC_ID = 'pointwise_rag_context_precision' POINTWISE_RAG_CONTEXT_RELEVANCE_METRIC_ID = 'pointwise_rag_context_relevance' POINTWISE_RAG_GROUNDEDNESS_METRIC_ID = 'pointwise_rag_groundedness' PREDEFINED_SYSTEM_VARIABLES_LIST = ['aicore_llm_completion', 'prompt', 'grounding_query', 'grounding_response'] PROFILE_ENV_VAR = 'AICORE_PROFILE' PROMPT_KEY = 'prompt' PROMPT_REGISTRY_CONTENT_KEY = 'content' PROMPT_REGISTRY_ROLE_KEY = 'role' PROMPT_TEMPLATE_ID_KEY = 'id' PROMPT_TEMPLATE_METADATA_FIELDS = ['scenario', 'name', 'version'] PROMPT_TEMPLATE_NAME_KEY = 'name' PROMPT_TEMPLATE_SCENARIO_KEY = 'scenario' PROMPT_TEMPLATE_VERSION_KEY = 'version' PROMPT_TEMPLATING_KEY = 'prompt_templating' PROVIDER_NAME = 'PROVIDER_NAME' REFERENCE_KEY = 'reference' RESULTS_FILE_KEY = 'results.db' ROLE_KEY = 'role' ROUGE_METRIC_ID = 'rouge' RUNS_FOLDER_KEY = 'runs' SUFFIX_TO_FILE_TYPE = {'.csv': 'csv', '.json': 'json', '.jsonl': 'jsonl'} SUPPORTED_CUSTOM_JUDGE_METRIC_TYPES = ['free-form', 'structured', 'extension'] SUPPORTED_FILE_TYPES = ['json', 'jsonl', 'csv'] SUPPORTED_OSS_TYPES = ['S3'] SYSTEM_DEFINED_METRIC_MAPPING = {'bert_score': 'BERT Score', 'bleu': 'BLEU', 'content_filter_on_input': 'Content Filter on Input', 'content_filter_on_output': 'Content Filter on Output', 'exact_match': 'Exact Match', 'f3ad2f40-8fcd-41ba-8a9a-fb82469bf99b': 'Language Match', 'json_schema_match': 'JSON Schema Match', 'pointwise_answer_relevance': 'Pointwise Answer Relevance', 
'pointwise_conciseness': 'Pointwise Conciseness', 'pointwise_correctness': 'Pointwise Correctness', ...} SYSTEM_SUPPORTED_LLM_JUDGE_METRIC_IDS = ['pointwise_instruction_following', 'pointwise_correctness', 'pointwise_answer_relevance', 'pointwise_conciseness', 'pointwise_rag_groundedness', 'pointwise_rag_context_relevance', 'pointwise_rag_context_precision', 'pointwise_rag_completeness'] SYSTEM_SUPPORTED_METRIC_IDS = ['bert_score', 'bleu', 'rouge', 'json_schema_match', 'content_filter_on_input', 'content_filter_on_output', 'exact_match', 'f3ad2f40-8fcd-41ba-8a9a-fb82469bf99b', 'pointwise_instruction_following', 'pointwise_correctness', 'pointwise_answer_relevance', 'pointwise_conciseness', 'pointwise_rag_groundedness', 'pointwise_rag_context_relevance', 'pointwise_rag_context_precision', 'pointwise_rag_completeness'] TEMPLATE_KEY = 'template' TEMPLATE_REF_KEY = 'template_ref' TEMPLATING_MODULE_CONFIG_KEY = 'templating_module_config' TEST_PROMPT_TEMPLATE_NAME = 'evalPromptTemplateConfig-' TEST_PROMPT_TEMPLATE_VERSION = '1.0.0' TEST_TEMPLATE_STRING = 'What is Generative AI?' TRACKING_SERVICE_ENDPOINT = '/lm/metrics' TYPE_KEY = 'type' USER_KEY = 'user' VALIDATION_REGEX_PATTERN_FOR_INPUT_VARIABLES = '(?!_)(?!.*__)(?!.*--)^[a-zA-Z][a-zA-Z0-9_-]*[a-zA-Z0-9]$' VARIABLES_KEY = 'variables' VARIABLE_MAPPING_DATA_PREFIX_KEY = 'data/' VARIABLE_MAPPING_PROMPT_PREFIX_KEY = 'prompt/' VCAP_AICORE_SERVICE_NAME = 'aicore' VCAP_SERVICES_ENV_VAR = 'VCAP_SERVICES' | ||