gen_ai_hub.evaluations.utils.config_data_utils
gen_ai_hub/evaluations/utils/config_data_utils.py

 
Functions
get_dataset_data(dataset_config: gen_ai_hub.evaluations.models.dataset_config.Dataset,
                 ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client,
                 object_store_credentials: gen_ai_hub.evaluations._internal._models._AWSObjectStoreData,
                 resource_group: str,
                 error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)

get_orch_config_data(evaluation_config: gen_ai_hub.evaluations.models.evaluation_config.EvaluationConfig,
                     ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client,
                     gen_ai_hub_proxy_client: gen_ai_hub.proxy.gen_ai_hub_proxy.client.GenAIHubProxyClient,
                     error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
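
A minimal usage sketch for get_dataset_data, assuming a no-argument
ValidationCollector constructor and a Dataset / _AWSObjectStoreData built
elsewhere; the wrapper name load_eval_dataset and the "default" resource
group are illustrative, not part of the module:

    from ai_core_sdk.ai_core_v2_client import AICoreV2Client
    from gen_ai_hub.evaluations.helpers.collector import ValidationCollector
    from gen_ai_hub.evaluations.utils.config_data_utils import get_dataset_data

    def load_eval_dataset(dataset_cfg, object_store_creds, client: AICoreV2Client):
        # Hypothetical wrapper: gather validation problems on a collector
        # instead of letting them surface one at a time.
        collector = ValidationCollector()  # assumed no-arg constructor
        rows = get_dataset_data(
            dataset_config=dataset_cfg,                   # Dataset built elsewhere
            ai_core_client=client,
            object_store_credentials=object_store_creds,  # _AWSObjectStoreData instance
            resource_group="default",                     # illustrative value
            error_collector=collector,
        )
        return rows, collector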
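
Similarly for get_orch_config_data; here get_proxy_client from
gen_ai_hub.proxy is assumed to return the required GenAIHubProxyClient,
and resolve_orchestration_config is an illustrative wrapper name:

    from gen_ai_hub.proxy import get_proxy_client
    from gen_ai_hub.evaluations.helpers.collector import ValidationCollector
    from gen_ai_hub.evaluations.utils.config_data_utils import get_orch_config_data

    def resolve_orchestration_config(eval_cfg, client):
        # Hypothetical wrapper: eval_cfg is an EvaluationConfig built elsewhere.
        proxy = get_proxy_client("gen-ai-hub")  # assumed factory for GenAIHubProxyClient
        collector = ValidationCollector()       # assumed no-arg constructor
        orch_data = get_orch_config_data(
            evaluation_config=eval_cfg,
            ai_core_client=client,
            gen_ai_hub_proxy_client=proxy,
            error_collector=collector,
        )
        return orch_data, collector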

 
Data
EVALUATIONS_SCENARIO_ID = 'genai-evaluations'
PROMPT_TEMPLATE_ID_KEY = 'id'
PROMPT_TEMPLATE_METADATA_FIELDS = ['scenario', 'name', 'version']
TEST_PROMPT_TEMPLATE_NAME = 'evalPromptTemplateConfig-'
TEST_PROMPT_TEMPLATE_VERSION = '1.0.0'
logger = <Logger gen_ai_evaluations_sdk (INFO)>
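
The constants above suggest how transient evaluation prompt templates are
named and looked up; the composition below is an illustrative assumption,
not taken from the module source:

    import uuid

    from gen_ai_hub.evaluations.utils.config_data_utils import (
        EVALUATIONS_SCENARIO_ID,
        PROMPT_TEMPLATE_ID_KEY,
        PROMPT_TEMPLATE_METADATA_FIELDS,
        TEST_PROMPT_TEMPLATE_NAME,
        TEST_PROMPT_TEMPLATE_VERSION,
    )

    # Assumed composition: a unique template name built from the test prefix,
    # registered under the evaluations scenario at the fixed version.
    template_metadata = {
        "scenario": EVALUATIONS_SCENARIO_ID,
        "name": TEST_PROMPT_TEMPLATE_NAME + uuid.uuid4().hex[:8],
        "version": TEST_PROMPT_TEMPLATE_VERSION,
    }
    assert set(template_metadata) == set(PROMPT_TEMPLATE_METADATA_FIELDS)

    # A created template's identifier would then be read via PROMPT_TEMPLATE_ID_KEY:
    created = {"id": "0123-abcd"}  # stand-in for an API response
    template_id = created[PROMPT_TEMPLATE_ID_KEY]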