gen_ai_hub.evaluations.utils.aicore_utils
index
/home/jenkins/agent/workspace/ation_generative-ai-hub-sdk_main/gen_ai_hub/evaluations/utils/aicore_utils.py

 
Modules
       
json
os
time
uuid

 
Functions
       
build_s3_file_key(object_store_secret_metadata_details: Dict[str, str], artifact_url_relative_path: str, artifact_source: gen_ai_hub.evaluations.models.artifact_source.ArtifactSource)
call_orchestration_service_with_v2_config(test_orch_config: dict, ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, orchestration_deployment_url: str, resource_group: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector, proxy_client=None)
create_deployment_by_configuration_id(ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, configuration_id: str, resource_group: str)
create_llm_orchestration_deployment_url(ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, resource_group: str)
Creates the llm-orchestration configuration based on the orchestration global scenario, then creates a deployment using that configuration.
fetch_configuration_by_id(configuration_id: str, ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, resource_group: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
fetch_deployment_config(deployment_id: str, ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, resource_group: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
fetch_orchestration_config_from_registry(orchestration_registry_reference: str, ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
find_configuration_id_by_name(configurations_list: List[ai_api_client_sdk.models.configuration.Configuration], target_name: str)
generate_random_id()
Generates and returns a new random UUID on every call.
get_all_configurations(ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, resource_group: str, scenario_id: str) -> List[ai_api_client_sdk.models.configuration.Configuration]
get_running_deployments_by_configuration_id(ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, configuration_id: str, resource_group: str) -> List[ai_api_client_sdk.models.deployment.Deployment]
list_available_llm_models(ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, resource_group: str)
read_data_from_artifact(object_store_credentials: gen_ai_hub.evaluations._internal._models._AWSObjectStoreData, object_store_secret_metadata_details: Dict[str, str], s3_file_key: str, file_type: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
register_aicore_artifact(artifact_folder_path: str, ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, resource_group: str, object_store_secret_name: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
register_aicore_configuration(aicore_artifact_id: str, ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, resource_group: str, accumulated_config_data: gen_ai_hub.evaluations._internal._models._EvaluationConfigData, orchestration_url: str, dataset_file_key: str, run_ids_list: List[str], llm_model_config: str, template_config: List, orchestration_registry_config: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
register_aicore_execution(ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, configuration_id: str, resource_group: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
resolve_artifact_path(artifact_source: gen_ai_hub.evaluations.models.artifact_source.ArtifactSource, ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, object_store_credentials: gen_ai_hub.evaluations._internal._models._AWSObjectStoreData, resource_group: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
# Assumption: the provided artifact corresponds to the supplied credentials, and the URL is of the form ai://secret_name/pathPrefix
resolve_metric_identifiers(metrics: List[gen_ai_hub.evaluations.models.metric_config.MetricConfig], ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, resource_group: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector) -> List[Dict]
Resolves metric identifiers to metric template metadata.
resolve_metric_names(metric_configs_list: List[gen_ai_hub.evaluations.models.metric_config.MetricConfig], error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
upload_evaluation_dataset_data(evaluation_config_data: gen_ai_hub.evaluations._internal._models._EvaluationConfigData, object_store_credentials: gen_ai_hub.evaluations._internal._models._AWSObjectStoreData, object_store_secret_name: str, ai_core_client: ai_core_sdk.ai_core_v2_client.AICoreV2Client, resource_group: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
Uploads the evaluation config data using the provided object store secret data.
upload_file_to_aws_s3(object_store_credentials: gen_ai_hub.evaluations._internal._models._AWSObjectStoreData, object_store_secret_metadata_details: Dict[str, str], file_data: Any, file_key: str, file_type: str, error_collector: gen_ai_hub.evaluations.helpers.collector.ValidationCollector)
wait_for_target_status(status_fetcher: Callable[[], Any], target_status: ai_api_client_sdk.models.status.Status, extract_url: Optional[Callable[[Any], str]] = None, timeout: int = 1200, initial_interval: int = 120, pending_interval: int = 40) -> Optional[str]
Reusable polling function to wait until a resource reaches target_status.
 
:param status_fetcher: Function to get current status response
:type status_fetcher: Callable[[], Any]
:param target_status: Target status to wait for (Status enum)
:type target_status: Status
:param extract_url: Optional function to extract URL from response, defaults to None
:type extract_url: Optional[Callable[[Any], str]]
:param timeout: Maximum time to wait in seconds, defaults to 1200
:type timeout: int
:param initial_interval: Initial polling interval in seconds, defaults to 120
:type initial_interval: int
:param pending_interval: Polling interval for pending/running status in seconds, defaults to 40
:type pending_interval: int
:return: Extracted URL if extract_url is provided and status reached, None otherwise
:rtype: Optional[str]

 
Data
        AI_PROTOCOL_PREFIX = 'ai://'
AWS_OSS_BUCKET_URL_KEY = 'storage.ai.sap.com/bucket'
AWS_OSS_PATH_PREFIX_URL_KEY = 'storage.ai.sap.com/pathPrefix'
AWS_OSS_REGION_URL_KEY = 'storage.ai.sap.com/region'
Any = typing.Any
CSV_FILE_TYPE = 'csv'
Callable = typing.Callable
DATASET_FOLDER_KEY = 'testdata'
Dict = typing.Dict
EVALUATIONS_ARTIFACT_DESCRIPTION = 'Artifact for Evaluations Service'
EVALUATIONS_ARTIFACT_PREFIX_KEY = 'evaluation-artifact-'
EVALUATIONS_CONFIG_PREFIX_KEY = 'evaluation-config-'
EVALUATIONS_SCENARIO_ID = 'genai-evaluations'
EVAL_ORCHESTRATION_CONFIG_PREFIX_NAME = 'evalOrchestrationConfig-'
JSON_FILE_TYPE = 'json'
List = typing.List
ORCHESTRATION_GLOBAL_SCENARIO_NAME = 'orchestration'
ORCHESTRATION_REGISTRY_ENDPOINT = '/registry/v2/orchestrationConfigs'
Optional = typing.Optional
SYSTEM_DEFINED_METRIC_MAPPING = {'bert_score': 'BERT Score', 'bleu': 'BLEU', 'content_filter_on_input': 'Content Filter on Input', 'content_filter_on_output': 'Content Filter on Output', 'exact_match': 'Exact Match', 'f3ad2f40-8fcd-41ba-8a9a-fb82469bf99b': 'Language Match', 'json_schema_match': 'JSON Schema Match', 'pointwise_answer_relevance': 'Pointwise Answer Relevance', 'pointwise_conciseness': 'Pointwise Conciseness', 'pointwise_correctness': 'Pointwise Correctness', ...}
Union = typing.Union
logger = <Logger gen_ai_evaluations_sdk (INFO)>