From 5f2a8b3b3a2bc388bdb906555048404b743504a4 Mon Sep 17 00:00:00 2001
From: Alex
Date: Wed, 27 Mar 2024 20:57:04 +0800
Subject: [PATCH 001/251] [CoreEngine] 1. To process status more cleanly in the unified status center, split the client runner and server runner into four base classes each: agent, protocol manager, job runner manager, and job runner. 2. Add a unified account manager for binding to the MLOps backend. 3. Apply the same design paradigm to both the launch and deployment schedulers: agent -> protocol manager -> job runner manager -> job runner. 4. To make debugging easier, save sender and receiver message records to local files in the message center. 5. Add the class diagram and sequence diagram to the lark doc ( https://fedml-inc.larksuite.com/wiki/NpTUwdXnciBlpQkxPkBu79k0sPc, section: FEDML Launch Python(v2) )
---
 python/fedml/__init__.py | 2 +-
 .../scheduler/master/base_master_agent.py | 126 +
 .../master/base_master_job_runner.py | 622 ++++
 .../master/base_master_job_runner_manager.py | 73 +
 .../master/base_master_protocol_manager.py | 667 ++++
 .../scheduler/master/cloud_server_manager.py | 164 +
 .../scheduler/master/deploy_job_launcher.py | 90 +
 .../scheduler/master/launch_job_runner.py | 44 +
 .../master/launch_job_runner_manager.py | 20 +
 .../scheduler/master/master_agent.py | 28 +
 .../master/master_protocol_manager.py | 36 +
 .../scheduler/master/server_login.py | 406 +--
 .../scheduler/master/server_runner.py | 2767 -----------------
 .../model_scheduler/device_client_runner.py | 1335 --------
 .../model_scheduler/device_model_cache.py | 193 +-
 .../model_scheduler/device_model_db.py | 62 +-
 .../device_model_deployment.py | 778 ++---
 .../model_scheduler/device_model_inference.py | 68 +-
 .../device_model_msg_object.py | 64 +-
 .../device_replica_controller.py | 437 +++
 .../model_scheduler/device_replica_handler.py | 138 +
 .../model_scheduler/device_server_runner.py | 2160 -------------
 .../model_scheduler/job_runner_msg_sender.py | 204 ++
 .../scheduler/model_scheduler/master_agent.py | 27 +
 .../model_scheduler/master_job_runner.py | 578 ++++
 .../master_job_runner_manager.py | 62 +
 .../master_protocol_manager.py | 365 +++
 .../model_scheduler/model_device_client.py | 140 +-
 .../model_scheduler/model_device_server.py | 142 +-
 .../scheduler/model_scheduler/worker_agent.py | 27 +
 .../model_scheduler/worker_job_runner.py | 489 +++
 .../worker_job_runner_manager.py | 23 +
 .../worker_protocol_manager.py | 195 ++
 .../scheduler_core/account_manager.py | 460 +++
 .../scheduler_core/compute_cache_manager.py | 9 +-
 .../scheduler_core/compute_status_cache.py | 76 +
 .../scheduler_core/compute_status_db.py | 123 +
 .../scheduler_core/endpoint_sync_protocol.py | 25 +-
 .../scheduler_core/general_constants.py | 193 ++
 .../scheduler_core/master_api_daemon.py | 23 +-
 .../scheduler_core/message_center.py | 239 +-
 .../scheduler_core/message_common.py | 77 +
 .../scheduler/scheduler_core/ota_upgrade.py | 99 +
 .../scheduler_base_job_runner.py | 545 ++++
 .../scheduler_base_job_runner_manager.py | 66 +
 .../scheduler_base_protocol_manager.py | 260 ++
 .../scheduler/scheduler_core/status_center.py | 410 +++
 .../status_manager_protocols.py | 303 ++
 .../scheduler/slave/base_slave_agent.py | 139 +
 .../scheduler/slave/base_slave_job_runner.py | 264 ++
 .../slave/base_slave_job_runner_manager.py | 12 +
 .../slave/base_slave_protocol_manager.py | 571 ++++
 .../computing/scheduler/slave/client_login.py | 335 +-
 .../scheduler/slave/client_runner.py | 1775 -----------
.../scheduler/slave/launch_job_runner.py | 41 + .../slave/launch_job_runner_manager.py | 22 + .../computing/scheduler/slave/slave_agent.py | 26 + .../scheduler/slave/slave_protocol_manager.py | 104 + python/fedml/core/mlops/__init__.py | 19 +- python/fedml/core/mlops/mlops_configs.py | 2 - python/fedml/core/mlops/mlops_device_perfs.py | 95 +- python/fedml/core/mlops/mlops_metrics.py | 90 +- .../customized_workflow.py | 794 ++++- .../deploy_image_job.yaml | 12 +- .../deploy_image_job/fedml_model_config.yaml | 19 +- .../deploy_image_job/mnist_serve_main.py | 37 + .../deploy_image_job/model/minist_model.py | 11 + .../model/model_parms_from_mlops | Bin 0 -> 32188 bytes .../deploy_llm_job.yaml | 29 + .../deploy_llm_job/.gitignore | 1 + .../__init__.py | 0 .../app/__init__.py | 0 .../app/pipe/__init__.py | 0 .../app/pipe/constants.py | 0 .../app/pipe/instruct_pipeline.py | 0 .../config/__init__.py | 0 .../deploy_llm_job/fedml_model_config.yaml | 12 + .../main_entry.py | 0 python/setup.py | 6 +- 79 files changed, 10058 insertions(+), 9798 deletions(-) create mode 100755 python/fedml/computing/scheduler/master/base_master_agent.py create mode 100755 python/fedml/computing/scheduler/master/base_master_job_runner.py create mode 100755 python/fedml/computing/scheduler/master/base_master_job_runner_manager.py create mode 100755 python/fedml/computing/scheduler/master/base_master_protocol_manager.py create mode 100755 python/fedml/computing/scheduler/master/cloud_server_manager.py create mode 100755 python/fedml/computing/scheduler/master/deploy_job_launcher.py create mode 100755 python/fedml/computing/scheduler/master/launch_job_runner.py create mode 100755 python/fedml/computing/scheduler/master/launch_job_runner_manager.py create mode 100755 python/fedml/computing/scheduler/master/master_agent.py create mode 100755 python/fedml/computing/scheduler/master/master_protocol_manager.py delete mode 100755 python/fedml/computing/scheduler/master/server_runner.py delete mode 100755 python/fedml/computing/scheduler/model_scheduler/device_client_runner.py create mode 100644 python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py create mode 100644 python/fedml/computing/scheduler/model_scheduler/device_replica_handler.py delete mode 100755 python/fedml/computing/scheduler/model_scheduler/device_server_runner.py create mode 100755 python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py create mode 100755 python/fedml/computing/scheduler/model_scheduler/master_agent.py create mode 100755 python/fedml/computing/scheduler/model_scheduler/master_job_runner.py create mode 100755 python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py create mode 100755 python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py create mode 100755 python/fedml/computing/scheduler/model_scheduler/worker_agent.py create mode 100755 python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py create mode 100755 python/fedml/computing/scheduler/model_scheduler/worker_job_runner_manager.py create mode 100755 python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/account_manager.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/compute_status_db.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/general_constants.py create mode 100755 
python/fedml/computing/scheduler/scheduler_core/message_common.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/ota_upgrade.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/status_center.py create mode 100755 python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py create mode 100755 python/fedml/computing/scheduler/slave/base_slave_agent.py create mode 100755 python/fedml/computing/scheduler/slave/base_slave_job_runner.py create mode 100755 python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py create mode 100755 python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py delete mode 100755 python/fedml/computing/scheduler/slave/client_runner.py create mode 100755 python/fedml/computing/scheduler/slave/launch_job_runner.py create mode 100755 python/fedml/computing/scheduler/slave/launch_job_runner_manager.py create mode 100755 python/fedml/computing/scheduler/slave/slave_agent.py create mode 100755 python/fedml/computing/scheduler/slave/slave_protocol_manager.py create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/mnist_serve_main.py create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/minist_model.py create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/model_parms_from_mlops create mode 100755 python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job.yaml create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/.gitignore rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/__init__.py (100%) rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/app/__init__.py (100%) rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/app/pipe/__init__.py (100%) rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/app/pipe/constants.py (100%) rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/app/pipe/instruct_pipeline.py (100%) rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/config/__init__.py (100%) create mode 100644 python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/fedml_model_config.yaml rename python/fedml/workflow/driver_example/customized_job_example/{deploy_image_job => deploy_llm_job}/main_entry.py (100%) diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py index b06c6264a7..f6659cd622 100644 --- a/python/fedml/__init__.py +++ b/python/fedml/__init__.py @@ -34,7 +34,7 @@ _global_training_type = None _global_comm_backend = None -__version__ = "0.8.27.dev2" +__version__ = "0.8.29.dev4" # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). 
Potential VALUE: local, dev, test, release

diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py
new file mode 100755
index 0000000000..66bc35d96f
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/base_master_agent.py
@@ -0,0 +1,126 @@
+
+from multiprocessing import Process
+from ..comm_utils import sys_utils
+from ..comm_utils.job_cleanup import JobCleanup
+from ....core.mlops import MLOpsRuntimeLog, MLOpsMetrics
+from ..scheduler_core.master_api_daemon import MasterApiDaemon
+from ..scheduler_core.account_manager import FedMLAccountManager
+from ..scheduler_core.general_constants import GeneralConstants
+from abc import ABC, abstractmethod
+
+
+class FedMLBaseMasterAgent(ABC):
+
+    def __init__(self):
+        self.agent_args = None
+        self.master_api_daemon = None
+        self.master_api_process = None
+        self.mlops_metrics = MLOpsMetrics()
+        self.status_reporter = None
+        self.enable_simulation_cloud_agent = True
+        self.use_local_process_as_cloud_server = False
+        self.protocol_mgr = None
+
+    def login(
+            self, user_id, api_key=None, device_id=None,
+            os_name=None, role=None
+    ):
+        # Login account
+        login_result = FedMLAccountManager.get_instance().login(
+            user_id, api_key=api_key, device_id=device_id,
+            os_name=os_name, role=role
+        )
+        if login_result is not None:
+            self.agent_args = login_result
+        else:
+            return None
+
+        # Save the bound info
+        self._save_agent_info(
+            login_result.current_device_id + "." + login_result.os_name, login_result.edge_id)
+
+        # Init the logs for protocol manager
+        self._init_logs(login_result, login_result.edge_id)
+
+        # Create the protocol manager to communicate with the slave agents and MLOps.
+        self._create_protocol_manager(role, login_result)
+
+        # Initialize the protocol manager
+        # noinspection PyBroadException
+        try:
+            self._initialize_protocol_manager()
+        except Exception as e:
+            FedMLAccountManager.write_login_failed_file(is_client=False)
+            self.protocol_mgr.stop()
+            raise e
+
+        # Start the protocol manager to process the messages from MLOps and slave agents.
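+        # Bring-up order, as described in the commit message: login via the
+        # account manager -> save the bound device info -> init runtime logs ->
+        # create the protocol manager -> initialize it -> start it to process messages.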
+ self.protocol_mgr.start() + + @staticmethod + def logout(): + GeneralConstants.cleanup_run_process(None, is_master=True) + sys_utils.cleanup_all_fedml_server_api_processes() + + def _create_protocol_manager(self, role, login_result): + if self.protocol_mgr is not None: + return + self.protocol_mgr = self._generate_protocol_manager_instance( + login_result, agent_config=login_result.agent_config) + self.protocol_mgr.run_as_edge_server_and_agent = True \ + if role == FedMLAccountManager.ROLE_EDGE_SERVER else False + self.protocol_mgr.run_as_cloud_agent = True if role == FedMLAccountManager.ROLE_CLOUD_AGENT else False + self.protocol_mgr.run_as_cloud_server = True if role == FedMLAccountManager.ROLE_CLOUD_SERVER else False + self.protocol_mgr.args = login_result + self.protocol_mgr.edge_id = login_result.edge_id + self.protocol_mgr.unique_device_id = login_result.unique_device_id + self.protocol_mgr.user_name = login_result.user_name + self.protocol_mgr.agent_config = login_result.agent_config + self.protocol_mgr.enable_simulation_cloud_agent = self.enable_simulation_cloud_agent + self.protocol_mgr.use_local_process_as_cloud_server = self.use_local_process_as_cloud_server + + def _initialize_protocol_manager(self): + # Init local database + self._init_database() + + # Initialize the master protocol + self.protocol_mgr.initialize() + + # Report the IDLE status to MLOps + self.mlops_metrics.report_server_training_status( + None, GeneralConstants.MSG_MLOPS_SERVER_STATUS_IDLE, edge_id=self.agent_args.edge_id) + + # Cleanup data when startup + JobCleanup.get_instance().sync_data_on_startup(self.agent_args.edge_id, is_client=False) + + # Start the API server on master agent + self.master_api_daemon = MasterApiDaemon() + self.master_api_process = Process(target=self.master_api_daemon.run) + self.master_api_process.start() + + def _init_logs(self, agent_args, edge_id): + # Init runtime logs + in_args = agent_args + in_args.log_file_dir = self._get_log_file_dir() + in_args.run_id = 0 + in_args.role = "server" + in_args.edge_id = edge_id + in_args.using_mlops = True + in_args.server_agent_id = edge_id + MLOpsRuntimeLog.get_instance(in_args).init_logs() + + @abstractmethod + def _get_log_file_dir(self): + pass + + @abstractmethod + def _save_agent_info(self, unique_device_id, edge_id): + pass + + @abstractmethod + def _init_database(self): + pass + + @abstractmethod + def _generate_protocol_manager_instance(self, args, agent_config=None): + return None diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py new file mode 100755 index 0000000000..3dbc1fd891 --- /dev/null +++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py @@ -0,0 +1,622 @@ + +import json +import logging +import multiprocessing +import platform +import queue +import os +import time +import traceback +from ..scheduler_entry.constants import Constants +from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog +from ..master.server_constants import ServerConstants +from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon +from ..comm_utils import sys_utils +from .server_data_interface import FedMLServerDataInterface +from ....core.mlops.mlops_utils import MLOpsUtils +from ..scheduler_core.log_manager import LogsManager +from ..scheduler_core.metrics_manager import MetricsManager +from fedml.utils.debugging import debug +from ..scheduler_core.status_center import JobStatus +from 
..scheduler_core.compute_cache_manager import ComputeCacheManager
+from multiprocessing import Process, Queue
+from ..scheduler_core.general_constants import GeneralConstants
+from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError
+from abc import ABC, abstractmethod
+
+
+class FedMLBaseMasterJobRunner(FedMLSchedulerBaseJobRunner, ABC):
+    debug_cloud_server = False
+
+    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0,
+                 cuda_visible_gpu_ids_str=None,
+                 agent_data_dir=None, agent_package_download_dir=None,
+                 agent_package_unzip_dir=None, agent_log_file_dir=None):
+        FedMLSchedulerBaseJobRunner.__init__(
+            self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=agent_data_dir,
+            agent_package_download_dir=agent_package_download_dir,
+            agent_package_unzip_dir=agent_package_unzip_dir,
+            agent_log_file_dir=agent_log_file_dir,
+            is_master_runner=True
+        )
+
+        self.run_edge_id_status_queue = Queue()
+        self.run_metrics_queue = Queue()
+        self.run_events_queue = Queue()
+        self.run_artifacts_queue = Queue()
+        self.run_logs_queue = Queue()
+        self.run_edge_device_info_queue = Queue()
+        self.run_edge_device_info_global_queue = Queue()
+        self.run_extend_queue_list = None
+        self.async_check_timeout = 0
+        self.enable_async_cluster = False
+        self.origin_fedml_config_object = None
+        self.server_agent_id = 0
+        if request_json is not None:
+            self.server_agent_id = request_json.get("server_id", 0)
+        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
+        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
+        self.fedml_data_dir = self.fedml_data_base_package_dir
+        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
+
+    @debug
+    def run(
+            self, process_event, completed_event, edge_id_status_queue=None,
+            edge_device_info_queue=None, run_metrics_queue=None, run_event_queue=None,
+            run_artifacts_queue=None, run_logs_queue=None, edge_device_info_global_queue=None,
+            run_extend_queue_list=None, sender_message_center_queue=None, listener_message_queue=None,
+            status_center_queue=None
+    ):
+        print(f"Master job runner process id {os.getpid()}, run id {self.run_id}")
+
+        if platform.system() != "Windows":
+            os.setsid()
+
+        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
+        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
+
+        self.run_process_event = process_event
+        self.run_process_completed_event = completed_event
+        try:
+            MLOpsUtils.set_ntp_offset(self.ntp_offset)
+
+            self.rebuild_message_status_center(sender_message_center_queue, listener_message_queue, status_center_queue)
+
+            self.run_impl(
+                edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
+                run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
+                run_extend_queue_list=run_extend_queue_list, sender_message_queue=sender_message_center_queue,
+                listener_message_queue=listener_message_queue, status_center_queue=status_center_queue
+            )
+        except RunnerError:
+            logging.info("Runner stopped.")
+            self.status_reporter.report_server_id_status(
+                self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id,
+                server_id=self.edge_id, server_agent_id=self.edge_id)
+        except RunnerCompletedError:
+            logging.info("Runner completed.")
+        except Exception as e:
+            logging.error("Runner exits
with exceptions. {}".format(traceback.format_exc())) + self.status_reporter.report_server_id_status( + self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + finally: + logging.info("Release resources.") + self._process_run_metrics_queue(run_metrics_queue) + self._process_run_logs_queue(run_logs_queue) + MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) + if self.mlops_metrics is not None: + self.mlops_metrics.stop_sys_perf() + time.sleep(3) + ServerConstants.cleanup_run_process(self.run_id) + ServerConstants.cleanup_learning_process(self.run_id) + ServerConstants.cleanup_bootstrap_process(self.run_id) + + @debug + @abstractmethod + def run_impl( + self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue, + run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue, + run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None, + status_center_queue=None + ): + run_id = self.request_json["runId"] + run_config = self.request_json["run_config"] + data_config = run_config["data_config"] + edge_ids = self.request_json["edgeids"] + + self.check_runner_stop_event() + + self.run_id = run_id + self.args.run_id = self.run_id + MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + + logging.info("Detect all status of Edge ids: " + str(edge_ids)) + + status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status( + edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue, + callback_when_edges_ready=self.send_training_request_to_edges) + logging.info(f"Status OK: {status_ok}, Active edge info dict: {active_edge_info_dict}, " + f"inactivate edges: {inactivate_edges}") + if not status_ok: + logging.error(f"Status of edge device is not OK. 
Active edge info dict: {active_edge_info_dict}, "
+                          f"Inactive edges: {inactivate_edges}")
+            return
+
+        if not self.should_continue_run_job(run_id):
+            if FedMLBaseMasterJobRunner.debug_cloud_server:
+                while True:
+                    time.sleep(30)
+            # Check if the run status is normal
+            self.aggregate_run_metrics_logs(
+                run_id, edge_ids, edge_id_status_queue, edge_device_info_queue,
+                edge_device_info_global_queue,
+                run_metrics_queue, run_logs_queue)
+            return
+
+        # Start the server job
+        self.start_runner_process(
+            run_id, self.request_json, edge_id=self.edge_id, is_server_job=True,
+            sender_message_queue=sender_message_queue,
+            listener_message_queue=listener_message_queue,
+            status_center_queue=status_center_queue
+        )
+
+        # Check if the run status is normal
+        self.aggregate_run_metrics_logs(
+            run_id, edge_ids, edge_id_status_queue, edge_device_info_queue,
+            edge_device_info_global_queue,
+            run_metrics_queue, run_logs_queue)
+
+    @abstractmethod
+    def _generate_extend_queue_list(self):
+        return list()
+
+    def aggregate_run_metrics_logs(
+            self, run_id, edge_id_list, edge_id_status_queue, edge_device_info_queue,
+            edge_device_info_global_queue, run_metrics_queue, run_logs_queue):
+
+        ComputeCacheManager.get_instance().set_redis_params()
+
+        while True:
+            self.check_runner_stop_event()
+
+            # Process run metrics
+            self._process_run_metrics_queue(run_metrics_queue)
+
+            # Process run logs
+            self._process_run_logs_queue(run_logs_queue)
+
+            # Check the job status
+            job_status = ComputeCacheManager.get_instance().get_status_cache().get_job_status(run_id)
+            if JobStatus.is_job_completed(job_status):
+                break
+
+    def _process_run_metrics_queue(self, run_metrics_queue):
+        # Fetch metrics from the run metrics queue
+        while True:
+            try:
+                metrics_item = run_metrics_queue.get(block=False, timeout=3)
+                MetricsManager.get_instance().save_metrics(metrics_item)
+                metric_json = json.loads(metrics_item)
+                if metric_json.get("is_endpoint", False):
+                    metric_json.pop("is_endpoint")
+                    self.mlops_metrics.report_endpoint_metric({}, payload=json.dumps(metric_json))
+                else:
+                    self.mlops_metrics.report_server_training_metric({}, payload=metrics_item)
+            except queue.Empty as e:  # If queue is empty, then break loop
+                break
+
+    def _process_run_logs_queue(self, run_logs_queue):
+        # Fetch logs from the run logs queue
+        while True:
+            try:
+                logs_item = run_logs_queue.get(block=False, timeout=3)
+                LogsManager.save_logs(logs_item)
+            except queue.Empty as e:  # If queue is empty, then break loop
+                break
+
+    def run_server_job(
+            self, process_event, completed_event, edge_id_status_queue=None,
+            edge_device_info_queue=None, run_metrics_queue=None,
+            run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None,
+            sender_message_queue=None, listener_message_queue=None,
+            edge_device_info_global_queue=None, status_center_queue=None
+    ):
+        print(f"Server runner process id {os.getpid()}, run id {self.run_id}")
+
+        if platform.system() != "Windows":
+            os.setsid()
+
+        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
+        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
+
+        self.run_process_event = process_event
+        self.run_process_completed_event = completed_event
+        try:
+            MLOpsUtils.set_ntp_offset(self.ntp_offset)
+
+            self.rebuild_message_status_center(sender_message_queue, listener_message_queue, status_center_queue)
+
+            self.run_server_job_impl(process_event, completed_event,
+                                     message_center_queue=sender_message_queue)
+        except RunnerError:
+            logging.info("Runner stopped.")
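+            # RunnerError is raised when the runner is signaled to stop, so report
+            # KILLED rather than FAILED to MLOps.
+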
self.status_reporter.report_server_id_status( + self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + except RunnerCompletedError: + logging.info("Runner completed.") + except Exception as e: + logging.error("Runner exits with exceptions. {}".format(traceback.format_exc())) + self.status_reporter.report_server_id_status( + self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + finally: + logging.info("Release resources.") + MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) + if self.mlops_metrics is not None: + self.mlops_metrics.stop_sys_perf() + time.sleep(3) + ServerConstants.cleanup_run_process(self.run_id) + ServerConstants.cleanup_learning_process(self.run_id) + ServerConstants.cleanup_bootstrap_process(self.run_id) + + def run_server_job_impl(self, process_event, completed_event, + message_center_queue=None): + run_id = self.request_json["runId"] + run_config = self.request_json["run_config"] + data_config = run_config["data_config"] + edge_ids = self.request_json["edgeids"] + + self.check_runner_stop_event() + + self.run_id = run_id + self.args.run_id = self.run_id + MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + + # get training params + private_local_data_dir = data_config.get("privateLocalData", "") + is_using_local_data = 0 + # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0: + # is_using_local_data = 1 + + # start a run according to the hyper-parameters + # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id) + fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data") + fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config") + if is_using_local_data: + fedml_local_data_dir = private_local_data_dir + self.fedml_data_dir = self.fedml_data_local_package_dir + + self.check_runner_stop_event() + + logging.info("download packages and run the bootstrap script...") + + # update local config with real time parameters from server and dynamically replace variables value + unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config) + if unzip_package_path is None or fedml_config_object is None: + logging.info("failed to update local fedml config.") + self.check_runner_stop_event() + self.report_exception_status(run_id) + return + + logging.info("cleanup the previous aggregation process and check downloaded packages...") + + entry_file_config = fedml_config_object["entry_config"] + dynamic_args_config = fedml_config_object["dynamic_args"] + entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep) + entry_file = os.path.basename(entry_file) + conf_file = entry_file_config["conf_file"] + conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep) + ServerConstants.cleanup_learning_process(run_id) + self.check_runner_stop_event() + if not os.path.exists(unzip_package_path): + logging.info("failed to unzip file.") + self.check_runner_stop_event() + self.report_exception_status(run_id) + return + os.chdir(os.path.join(unzip_package_path, "fedml")) + + self.check_runner_stop_event() + + logging.info("starting the server user process...") + + entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file) + conf_file_full_path = os.path.join(unzip_package_path, 
"fedml", conf_file) + logging.info(" ") + logging.info(" ") + logging.info("====Your Run Logs Begin===") + + process, is_launch_task, error_list = self.execute_job_task( + unzip_package_path=unzip_package_path, entry_file_full_path=entry_file_full_path, + conf_file_full_path=conf_file_full_path, dynamic_args_config=dynamic_args_config, + fedml_config_object=self.fedml_config_object) + + logging.info("====Your Run Logs End===") + logging.info(" ") + logging.info(" ") + + ret_code, out, err = process.returncode, None, None + is_run_ok = sys_utils.is_runner_finished_normally(process.pid) + if is_launch_task: + is_run_ok = True + if error_list is not None and len(error_list) > 0: + is_run_ok = False + if ret_code is None or ret_code <= 0: + self.check_runner_stop_event() + + if is_run_ok: + if out is not None: + out_str = sys_utils.decode_our_err_result(out) + if out_str != "": + logging.info("{}".format(out_str)) + + self.status_reporter.report_server_id_status( + run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + + if is_launch_task: + sys_utils.log_return_info(f"job {run_id}", 0) + else: + sys_utils.log_return_info(entry_file, 0) + else: + is_run_ok = False + + if not is_run_ok: + # If the run status is killed or finished, then return with the normal state. + current_job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id) + if current_job is not None and (current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED or + current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED): + return + + self.check_runner_stop_event() + + logging.error("failed to run the aggregation process...") + + if err is not None: + err_str = sys_utils.decode_our_err_result(err) + if err_str != "": + logging.error("{}".format(err_str)) + + if is_launch_task: + sys_utils.log_return_info(f"job {run_id}", ret_code) + else: + sys_utils.log_return_info(entry_file, ret_code) + + self.report_exception_status(run_id) + + @abstractmethod + def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None): + return None + + def start_runner_process( + self, run_id, request_json, edge_id=None, is_server_job=False, + sender_message_queue=None, listener_message_queue=None, + status_center_queue=None, + ): + server_runner = self._generate_job_runner_instance( + self.args, run_id=run_id, request_json=request_json, + agent_config=self.agent_config, edge_id=edge_id + ) + + run_id_str = str(run_id) + server_runner.edge_id = self.edge_id + server_runner.server_agent_id = self.server_agent_id + server_runner.start_request_json = json.dumps(request_json) + self.run_process_event = multiprocessing.Event() + server_runner.run_process_event = self.run_process_event + self.run_process_completed_event = multiprocessing.Event() + server_runner.run_process_completed_event = self.run_process_completed_event + server_runner.edge_id_status_queue = self.run_edge_id_status_queue + server_runner.edge_device_info_queue = self.run_edge_device_info_queue + self.run_extend_queue_list = self._generate_extend_queue_list() + self.run_process = Process( + target=server_runner.run if not is_server_job else server_runner.run_server_job, args=( + self.run_process_event, self.run_process_completed_event, self.run_edge_id_status_queue, + self.run_edge_device_info_queue, self.run_metrics_queue, self.run_events_queue, + self.run_artifacts_queue, self.run_logs_queue, 
self.run_edge_device_info_global_queue, + self.run_extend_queue_list, sender_message_queue, listener_message_queue, status_center_queue + ) + ) + self.run_process.start() + ServerConstants.save_run_process(run_id, self.run_process.pid) + return self.run_process + + def put_run_edge_device_info_to_queue(self, run_id, device_info): + run_id_str = str(run_id) + if self.run_edge_device_info_queue is None: + self.run_edge_device_info_queue = Queue() + self.run_edge_device_info_queue.put(device_info) + + def should_continue_run_job(self, run_id): + run_config = self.request_json["run_config"] + run_params = run_config.get("parameters", {}) + job_yaml = run_params.get("job_yaml", {}) + job_yaml_default_none = run_params.get("job_yaml", None) + framework_type = job_yaml.get("framework_type", None) + job_type = job_yaml.get("job_type", None) + job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type + if job_yaml_default_none is not None: + if job_type == Constants.JOB_TASK_TYPE_FEDERATE: + return True + + if framework_type is None or framework_type != Constants.JOB_FRAMEWORK_TYPE_FEDML: + self.status_reporter.report_server_id_status( + run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + return False + + return True + + @debug + def detect_edges_status( + self, edge_device_info_queue, edge_device_info_global_queue=None, callback_when_edges_ready=None, + status_timeout=None, + need_to_trigger_exception=True, status_check_context=None, given_edge_ids=None, + callback_when_detecting=None, args_for_callback_when_detecting=None + ): + run_id = self.request_json["runId"] + run_id_str = str(run_id) + edge_id_list = self.request_json["edgeids"] + if given_edge_ids is not None: + edge_id_list = given_edge_ids + + # Init realtime status of all edges + run_edges_realtime_status = dict() + run_edges_realtime_status[run_id_str] = dict() + + edge_info_global_dict = dict() + + # Send status message to all edges + allowed_cache_edge_status_time = 60 + for edge_id in edge_id_list: + # Check if the edge status was filled allowed_cache_edge_status_time seconds ago, + # if so no more checking message would be sent. 
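+            # edge_info_global_dict caches each edge's last report; a report newer than
+            # allowed_cache_edge_status_time (60 seconds) counts as fresh, so no new
+            # status-check message is sent to that edge.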
+ edge_info = edge_info_global_dict.get(edge_id, None) + if edge_info is not None: + timestamp = edge_info.get("timestamp", None) + time_interval = time.time() - timestamp + if time_interval <= allowed_cache_edge_status_time: + continue + + self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context) + time.sleep(3) + + total_sleep_seconds = 0 + status_check_sleep_seconds = 10 + allowed_status_check_sleep_seconds = 60 * 2 if status_timeout is None else status_timeout + allowed_status_check_sleep_seconds_for_async = 30 + inactivate_edges = list() + active_edge_info_dict = dict() + while True: + if callback_when_detecting is not None: + callback_when_detecting(args_for_callback_when_detecting) + + # Fetch edge info from the edge status queue, which will be added to realtime status map + while True: + self.check_runner_stop_event() + + try: + edge_info = edge_device_info_queue.get(block=False, timeout=1) + if edge_info is not None: + edge_id = edge_info.get("edge_id", None) + if edge_id is not None: + run_edges_realtime_status[run_id_str][edge_id] = edge_info + except queue.Empty as e: # If queue is empty, then break loop + break + + self.check_runner_stop_event() + + # Check all edges which don't send response status successfully + # and retry to send the status checking message. + active_edges_count = 0 + inactivate_edges.clear() + active_edge_info_dict.clear() + for edge_id in edge_id_list: + edge_info_dict = run_edges_realtime_status.get(run_id_str, {}) + edge_info = edge_info_dict.get(edge_id, None) + edge_info = edge_info_dict.get(str(edge_id), None) if edge_info is None else edge_info + if edge_info is not None: + active_edges_count += 1 + active_edge_info_dict[str(edge_id)] = edge_info + else: + # Check if the edge status was filled allowed_cache_edge_status_time seconds ago, + # if so no more checking message would be sent. + edge_info = edge_info_global_dict.get(edge_id, None) + if edge_info is not None: + timestamp = edge_info.get("timestamp", None) + time_interval = time.time() - timestamp + if time_interval <= allowed_cache_edge_status_time: + active_edges_count += 1 + active_edge_info_dict[str(edge_id)] = edge_info + continue + + inactivate_edges.append(edge_id) + self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context) + + # If all edges are ready then send the starting job message to them + if active_edges_count == len(edge_id_list): + logging.info(f"All edges are ready. Active edge id list is as follows. {active_edge_info_dict}") + if callback_when_edges_ready is not None: + logging.info("All edges are ready. Start to process the callback function.") + callback_when_edges_ready(active_edge_info_dict=active_edge_info_dict) + else: + logging.info("All edges are ready. No callback function to process.") + break + else: + logging.info(f"All edges are not ready. Active edge id list: {active_edge_info_dict}, " + f"Inactive edge id list: {inactivate_edges}") + + # Check if runner needs to stop and sleep specific time + self.check_runner_stop_event() + time.sleep(status_check_sleep_seconds) + total_sleep_seconds += status_check_sleep_seconds + + # Check if the status response message has timed out to receive + if total_sleep_seconds >= allowed_status_check_sleep_seconds: + # If so, send failed message to MLOps and send exception message to all edges. + logging.error(f"There are inactive edge devices. " + f"Inactivate edge id list is as follows. 
{inactivate_edges}") + if need_to_trigger_exception: + self.status_reporter.report_server_id_status( + run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.server_agent_id) + self.report_exception_status(run_id) + return False, active_edge_info_dict, inactivate_edges + + # If we enable the mode for async cluster, then sleep some time and send messages to all clients. + if callback_when_edges_ready is not None and self.should_process_async_cluster is not None: + should_async, async_timeout = self.should_process_async_cluster() + if should_async and total_sleep_seconds >= allowed_status_check_sleep_seconds_for_async: + if async_timeout > allowed_status_check_sleep_seconds_for_async: + time.sleep(async_timeout - allowed_status_check_sleep_seconds_for_async) + self.send_training_request_to_edges(active_edge_info_dict) + return True, active_edge_info_dict, inactivate_edges + + return True, active_edge_info_dict, inactivate_edges + + def send_status_check_msg(self, run_id, edge_id, server_id, context=None): + topic_get_model_device_id = "server/client/request_device_info/" + str(edge_id) + payload = {"server_id": server_id, "run_id": run_id} + if context is not None: + payload["context"] = context + self.message_center.send_message(topic_get_model_device_id, json.dumps(payload)) + + def report_exception_status(self, run_id): + self.status_reporter.report_job_status(run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION) + + def callback_run_logs(self, topic, payload): + run_id = str(topic).split('/')[-1] + run_id_str = str(run_id) + if self.run_logs_queue is None: + self.run_logs_queue = Queue() + self.run_logs_queue.put(payload) + + def callback_run_metrics(self, topic, payload): + print(f"callback_run_metrics topic {topic}, payload {payload}") + run_id = str(topic).split('/')[-1] + run_id_str = str(run_id) + if self.run_metrics_queue is None: + self.run_metrics_queue = Queue() + self.run_metrics_queue.put(payload) + + def send_training_request_to_edges(self, active_edge_info_dict): + topic = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES + payload = json.dumps(active_edge_info_dict) + self.message_center.receive_message(topic, payload) + + def should_process_async_cluster(self): + run_config = self.request_json.get("run_config", {}) + run_params = run_config.get("parameters", {}) + common_args = run_params.get("common_args", {}) + self.enable_async_cluster = common_args.get("enable_async_cluster", False) + self.async_check_timeout = common_args.get("async_check_timeout", 0) + if self.enable_async_cluster: + return True, self.async_check_timeout + + return False, self.async_check_timeout + + + diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py new file mode 100755 index 0000000000..694fab5f5f --- /dev/null +++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py @@ -0,0 +1,73 @@ +import base64 +import json +import logging +import time +from abc import ABC +from multiprocessing import Process +from .cloud_server_manager import FedMLCloudServerManager +from ..scheduler_core.scheduler_base_job_runner_manager import FedMLSchedulerBaseJobRunnerManager + + +class FedMLBaseMasterJobRunnerManager(FedMLSchedulerBaseJobRunnerManager, ABC): + def __init__(self): + FedMLSchedulerBaseJobRunnerManager.__init__(self) + + # Override + def start_job_runner( + self, run_id, request_json, 
args=None, edge_id=None, is_server_job=False, + sender_message_queue=None, listener_message_queue=None, status_center_queue=None, + should_start_cloud_server=False, use_local_process_as_cloud_server=False, + cuda_visible_gpu_ids_str=None + ): + if should_start_cloud_server: + self._start_cloud_server(args, run_id, request_json, edge_id=edge_id, + use_local_process_as_cloud_server=use_local_process_as_cloud_server) + return + + run_id_str = str(run_id) + self.job_runners[run_id_str] = self._generate_job_runner_instance( + args, run_id=run_id, request_json=request_json, + agent_config=args.agent_config, edge_id=edge_id, + ) + self.job_runners[run_id_str].start_runner_process( + run_id, request_json, edge_id=edge_id, is_server_job=is_server_job, + sender_message_queue=sender_message_queue, + listener_message_queue=listener_message_queue, + status_center_queue=status_center_queue + ) + + def _start_cloud_server( + self, args, run_id, request_json, edge_id=None, + use_local_process_as_cloud_server=False + ): + run_id_str = str(run_id) + cloud_server_mgr = FedMLCloudServerManager( + args, run_id=run_id, edge_id=edge_id, request_json=request_json, + agent_config=args.agent_config + ) + if not use_local_process_as_cloud_server: + self.cloud_run_process_map[run_id_str] = Process(target=cloud_server_mgr.start_cloud_server_process_entry) + self.cloud_run_process_map[run_id_str].start() + else: + message_bytes = json.dumps(request_json).encode("ascii") + base64_bytes = base64.b64encode(message_bytes) + runner_cmd_encoded = base64_bytes.decode("ascii") + cloud_device_id = request_json.get("cloudServerDeviceId", "0") + + logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded)) + + self.cloud_run_process_map[run_id_str] = Process( + target=cloud_server_mgr.start_local_cloud_server, + args=(args.account_id, args.version, cloud_device_id, runner_cmd_encoded)) + self.cloud_run_process_map[run_id_str].start() + time.sleep(1) + + def callback_run_logs(self, run_id, topic, payload): + run_id_str = str(run_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].callback_run_logs(topic, payload) + + def callback_run_metrics(self, run_id, topic, payload): + run_id_str = str(run_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].callback_run_metrics(topic, payload) diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py new file mode 100755 index 0000000000..bf720515d9 --- /dev/null +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -0,0 +1,667 @@ + +import base64 +import json +import logging +import fedml +from ..scheduler_core.scheduler_matcher import SchedulerMatcher +from ..comm_utils.constants import SchedulerConstants +from ..comm_utils.job_utils import JobRunnerUtils +from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog +from ....core.mlops.mlops_configs import MLOpsConfigs +from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon +from ..comm_utils import sys_utils +from ....core.mlops.mlops_utils import MLOpsUtils +from ..model_scheduler import device_client_constants +from fedml.utils.debugging import debug +from ..scheduler_core.compute_cache_manager import ComputeCacheManager +from ..scheduler_core.ota_upgrade import FedMLOtaUpgrade +from .deploy_job_launcher import FedMLDeployJobLauncher +from ..scheduler_core.general_constants import GeneralConstants +from 
..scheduler_core.scheduler_base_protocol_manager import FedMLSchedulerBaseProtocolManager
+from abc import ABC, abstractmethod
+
+
+class FedMLBaseMasterProtocolManager(FedMLSchedulerBaseProtocolManager, ABC):
+    def __init__(self, args, agent_config=None):
+        FedMLSchedulerBaseProtocolManager.__init__(self, args, agent_config=agent_config, is_master=True)
+
+        self.async_check_timeout = 0
+        self.enable_async_cluster = False
+        self.request_json = None
+        self.run_edge_ids = dict()
+        self.version = fedml.get_env_version()
+        self.args = args
+        self.run_id = None
+        self.edge_id = args.edge_id
+        self.server_agent_id = args.edge_id
+        self.current_device_id = args.current_device_id
+        self.unique_device_id = args.unique_device_id
+        self.agent_config = agent_config
+        self.topic_start_train = None
+        self.topic_stop_train = None
+        self.topic_report_status = None
+        self.topic_ota_msg = None
+        self.topic_response_device_info = None
+        self.topic_request_device_info_from_mlops = None
+        self.topic_request_job_status = None
+        self.topic_request_device_status_in_job = None
+        self.topic_send_training_request_to_edges = None
+        self.run_as_cloud_agent = False
+        self.run_as_cloud_server = False
+        self.run_as_edge_server_and_agent = False
+        self.run_as_cloud_server_and_agent = False
+        self.enable_simulation_cloud_agent = True
+        self.use_local_process_as_cloud_server = False
+        self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id)
+        self.running_request_json = dict()
+        self.start_request_json = None
+        self.deploy_job_launcher = FedMLDeployJobLauncher()
+
+    @abstractmethod
+    def generate_topics(self):
+        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
+
+        # The topic for starting training
+        self.topic_start_train = "mlops/flserver_agent_" + str(self.edge_id) + "/start_train"
+
+        # The topic for stopping training
+        self.topic_stop_train = "mlops/flserver_agent_" + str(self.edge_id) + "/stop_train"
+
+        # The topic for reporting current device status.
+        self.topic_report_status = "mlops/report_device_status"
+
+        # The topic for OTA messages from the MLOps.
+        self.topic_ota_msg = "mlops/flserver_agent_" + str(self.edge_id) + "/ota"
+
+        # The topic for requesting device info from the client.
+        self.topic_response_device_info = "client/server/response_device_info/" + str(self.edge_id)
+
+        # The topic for requesting device info from MLOps.
+        self.topic_request_device_info_from_mlops = f"mlops/master_agent/request_device_info/{self.edge_id}"
+
+        # The topic for getting job status from the status center.
+        self.topic_request_job_status = f"anywhere/master_agent/request_job_status/{self.edge_id}"
+
+        # The topic for getting device status of job from the status center.
+        self.topic_request_device_status_in_job = f"anywhere/master_agent/request_device_status_in_job/{self.edge_id}"
+
+        # The topic for reporting online status
+        self.topic_active = "flserver_agent/active"
+
+        # The topic for last-will messages.
+        self.topic_last_will = "flserver_agent/last_will_msg"
+
+        # The topic for sending training request to edges (Request from the job runner when all edges are ready)
+        self.topic_send_training_request_to_edges = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES
+
+        # Subscribe topics for starting train, stopping train and fetching client status.
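+        # For example, an agent whose edge_id is 1234 subscribes to
+        # "mlops/flserver_agent_1234/start_train" to receive start-train requests.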
+        self.subscribed_topics.clear()
+        self.add_subscribe_topic(self.topic_start_train)
+        self.add_subscribe_topic(self.topic_stop_train)
+        self.add_subscribe_topic(self.topic_report_status)
+        self.add_subscribe_topic(self.topic_ota_msg)
+        self.add_subscribe_topic(self.topic_response_device_info)
+        self.add_subscribe_topic(self.topic_request_device_info_from_mlops)
+        self.add_subscribe_topic(self.topic_request_job_status)
+        self.add_subscribe_topic(self.topic_request_device_status_in_job)
+
+    @abstractmethod
+    def add_protocol_handler(self):
+        # Add the message listeners for all topics, the following is an example.
+        # self.add_message_listener(self.topic_start_train, self.callback_start_train)
+        # Add the message listeners for all topics
+        self.add_message_listener(self.topic_start_train, self.callback_start_train)
+        self.add_message_listener(self.topic_stop_train, self.callback_stop_train)
+        self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg)
+        self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
+        self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info)
+        self.add_message_listener(self.topic_request_device_info_from_mlops,
+                                  self.callback_request_device_info_from_mlops)
+        self.add_message_listener(self.topic_request_job_status, self.callback_request_job_status)
+        self.add_message_listener(self.topic_request_device_status_in_job, self.callback_request_device_status_in_job)
+        self.add_message_listener(self.topic_send_training_request_to_edges,
+                                  self.callback_send_training_request_to_edges)
+
+    @abstractmethod
+    def _get_job_runner_manager(self):
+        return None
+
+    @abstractmethod
+    def _init_extra_items(self):
+        pass
+
+    def add_subscribe_topic(self, topic):
+        self.subscribed_topics.append(topic)
+
+    def on_agent_communication_connected(self, mqtt_client_object):
+        super().on_agent_communication_connected(mqtt_client_object)
+
+        if self.run_as_cloud_server:
+            # Start the FedML cloud server
+            message_bytes = self.args.runner_cmd.encode("ascii")
+            base64_bytes = base64.b64decode(message_bytes)
+            payload = base64_bytes.decode("ascii")
+            self.receive_message_json(self.topic_start_train, payload)
+
+    def callback_start_train(self, topic=None, payload=None):
+        # Fetch config from MLOps
+        # noinspection PyBroadException
+        try:
+            MLOpsConfigs.fetch_all_configs()
+        except Exception:
+            pass
+
+        # Parse the message when running in the cloud server mode.
+        if self.run_as_cloud_server:
+            message_bytes = payload.encode("ascii")
+            base64_bytes = base64.b64decode(message_bytes)
+            payload = base64_bytes.decode("ascii")
+
+        # Parse the parameters
+        # [NOTES] Example Request JSON:
+        # https://fedml-inc.larksuite.com/wiki/ScnIwUif9iupbjkYS0LuBrd6sod#WjbEdhYrvogmlGxKTOGu98C6sSb
+        request_json = json.loads(payload)
+        is_retain = request_json.get("is_retain", False)
+        if is_retain:
+            return
+        run_id = request_json["runId"]
+        run_id_str = str(run_id)
+
+        # Process the log when running in the edge server mode.
+        if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent:
+            # Start log processor for current run
+            self.args.run_id = run_id
+            self.args.edge_id = self.edge_id
+            MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+            MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
+                run_id, self.edge_id, SchedulerConstants.get_log_source(request_json))
+        # Process the log when running in the cloud agent mode.
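+        # (In that mode the log processor is keyed by the server_id carried in the
+        # request rather than by this agent's own edge_id.)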
+ elif self.run_as_cloud_agent: + # Start log processor for current run + MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( + run_id, request_json.get("server_id", "0"), SchedulerConstants.get_log_source(request_json) + ) + # Process the log when running in the cloud server mode. + elif self.run_as_cloud_server: + # Parse the parameters. + self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id) + run_id = request_json["runId"] + run_id_str = str(run_id) + + # Start log processor for current run. + self.args.run_id = run_id + MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( + run_id, self.edge_id, SchedulerConstants.get_log_source(request_json)) + + # Print the payload + logging.info("callback_start_train payload: {}".format(payload)) + logging.info( + f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" + ) + + # Save the parameters + self.start_request_json = payload + self.run_id = run_id + self.request_json = request_json + self.running_request_json[run_id_str] = request_json + edge_id_list = request_json.get("edgeids", list()) + self.run_edge_ids[run_id_str] = edge_id_list + + # report server running status to master agent + if not self.run_as_cloud_server: + self.mlops_metrics.report_server_id_status( + run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + + # Start server with multiprocessing mode + if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: + self.init_job_task(request_json) + + self.args.run_id = run_id + + self._get_job_runner_manager().start_job_runner( + run_id, request_json, args=self.args, edge_id=self.edge_id, + sender_message_queue=self.message_center.get_sender_message_queue(), + listener_message_queue=self.get_listener_message_queue(), + status_center_queue=self.get_status_queue() + ) + + process = self._get_job_runner_manager().get_runner_process(run_id) + if process is not None: + GeneralConstants.save_run_process(run_id, process.pid, is_master=True) + elif self.run_as_cloud_agent: + self.init_job_task(request_json) + + self._get_job_runner_manager().start_job_runner( + run_id, request_json, args=self.args, edge_id=self.edge_id, + sender_message_queue=self.message_center.get_sender_message_queue(), + listener_message_queue=self.get_listener_message_queue(), + status_center_queue=self.get_status_queue(), should_start_cloud_server=True, + use_local_process_as_cloud_server=self.use_local_process_as_cloud_server + ) + + process = self._get_job_runner_manager().get_runner_process(run_id, is_cloud_server=True) + if process is not None: + GeneralConstants.save_run_process(run_id, process.pid, is_master=True) + elif self.run_as_cloud_server: + self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id) + self.start_request_json = json.dumps(request_json) + run_id = request_json["runId"] + run_id_str = str(run_id) + + self.init_job_task(request_json) + + self.args.run_id = run_id + + self._get_job_runner_manager().start_job_runner( + run_id, request_json, args=self.args, edge_id=self.edge_id, + sender_message_queue=self.message_center.get_sender_message_queue(), + listener_message_queue=self.get_listener_message_queue(), + status_center_queue=self.get_status_queue() + ) + + def callback_stop_train(self, topic, payload, use_payload=None): + # Print the payload + logging.info( + f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" + ) + + # Parse the parameters. 
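+        # The stop request may carry the run id under either "runId" or "id";
+        # both forms are handled below.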
+ request_json = json.loads(payload) + run_id = request_json.get("runId", None) + run_id = request_json.get("id", None) if run_id is None else run_id + run_id_str = str(run_id) + + # Broadcast the job status to all edges + self.rebuild_status_center(self.get_status_queue()) + self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED) + + # Cleanup the cached object + if self.running_request_json.get(run_id_str, None) is not None: + self.running_request_json.pop(run_id_str) + + # Stop the job runner + self._get_job_runner_manager().stop_job_runner(run_id) + + def callback_run_logs(self, topic, payload): + run_id = str(topic).split('/')[-1] + run_id_str = str(run_id) + self._get_job_runner_manager().callback_run_logs(run_id, topic, payload) + + def callback_run_metrics(self, topic, payload): + run_id = str(topic).split('/')[-1] + run_id_str = str(run_id) + self._get_job_runner_manager().callback_run_metrics(run_id, topic, payload) + + def callback_edge_status(self, topic, payload): + self.send_status_message(topic, payload) + + def callback_report_current_status(self, topic, payload): + logging.info( + f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" + ) + + if self.run_as_edge_server_and_agent: + self.send_agent_active_msg(self.edge_id) + elif self.run_as_cloud_agent: + self.send_agent_active_msg(self.edge_id) + elif self.run_as_cloud_server: + pass + + @staticmethod + def callback_server_ota_msg(topic, payload): + logging.info( + f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" + ) + + request_json = json.loads(payload) + cmd = request_json["cmd"] + + if cmd == GeneralConstants.FEDML_OTA_CMD_UPGRADE: + # noinspection PyBroadException + try: + FedMLOtaUpgrade.process_ota_upgrade_msg() + # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start() + raise Exception("After upgraded, restart runner...") + except Exception as e: + pass + elif cmd == GeneralConstants.FEDML_OTA_CMD_RESTART: + raise Exception("Restart runner...") + + def callback_response_device_info(self, topic, payload): + # Parse payload + payload_json = json.loads(payload) + run_id = payload_json.get("run_id", 0) + context = payload_json.get("context", None) + master_device_id = payload_json.get("master_device_id", 0) + slave_device_id = payload_json.get("slave_device_id", 0) + slave_device_id_list = payload_json.get("slave_device_id_list", 0) + edge_id = payload_json.get("edge_id", 0) + device_info = payload_json.get("edge_info", 0) + device_info["master_device_id"] = master_device_id + device_info["slave_device_id"] = slave_device_id + device_info["slave_device_id_list"] = slave_device_id_list + run_id_str = str(run_id) + + # Put device info into a multiprocessing queue so master runner checks if all edges are ready + if context is None: + self._get_job_runner_manager().put_run_edge_device_info_to_queue(run_id, device_info) + + # if self.run_edge_device_info_global_queue is None: + # self.run_edge_device_info_global_queue = Array('i', list()) + # + # self.run_edge_device_info_global_queue[len(self.run_edge_device_info_global_queue)] = \ + # {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info} + + request_json = self.running_request_json.get(str(run_id), None) + if request_json is not None: + self.deploy_job_launcher.check_model_device_ready_and_deploy( + request_json, run_id, master_device_id, slave_device_id, run_edge_ids=self.run_edge_ids) + + def callback_request_device_info_from_mlops(self, topic, payload): + 
+    def callback_request_device_info_from_mlops(self, topic, payload):
+        self.response_device_info_to_mlops(topic, payload)
+
+    def callback_request_job_status(self, topic, payload):
+        self.response_job_status(topic, payload)
+
+    def callback_request_device_status_in_job(self, topic, payload):
+        self.response_device_status_in_job(topic, payload)
+
+    def callback_send_training_request_to_edges(self, topic, payload):
+        payload_json = json.loads(payload)
+        self.send_training_request_to_edges(active_edge_info_dict=payload_json)
+
+    def generate_protocol_manager(self):
+        # Copy only the picklable state onto a fresh instance for the spawned process.
+        message_status_runner = self._generate_protocol_manager_instance(
+            self.args, agent_config=self.agent_config
+        )
+        message_status_runner.async_check_timeout = self.async_check_timeout
+        message_status_runner.enable_async_cluster = self.enable_async_cluster
+        message_status_runner.request_json = self.request_json
+        message_status_runner.run_edge_ids = self.run_edge_ids
+        message_status_runner.version = self.version
+        message_status_runner.message_center_name = self.message_center_name
+        message_status_runner.run_id = self.run_id
+        message_status_runner.edge_id = self.edge_id
+        message_status_runner.server_agent_id = self.server_agent_id
+        message_status_runner.current_device_id = self.current_device_id
+        message_status_runner.unique_device_id = self.unique_device_id
+        message_status_runner.subscribed_topics = self.subscribed_topics
+        message_status_runner.run_as_cloud_agent = self.run_as_cloud_agent
+        message_status_runner.run_as_cloud_server = self.run_as_cloud_server
+        message_status_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent
+        message_status_runner.run_as_cloud_server_and_agent = self.run_as_cloud_server_and_agent
+        message_status_runner.enable_simulation_cloud_agent = self.enable_simulation_cloud_agent
+        message_status_runner.use_local_process_as_cloud_server = self.use_local_process_as_cloud_server
+        message_status_runner.running_request_json = self.running_request_json
+        message_status_runner.start_request_json = self.start_request_json
+        message_status_runner.user_name = self.user_name
+        message_status_runner.status_queue = self.get_status_queue()
+
+        return message_status_runner
+
+    def response_job_status(self, topic, payload):
+        if self.mlops_metrics is None:
+            return
+        payload_json = json.loads(payload)
+        run_id = payload_json.get("run_id", None)
+        edge_id = payload_json.get("edge_id", None)
+        if run_id is None or edge_id is None:
+            return
+        response_topic = f"master_agent/somewhere/response_job_status/{edge_id}"
+        response_payload = {
+            "run_id": run_id,
+            "master_agent": self.edge_id,
+            "edge_id": edge_id,
+            "job_status": ComputeCacheManager.get_instance().get_status_cache().get_job_status(),
+            "fedml_version": fedml.__version__
+        }
+        self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
+
+    def response_device_status_in_job(self, topic, payload):
+        if self.mlops_metrics is None:
+            return
+        payload_json = json.loads(payload)
+        run_id = payload_json.get("run_id", None)
+        edge_id = payload_json.get("edge_id", None)
+        if run_id is None or edge_id is None:
+            return
+        response_topic = f"master_agent/somewhere/response_device_status_in_job/{edge_id}"
+        response_payload = {
+            "run_id": run_id,
+            "master_agent": self.edge_id,
+            "edge_id": edge_id,
+            "device_status_in_job":
+                ComputeCacheManager.get_instance().get_status_cache().get_device_status_in_job(run_id, edge_id),
+            "fedml_version": fedml.__version__
+        }
+        self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
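`generate_protocol_manager` exists because the protocol manager is rebuilt inside a spawned process: only plain, picklable state is copied field by field onto a fresh instance, while live MQTT connections and threads stay behind. The same idea in miniature (a hypothetical dataclass, not the FedML implementation):

```python
import copy
from dataclasses import dataclass, field


@dataclass
class MiniProtocolManagerState:
    run_id: int = 0
    edge_id: int = 0
    subscribed_topics: list = field(default_factory=list)
    running_request_json: dict = field(default_factory=dict)


def generate_for_subprocess(current):
    """Copy only plain, picklable fields; sockets and threads stay behind."""
    clone = MiniProtocolManagerState()
    clone.run_id = current.run_id
    clone.edge_id = current.edge_id
    clone.subscribed_topics = copy.deepcopy(current.subscribed_topics)
    clone.running_request_json = copy.deepcopy(current.running_request_json)
    return clone


clone = generate_for_subprocess(MiniProtocolManagerState(run_id=7, edge_id=3))
print(clone)
```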
+    def response_device_info_to_mlops(self, topic, payload):
+        response_topic = "master_agent/mlops/response_device_info"
+        payload_json = json.loads(payload)
+        need_gpu_info = payload_json.get("need_gpu_info", False)
+        if self.mlops_metrics is None:
+            return
+        if not need_gpu_info:
+            response_payload = {
+                "run_id": self.run_id,
+                "master_agent_device_id": self.edge_id,
+                "fedml_version": fedml.__version__
+            }
+        else:
+            total_mem, free_mem, total_disk_size, free_disk_size, cpu_utilization, cpu_cores, \
+                gpu_cores_total, gpu_cores_available, sent_bytes, recv_bytes, _ = \
+                sys_utils.get_sys_realtime_stats()
+            # Prefer the scheduler's view of available GPUs over the raw system stats.
+            gpu_available_ids = JobRunnerUtils.get_instance().get_available_gpu_id_list(self.edge_id)
+            gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
+            gpu_cores_available = len(gpu_available_ids)
+            response_payload = {
+                "run_id": self.run_id,
+                "master_agent_device_id": self.edge_id,
+                "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
+                "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
+                "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
+                "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
+                "cpuUtilization": round(cpu_utilization, 2),
+                "cpuCores": cpu_cores,
+                "gpuCoresTotal": gpu_cores_total,
+                "gpuCoresAvailable": gpu_cores_available,
+                "networkTraffic": sent_bytes + recv_bytes,
+                "timestamp": int(MLOpsUtils.get_ntp_time()),
+                "fedml_version": fedml.__version__
+            }
+        self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload))
+
+    def init_job_task(self, request_json):
+        run_id = request_json["runId"]
+        edge_ids = request_json["edgeids"]
+        server_id = request_json["server_id"]
+        if self.run_as_cloud_agent:
+            server_id = self.edge_id
+
+        self.setup_listeners_for_edge_status(run_id, edge_ids, server_id)
+        self.setup_listener_for_run_metrics(run_id)
+        self.setup_listener_for_run_logs(run_id)
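The GPU branch of `response_device_info_to_mlops` relies on FedML's `sys_utils.get_sys_realtime_stats` helper. Roughly equivalent host metrics can be collected with the third-party `psutil` package (a sketch: the field names follow the payload above, and the GPU fields are omitted because psutil does not report them):

```python
import psutil


def collect_host_stats():
    mem = psutil.virtual_memory()
    disk = psutil.disk_usage("/")
    net = psutil.net_io_counters()
    bytes_to_gb = 1.0 / (1024 ** 3)
    return {
        "memoryTotal": round(mem.total * bytes_to_gb, 2),
        "memoryAvailable": round(mem.available * bytes_to_gb, 2),
        "diskSpaceTotal": round(disk.total * bytes_to_gb, 2),
        "diskSpaceAvailable": round(disk.free * bytes_to_gb, 2),
        "cpuUtilization": round(psutil.cpu_percent(interval=0.1), 2),
        "cpuCores": psutil.cpu_count(),
        "networkTraffic": net.bytes_sent + net.bytes_recv,
    }


if __name__ == "__main__":
    print(collect_host_stats())
```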
+    @debug
+    def send_training_request_to_edges(self, active_edge_info_dict=None):
+        run_id = self.request_json["runId"]
+        edge_id_list = self.request_json["edgeids"]
+        run_config = self.request_json.get("run_config", {})
+        run_params = run_config.get("parameters", {})
+        job_yaml = run_params.get("job_yaml", {})
+        job_yaml_default_none = run_params.get("job_yaml", None)
+        computing = job_yaml.get("computing", {})
+        request_num_gpus = computing.get("minimum_num_gpus", None)
+        job_gpu_id_list = self.request_json.get("job_gpu_id_list", None)
+        assigned_gpu_num_dict = dict()
+        assigned_gpu_ids_dict = dict()
+        master_node_addr = ""
+        master_node_port = 0
+
+        logging.info("Send training request to edge ids: " + str(edge_id_list))
+
+        should_match_gpu = False
+        if job_yaml_default_none is not None and request_num_gpus is not None and \
+                int(request_num_gpus) > 0 and active_edge_info_dict is not None:
+            should_match_gpu = True
+            SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True)
+
+            # Match and assign gpus to each device
+            assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices(
+                request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list)
+            if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None:
+                # If no resources are available, send a failed message to MLOps and an exception message to all edges.
+                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
+                    active_edge_info_dict, should_print=True)
+                err_info = f"No resources available. " \
+                           f"Total available GPU count {gpu_available_count} is less than " \
+                           f"the requested GPU count {request_num_gpus}."
+                logging.error(err_info)
+
+                # Bug fix: this mqtt message needs to be sent so the platform can clean up the failed run and change
+                # the status from running to failed.
+                self.mlops_metrics.report_server_training_status(
+                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id
+                )
+
+                self.status_reporter.report_server_id_status(
+                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
+                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
+                self.report_exception_status(run_id)
+
+                serving_args = job_yaml.get("serving_args", {})
+                endpoint_id = serving_args.get("endpoint_id", None)
+                if endpoint_id is not None:
+                    fedml.mlops.log_endpoint_status(
+                        endpoint_id, device_client_constants.ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
+                    fedml.mlops.log_run_log_lines(
+                        endpoint_id, 0, [err_info],
+                        log_source=device_client_constants.ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT
+                    )
+                return
+
+            # Generate the master node addr and port
+            master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list,
+                                                                                       active_edge_info_dict)
+
+            # Generate the new edge id list after matching
+            edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict)
+            if len(edge_id_list) <= 0:
+                gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
+                    active_edge_info_dict, should_print=True)
+                logging.error(f"Request parameter for the GPU num is invalid. "
+                              f"Total available GPU count: {gpu_available_count}. "
+                              f"Requested GPU num: {request_num_gpus}.")
+                self.status_reporter.report_server_id_status(
+                    run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
+                    server_id=self.edge_id, server_agent_id=self.server_agent_id)
+                self.report_exception_status(run_id)
+                return
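`SchedulerMatcher.match_and_assign_gpu_resources_to_devices` decides whether the run can proceed; when it returns `None`, the failure branch above reports the run as failed. A simplified greedy version of that matching, for intuition only (the real matcher also honors `job_gpu_id_list` and master-node selection):

```python
def match_gpus(request_num_gpus, edge_gpu_info):
    """Greedily take GPUs from edges until the request is satisfied.

    edge_gpu_info: {edge_id: [available_gpu_ids]}. Returns
    ({edge_id: num}, {edge_id: [gpu_ids]}) or (None, None) if short.
    """
    remaining = request_num_gpus
    assigned_num, assigned_ids = {}, {}
    for edge_id, gpu_ids in edge_gpu_info.items():
        if remaining <= 0:
            break
        take = min(remaining, len(gpu_ids))
        if take > 0:
            assigned_num[edge_id] = take
            assigned_ids[edge_id] = gpu_ids[:take]
            remaining -= take
    if remaining > 0:
        return None, None  # triggers the failed-run reporting above
    return assigned_num, assigned_ids


print(match_gpus(3, {101: [0, 1], 102: [0]}))  # ({101: 2, 102: 1}, {101: [0, 1], 102: [0]})
print(match_gpus(5, {101: [0, 1], 102: [0]}))  # (None, None)
```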
+        if should_match_gpu:
+            # Report the assigned gpu num and related infos to MLOps.
+            serving_args = job_yaml.get("serving_args", {})
+            endpoint_id = serving_args.get("endpoint_id", None)
+            if endpoint_id is not None:
+                endpoint_info = list()
+                for edge_id_item, gpu_num in assigned_gpu_num_dict.items():
+                    edge_info = active_edge_info_dict.get(str(edge_id_item), {})
+                    endpoint_info.append({
+                        "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num,
+                        "master_deploy_id": edge_info.get("master_device_id", 0),
+                        "slave_deploy_id": edge_info.get("slave_device_id", 0)})
+                topic_name = "compute/mlops/endpoint"
+                endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info}
+                logging.info(f"endpoint_info_json {endpoint_info_json}")
+                self.message_center.send_message(topic_name, json.dumps(endpoint_info_json))
+
+        client_rank = 1
+        for edge_id in edge_id_list:
+            topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train"
+            logging.info("start_train: send topic " + topic_start_train + " to client...")
+            request_json = self.request_json
+            request_json["client_rank"] = client_rank
+            client_rank += 1
+
+            if active_edge_info_dict is not None:
+                edge_info = active_edge_info_dict.get(str(edge_id), {})
+                model_master_device_id = edge_info.get("master_device_id", None)
+                model_slave_device_id = edge_info.get("slave_device_id", None)
+                model_slave_device_id_list = edge_info.get("slave_device_id_list", None)
+
+                if should_match_gpu:
+                    request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler(
+                        edge_id, edge_id_list, master_node_addr, master_node_port,
+                        assigned_gpu_num_dict, assigned_gpu_ids_dict,
+                        model_master_device_id=model_master_device_id,
+                        model_slave_device_id=model_slave_device_id,
+                        model_slave_device_id_list=model_slave_device_id_list
+                    )
+
+            self.message_center.send_message(topic_start_train, json.dumps(request_json))
+
+    def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id):
+        edge_status_topic = "fl_client/flclient_agent_" + str(server_id) + "/status"
+        payload = {"run_id": run_id, "init_all_edge_id_list": edge_ids, "init_server_id": server_id}
+        self.callback_edge_status(edge_status_topic, json.dumps(payload))
+
+        for edge_id in edge_ids:
+            edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status"
+            self.add_message_listener(edge_status_topic, self.callback_edge_status)
+            self.subscribe_msg(edge_status_topic)
+
+    def remove_listeners_for_edge_status(self, edge_ids=None):
+        if edge_ids is None:
+            edge_ids = self.request_json["edgeids"]
+
+        for edge_id in edge_ids:
+            edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status"
+            self.unsubscribe_msg(edge_status_topic)
+
+    def setup_listener_for_run_metrics(self, run_id):
+        metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}"
+        self.add_message_listener(metric_topic, self.callback_run_metrics)
+        self.subscribe_msg(metric_topic)
+
+    def remove_listener_for_run_metrics(self, run_id):
+        metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}"
+        self.unsubscribe_msg(metric_topic)
+
+    def setup_listener_for_run_logs(self, run_id):
+        logs_topic = f"fedml_slave/fedml_master/logs/{run_id}"
+        self.add_message_listener(logs_topic, self.callback_run_logs)
+        self.subscribe_msg(logs_topic)
+
+    def remove_listener_for_run_logs(self, run_id):
+        logs_topic = f"fedml_slave/fedml_master/logs/{run_id}"
+        self.unsubscribe_msg(logs_topic)
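The listener helpers above spell out each MQTT topic twice, once for setup and once for removal. The topic grammar is simple enough to centralize; a possible refactor sketched from the topics used in this class (not part of the patch):

```python
def edge_status_topic(edge_id):
    return f"fl_client/flclient_agent_{edge_id}/status"


def run_metrics_topic(run_id):
    return f"fedml_slave/fedml_master/metrics/{run_id}"


def run_logs_topic(run_id):
    return f"fedml_slave/fedml_master/logs/{run_id}"


def start_train_topic(edge_id):
    return f"flserver_agent/{edge_id}/start_train"


def stop_train_topic(edge_id):
    return f"flserver_agent/{edge_id}/stop_train"


assert run_metrics_topic(42) == "fedml_slave/fedml_master/metrics/42"
```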
+    def send_training_stop_request_to_edges(
+            self, edge_id_list, payload=None, run_id=0):
+        if payload is None:
+            payload_obj = {"runId": run_id, "edgeids": edge_id_list}
+        else:
+            payload_obj = json.loads(payload)
+
+        for edge_id in edge_id_list:
+            topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
+            logging.info("stop_train: send topic " + topic_stop_train)
+            self.message_center.send_message(topic_stop_train, json.dumps(payload_obj))
+
+    def send_training_stop_request_to_specific_edge(self, edge_id, payload):
+        topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train"
+        logging.info("stop_train: send topic " + topic_stop_train)
+        self.message_center.send_message(topic_stop_train, payload)
+
+    def report_exception_status(self, run_id):
+        self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION)
+
+    @staticmethod
+    def get_start_train_topic_with_edge_id(edge_id):
+        return "mlops/flserver_agent_" + str(edge_id) + "/start_train"
+
+    @abstractmethod
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        return None
diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py
new file mode 100755
index 0000000000..ed39707034
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py
@@ -0,0 +1,164 @@
+import base64
+import json
+import logging
+import os
+import traceback
+
+from fedml.computing.scheduler.comm_utils.sys_utils import get_python_program
+
+
+class FedMLCloudServerManager:
+    FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-"
+    LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos'
+    STATUS_IDLE = "IDLE"
+
+    def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_config=None, version=None):
+        self.server_docker_image = None
+        self.args = args
+        self.run_id = run_id
+        self.edge_id = edge_id
+        self.request_json = request_json
+        self.agent_config = agent_config
+        self.version = version
+        image_version = self.version
+        if image_version == "local":
+            image_version = "dev"
+        self.server_docker_base_image = "/fedml-device-image:" + image_version
+        self.cloud_server_name = None
+
+    @staticmethod
+    def start_local_cloud_server(user, version, cloud_device_id, runner_cmd_encoded):
+        logging.info(f"start cloud server, device id {cloud_device_id}, runner cmd {runner_cmd_encoded}")
+        pip_source_dir = os.path.dirname(__file__)
+        login_cmd = os.path.join(pip_source_dir, "server_login.py")
+        run_cmd = f"{get_python_program()} -W ignore {login_cmd} -t login -r cloud_server -u {str(user)} " \
+                  f"-v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}"
+        os.system(run_cmd)
+
+    def start_cloud_server_process_entry(self):
+        try:
+            self.start_cloud_server_process()
+        except Exception as e:
+            logging.error(f"Failed to start the cloud server. {traceback.format_exc()}")
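`start_local_cloud_server` forwards the entire start-train request to the spawned cloud server through the `-rc` argument, base64-encoded so it survives shell quoting; `start_cloud_server` below produces the encoding. The round trip, in isolation:

```python
import base64
import json

request_json = {"runId": 7, "server_id": 1000, "run_config": {}}

# Sender side: what start_cloud_server does before launching the server.
message_bytes = json.dumps(request_json).encode("ascii")
runner_cmd_encoded = base64.b64encode(message_bytes).decode("ascii")

# Receiver side: what the cloud server login does with the -rc argument.
decoded = json.loads(base64.b64decode(runner_cmd_encoded).decode("ascii"))
assert decoded == request_json
```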
+    def start_cloud_server_process(self):
+        run_config = self.request_json["run_config"]
+        packages_config = run_config["packages_config"]
+        self.start_cloud_server(packages_config)
+
+    def start_cloud_server(self, packages_config):
+        server_id = self.request_json["server_id"]
+        self.cloud_server_name = f"{FedMLCloudServerManager.FEDML_CLOUD_SERVER_PREFIX}{self.run_id}-{server_id}"
+        self.server_docker_image = (
+            self.agent_config["docker_config"]["registry_server"]
+            + self.agent_config["docker_config"]["registry_dir"]
+            + self.server_docker_base_image
+        )
+
+        logging.info("docker image {}".format(self.server_docker_image))
+        # logging.info("file_sys_driver {}".format(self.agent_config["docker_config"]["file_sys_driver"]))
+
+        registry_secret_cmd = (
+            "kubectl create namespace fedml-devops-aggregator-"
+            + self.version
+            + ";kubectl -n fedml-devops-aggregator-"
+            + self.version
+            + " delete secret secret-"
+            + self.cloud_server_name
+            + " ;kubectl create secret docker-registry secret-"
+            + self.cloud_server_name
+            + " --docker-server="
+            + self.agent_config["docker_config"]["registry_server"]
+            + " --docker-username="
+            + self.agent_config["docker_config"]["user_name"]
+            + " --docker-password=$(aws ecr-public get-login-password --region "
+            + self.agent_config["docker_config"]["public_cloud_region"]
+            + ")"
+            + " --docker-email=fedml@fedml.ai -n fedml-devops-aggregator-"
+            + self.version
+        )
+        logging.info("Create secret cmd: " + registry_secret_cmd)
+        os.system(registry_secret_cmd)
+
+        message_bytes = json.dumps(self.request_json).encode("ascii")
+        base64_bytes = base64.b64encode(message_bytes)
+        runner_cmd_encoded = base64_bytes.decode("ascii")
+        logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded))
+        # logging.info("runner_cmd_decoded: {}".format(base64.b64decode(runner_cmd_encoded).decode()))
+        cur_dir = os.path.dirname(__file__)
+        run_deployment_cmd = (
+            "export FEDML_AGGREGATOR_NAME="
+            + self.cloud_server_name
+            + ";export FEDML_AGGREGATOR_SVC="
+            + self.cloud_server_name
+            + ";export FEDML_AGGREGATOR_VERSION="
+            + self.version
+            + ';export FEDML_AGGREGATOR_IMAGE_PATH="'
+            + self.server_docker_image
+            + '"'
+            + ";export FEDML_CONF_ID="
+            + self.cloud_server_name
+            + ";export FEDML_DATA_PV_ID="
+            + self.cloud_server_name
+            + ";export FEDML_DATA_PVC_ID="
+            + self.cloud_server_name
+            + ";export FEDML_REGISTRY_SECRET_SUFFIX="
+            + self.cloud_server_name
+            + ";export FEDML_ACCOUNT_ID=0"
+            + ";export FEDML_SERVER_DEVICE_ID="
+            + self.request_json.get("cloudServerDeviceId", "0")
+            + ";export FEDML_VERSION="
+            + self.version
+            + ";export FEDML_PACKAGE_NAME="
+            + packages_config.get("server", "")
+            + ";export FEDML_PACKAGE_URL="
+            + packages_config.get("serverUrl", "")
+            + ";export FEDML_RUNNER_CMD="
+            + runner_cmd_encoded
+            + ";envsubst < "
+            + os.path.join(cur_dir, "templates", "fedml-server-deployment.yaml")
+            + " | kubectl apply -f - "
+        )
+        logging.info("start run with k8s: " + run_deployment_cmd)
+        os.system(run_deployment_cmd)
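`start_cloud_server` builds one long shell string of `export` statements piped through `envsubst` and `kubectl`. The same effect can be achieved without the export chain by passing the variables through `subprocess` with an env dict; a sketch of that alternative (not what the patch does, and it still assumes `envsubst` and `kubectl` are on PATH):

```python
import os
import subprocess


def apply_deployment(template_path, variables):
    """Render a k8s template with envsubst and apply it, without 'export' chains."""
    env = {**os.environ, **{k: str(v) for k, v in variables.items()}}
    with open(template_path, "rb") as template:
        rendered = subprocess.run(
            ["envsubst"], stdin=template, env=env,
            capture_output=True, check=True).stdout
    subprocess.run(["kubectl", "apply", "-f", "-"], input=rendered, check=True)
```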
+    def stop_cloud_server(self):
+        self.cloud_server_name = FedMLCloudServerManager.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) \
+                                 + "-" + str(self.edge_id)
+        self.server_docker_image = (
+            self.agent_config["docker_config"]["registry_server"]
+            + self.agent_config["docker_config"]["registry_dir"]
+            + self.server_docker_base_image
+        )
+        delete_deployment_cmd = (
+            "export FEDML_AGGREGATOR_NAME="
+            + self.cloud_server_name
+            + ";export FEDML_AGGREGATOR_SVC="
+            + self.cloud_server_name
+            + ";export FEDML_AGGREGATOR_VERSION="
+            + self.version
+            + ';export FEDML_AGGREGATOR_IMAGE_PATH="'
+            + self.server_docker_image
+            + '"'
+            + ";export FEDML_CONF_ID="
+            + self.cloud_server_name
+            + ";export FEDML_DATA_PV_ID="
+            + self.cloud_server_name
+            + ";export FEDML_DATA_PVC_ID="
+            + self.cloud_server_name
+            + ";export FEDML_REGISTRY_SECRET_SUFFIX="
+            + self.cloud_server_name
+            + ";kubectl -n fedml-devops-aggregator-"
+            + self.version
+            + " delete deployment "
+            + self.cloud_server_name
+            + ";kubectl -n fedml-devops-aggregator-"
+            + self.version
+            + " delete svc "
+            + self.cloud_server_name
+            + ";kubectl -n fedml-devops-aggregator-"
+            + self.version
+            + " delete secret secret-"
+            + self.cloud_server_name
+        )
+        logging.info("stop run with k8s: " + delete_deployment_cmd)
+        os.system(delete_deployment_cmd)
diff --git a/python/fedml/computing/scheduler/master/deploy_job_launcher.py b/python/fedml/computing/scheduler/master/deploy_job_launcher.py
new file mode 100755
index 0000000000..e4af2a20be
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/deploy_job_launcher.py
@@ -0,0 +1,90 @@
+import json
+
+from fedml.computing.scheduler.comm_utils import sys_utils
+from fedml.computing.scheduler.model_scheduler import device_client_constants
+from fedml.computing.scheduler.model_scheduler.device_model_cards import FedMLModelCards
+from fedml.computing.scheduler.scheduler_entry.constants import Constants
+
+
+class FedMLDeployJobLauncher:
+    LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos'
+    STATUS_IDLE = "IDLE"
+
+    def __init__(self, edge_id=None):
+        self.edge_id = edge_id
+        self.run_model_device_ids = dict()
+
+    @staticmethod
+    def deploy_model(serving_devices, request_json, run_id):
+        run_config = request_json["run_config"]
+        run_params = run_config.get("parameters", {})
+        job_yaml = run_params.get("job_yaml", {})
+        job_type = job_yaml.get("job_type", None)
+        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
+        if job_type == Constants.JOB_TASK_TYPE_DEPLOY or job_type == Constants.JOB_TASK_TYPE_SERVE:
+            # computing = job_yaml.get("computing", {})
+            # num_gpus = computing.get("minimum_num_gpus", 1)
+            serving_args = run_params.get("serving_args", {})
+            model_id = serving_args.get("model_id", None)
+            model_name = serving_args.get("model_name", None)
+            model_version = serving_args.get("model_version", None)
+            # model_storage_url = serving_args.get("model_storage_url", None)
+            endpoint_name = serving_args.get("endpoint_name", None)
+            endpoint_id = serving_args.get("endpoint_id", None)
+            random = serving_args.get("random", "")
+            random_out = sys_utils.random2(random, "FEDML@9999GREAT")
+            random_list = random_out.split("FEDML@")
+            device_type = device_client_constants.ClientConstants.login_role_list[
+                device_client_constants.ClientConstants.LOGIN_MODE_FEDML_CLOUD_INDEX]
+            FedMLModelCards.get_instance().deploy_model(
+                model_name, device_type, json.dumps(serving_devices),
+                "", random_list[1], None,
+                in_model_id=model_id, in_model_version=model_version,
+                endpoint_name=endpoint_name, endpoint_id=endpoint_id, run_id=run_id)
+    def check_model_device_ready_and_deploy(self, request_json, run_id, master_device_id,
+                                            slave_device_id, run_edge_ids=None):
+        run_config = request_json["run_config"]
+        run_params = run_config.get("parameters", {})
+        job_yaml = run_params.get("job_yaml", {})
+        job_type = job_yaml.get("job_type", None)
+        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
+        if job_type != Constants.JOB_TASK_TYPE_DEPLOY and job_type != Constants.JOB_TASK_TYPE_SERVE:
+            return
+
+        # Init the model device ids for each run
+        run_id_str = str(run_id)
+        if self.run_model_device_ids.get(run_id_str, None) is None:
+            self.run_model_device_ids[run_id_str] = list()
+
+        # Append the master device and slave devices to the model devices map
+        self.run_model_device_ids[run_id_str].append({"master_device_id": master_device_id,
+                                                      "slave_device_id": slave_device_id})
+        model_device_ids = self.run_model_device_ids.get(run_id_str, None)
+        if model_device_ids is None:
+            return
+        if run_edge_ids is None:
+            return
+
+        # Check if all model devices are ready
+        if len(model_device_ids) != len(run_edge_ids.get(run_id_str, list())):
+            return
+
+        # Generate the model master ids and model slave device ids
+        device_master_ids = list()
+        device_slave_ids = list()
+        for device_ids in model_device_ids:
+            model_master_id = device_ids.get("master_device_id")
+            model_slave_id = device_ids.get("slave_device_id")
+            device_master_ids.append(model_master_id)
+            device_slave_ids.append(model_slave_id)
+
+        if len(device_master_ids) <= 0:
+            return
+
+        # Generate the serving devices for deploying
+        serving_devices = list()
+        serving_devices.append(device_master_ids[0])
+        serving_devices.extend(device_slave_ids)
+
+        # Start to deploy the model
+        FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id)
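The files that follow (`launch_job_runner.py`, `launch_job_runner_manager.py`, `master_agent.py`, `master_protocol_manager.py`) instantiate the agent -> protocol manager -> job runner manager -> job runner hierarchy for launch jobs. A condensed sketch of how the four layers delegate (hypothetical mini classes mirroring the overrides below):

```python
class BaseJobRunner:
    def run(self, run_id):
        print(f"base runner logic for run {run_id}")


class LaunchJobRunner(BaseJobRunner):
    pass  # launch-specific overrides go here


class LaunchJobRunnerManager:
    """Creates one runner per run; mirrors _generate_job_runner_instance."""

    def _generate_job_runner_instance(self, run_id):
        return LaunchJobRunner()

    def start_job_runner(self, run_id):
        self._generate_job_runner_instance(run_id).run(run_id)


class LaunchMasterProtocolManager:
    def _get_job_runner_manager(self):
        return LaunchJobRunnerManager()

    def callback_start_train(self, run_id):
        self._get_job_runner_manager().start_job_runner(run_id)


class LaunchMasterAgent:
    def login(self):
        LaunchMasterProtocolManager().callback_start_train(run_id=7)


LaunchMasterAgent().login()
```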
diff --git a/python/fedml/computing/scheduler/master/launch_job_runner.py b/python/fedml/computing/scheduler/master/launch_job_runner.py
new file mode 100755
index 0000000000..c28458fc0f
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/launch_job_runner.py
@@ -0,0 +1,44 @@
+
+from ..master.server_constants import ServerConstants
+from ..scheduler_core.general_constants import GeneralConstants
+from .base_master_job_runner import FedMLBaseMasterJobRunner
+
+
+class FedMLLaunchMasterJobRunner(FedMLBaseMasterJobRunner):
+
+    def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0,
+                 cuda_visible_gpu_ids_str=None):
+        FedMLBaseMasterJobRunner.__init__(
+            self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ServerConstants.get_data_dir(),
+            agent_package_download_dir=ServerConstants.get_package_download_dir(),
+            agent_package_unzip_dir=GeneralConstants.get_package_unzip_dir(ServerConstants.get_package_download_dir()),
+            agent_log_file_dir=ServerConstants.get_log_file_dir()
+        )
+
+    # Override
+    def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None):
+        return FedMLLaunchMasterJobRunner(
+            args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id
+        )
+
+    # Override
+    def _generate_extend_queue_list(self):
+        return None
+
+    # Override
+    def get_download_package_info(self, packages_config=None):
+        return super().get_download_package_info(packages_config)
+
+    # Override
+    def run_impl(
+            self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
+            run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
+            run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None,
+            status_center_queue=None
+    ):
+        super().run_impl(
+            edge_id_status_queue, edge_device_info_queue, run_metrics_queue,
+            run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue,
+            run_extend_queue_list=run_extend_queue_list, sender_message_queue=sender_message_queue,
+            listener_message_queue=listener_message_queue, status_center_queue=status_center_queue)
diff --git a/python/fedml/computing/scheduler/master/launch_job_runner_manager.py b/python/fedml/computing/scheduler/master/launch_job_runner_manager.py
new file mode 100755
index 0000000000..9e94b089a3
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/launch_job_runner_manager.py
@@ -0,0 +1,20 @@
+
+from fedml.core.common.singleton import Singleton
+from .launch_job_runner import FedMLLaunchMasterJobRunner
+from .base_master_job_runner_manager import FedMLBaseMasterJobRunnerManager
+
+
+class FedMLLaunchJobRunnerManager(FedMLBaseMasterJobRunnerManager, Singleton):
+    def __init__(self):
+        FedMLBaseMasterJobRunnerManager.__init__(self)
+
+    @staticmethod
+    def get_instance():
+        return FedMLLaunchJobRunnerManager()
+
+    # Override
+    def _generate_job_runner_instance(
+            self, args, run_id=None, request_json=None, agent_config=None, edge_id=None
+    ):
+        return FedMLLaunchMasterJobRunner(
+            args, run_id=run_id, request_json=request_json, agent_config=agent_config, edge_id=edge_id)
diff --git a/python/fedml/computing/scheduler/master/master_agent.py b/python/fedml/computing/scheduler/master/master_agent.py
new file mode 100755
index 0000000000..9bbf6eb982
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/master_agent.py
@@ -0,0 +1,28 @@
+
+from ..master.server_constants import ServerConstants
+from .server_data_interface import FedMLServerDataInterface
+from .master_protocol_manager import FedMLLaunchMasterProtocolManager
+from .base_master_agent import FedMLBaseMasterAgent
+
+
+class FedMLLaunchMasterAgent(FedMLBaseMasterAgent):
+
+    def __init__(self):
+        FedMLBaseMasterAgent.__init__(self)
+
+    # Override
+    def _get_log_file_dir(self):
+        return ServerConstants.get_log_file_dir()
+
+    # Override
+    def _save_agent_info(self, unique_device_id, edge_id):
+        ServerConstants.save_runner_infos(unique_device_id, edge_id)
+
+    # Override
+    def _init_database(self):
+        FedMLServerDataInterface.get_instance().create_job_table()
+
+    # Override
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        return FedMLLaunchMasterProtocolManager(args, agent_config=agent_config)
diff --git a/python/fedml/computing/scheduler/master/master_protocol_manager.py b/python/fedml/computing/scheduler/master/master_protocol_manager.py
new file mode 100755
index 0000000000..5eef5914e7
--- /dev/null
+++ b/python/fedml/computing/scheduler/master/master_protocol_manager.py
@@ -0,0 +1,36 @@
+from abc import ABC
+
+from .base_master_protocol_manager import FedMLBaseMasterProtocolManager
+from .launch_job_runner_manager import FedMLLaunchJobRunnerManager
+
+
+class FedMLLaunchMasterProtocolManager(FedMLBaseMasterProtocolManager, ABC):
+    def __init__(self, args, agent_config=None):
+        FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config)
+
+    # Override
+    def generate_topics(self):
+        super().generate_topics()
+
+    # Override
+    def add_protocol_handler(self):
+        super().add_protocol_handler()
+
+    # Override
+    def _generate_protocol_manager_instance(self, args, agent_config=None):
+        return FedMLLaunchMasterProtocolManager(args, agent_config=agent_config)
+
+    # Override
+    def _get_job_runner_manager(self):
+        return FedMLLaunchJobRunnerManager.get_instance()
+
+    # Override
+    def _init_extra_items(self):
+        # Restart the device realtime performance monitor process
+        self.mlops_metrics.stop_device_realtime_perf()
+        self.mlops_metrics.report_device_realtime_perf(
+            self.args, 
self.args.agent_config["mqtt_config"], is_client=False) + + # Override + def print_connected_info(self): + super().print_connected_info() diff --git a/python/fedml/computing/scheduler/master/server_login.py b/python/fedml/computing/scheduler/master/server_login.py index dee2c83236..3d8d1f6fc9 100755 --- a/python/fedml/computing/scheduler/master/server_login.py +++ b/python/fedml/computing/scheduler/master/server_login.py @@ -1,407 +1,11 @@ import argparse -import logging import os -import platform -import time -import traceback - -import click import fedml -from fedml.computing.scheduler.comm_utils import sys_utils -from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants -from fedml.computing.scheduler.master.server_runner import FedMLServerRunner -from fedml.computing.scheduler.master.server_constants import ServerConstants -from fedml.core.mlops.mlops_runtime_log import MLOpsRuntimeLog -from fedml.core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon - - -def __login_as_edge_server_and_agent(args, userid, version, api_key="", use_extra_device_id_suffix=None, role=None): - setattr(args, "account_id", userid) - setattr(args, "current_running_dir", ServerConstants.get_fedml_home_dir()) - - sys_name = platform.system() - if sys_name == "Darwin": - sys_name = "MacOS" - if hasattr(args, "os_name") and args.os_name is not None and args.os_name != "": - pass - else: - setattr(args, "os_name", sys_name) - setattr(args, "version", version) - setattr(args, "log_file_dir", ServerConstants.get_log_file_dir()) - is_from_docker = False - if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0": - setattr(args, "current_device_id", args.device_id) - is_from_docker = True - else: - setattr(args, "current_device_id", FedMLServerRunner.get_device_id()) - setattr(args, "config_version", version) - setattr(args, "cloud_region", "") - - # Create server runner for communication with the FedML client. - runner = FedMLServerRunner(args) - runner.run_as_edge_server_and_agent = True - - # Fetch configs from the MLOps config server. 
- service_config = dict() - config_try_count = 0 - edge_id = 0 - while config_try_count < 5: - try: - mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs() - service_config["mqtt_config"] = mqtt_config - service_config["s3_config"] = s3_config - service_config["ml_ops_config"] = mlops_config - service_config["docker_config"] = docker_config - runner.agent_config = service_config - log_server_url = mlops_config.get("LOG_SERVER_URL", None) - if log_server_url is not None: - setattr(args, "log_server_url", log_server_url) - setattr(runner.args, "log_server_url", log_server_url) - break - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - config_try_count += 1 - time.sleep(3) - continue - - if config_try_count >= 5: - click.echo("") - click.echo("[5] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return - - # Judge whether running from fedml docker hub - is_from_fedml_docker_hub = False - dock_loc_file = ServerConstants.get_docker_location_file() - if os.path.exists(dock_loc_file): - is_from_fedml_docker_hub = True - - # Build unique device id - if is_from_docker: - unique_device_id = args.current_device_id + "@" + args.os_name + ".Docker.Edge.Server" - else: - unique_device_id = args.current_device_id + "@" + args.os_name + ".Edge.Server" - setattr(args, "is_from_docker", is_from_docker) - - if is_from_fedml_docker_hub: - unique_device_id = args.current_device_id + "@" + args.os_name + ".DockerHub.Edge.Server" - - if use_extra_device_id_suffix is not None: - unique_device_id = args.current_device_id + "@" + args.os_name + use_extra_device_id_suffix - - # Bind account id to FedML® Nexus AI Platform - register_try_count = 0 - edge_id = -1 - user_name = None - while register_try_count < 5: - try: - edge_id, user_name, extra_url = runner.bind_account_and_device_id( - service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, unique_device_id, args.os_name, - api_key=api_key, role=role - ) - if edge_id > 0: - runner.edge_id = edge_id - break - except SystemExit as e: - click.echo("Your account does not exist. Please make sure your account correct.") - os.system("fedml logout -s") - return - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - register_try_count += 1 - time.sleep(3) - continue - - if edge_id <= 0: - click.echo("") - click.echo("[6] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return - setattr(args, "server_id", edge_id) - runner.args = args - runner.edge_id = edge_id - init_logs(args, edge_id) - - # Log arguments and binding results. - # logging.info("login: unique_device_id = %s" % str(unique_device_id)) - # logging.info("login: server_id = %s" % str(edge_id)) - runner.unique_device_id = unique_device_id - runner.user_name = user_name - ServerConstants.save_runner_infos(args.current_device_id + "." + args.os_name, edge_id) - - # Setup MQTT connection for communication with the FedML server. 
- try: - runner.setup_agent_mqtt_connection(service_config) - except Exception as e: - login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log") - with open(login_exit_file, "w") as f: - f.writelines(f"{os.getpid()}.") - runner.stop_agent() - raise e - - # Start mqtt looper - runner.start_agent_mqtt_loop() - - -def __login_as_cloud_agent(args, userid, version): - setattr(args, "account_id", userid) - setattr(args, "current_running_dir", ServerConstants.get_fedml_home_dir()) - - sys_name = platform.system() - if sys_name == "Darwin": - sys_name = "MacOS" - setattr(args, "os_name", sys_name) - setattr(args, "version", version) - setattr(args, "log_file_dir", ServerConstants.get_log_file_dir()) - if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0": - setattr(args, "current_device_id", args.device_id) - else: - setattr(args, "current_device_id", FedMLServerRunner.get_device_id()) - setattr(args, "config_version", version) - setattr(args, "cloud_region", "") - - # Create server runner for communication with the FedML client. - runner = FedMLServerRunner(args) - runner.run_as_cloud_agent = True - - # Fetch configs from the MLOps config server. - service_config = dict() - config_try_count = 0 - edge_id = 0 - while config_try_count < 5: - try: - mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs() - service_config["mqtt_config"] = mqtt_config - service_config["s3_config"] = s3_config - service_config["ml_ops_config"] = mlops_config - service_config["docker_config"] = docker_config - runner.agent_config = service_config - log_server_url = mlops_config.get("LOG_SERVER_URL", None) - if log_server_url is not None: - setattr(args, "log_server_url", log_server_url) - setattr(runner.args, "log_server_url", log_server_url) - break - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - config_try_count += 1 - time.sleep(3) - continue - - if config_try_count >= 5: - click.echo("") - click.echo("[7] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return - - # Build unique device id - if args.current_device_id is not None and len(str(args.current_device_id)) > 0: - unique_device_id = args.current_device_id + "@" + args.os_name + ".Public.Cloud" - - # Bind account id to FedML® Nexus AI Platform - register_try_count = 0 - if hasattr(args, "server_agent_id") and args.server_agent_id is not None: - edge_id = args.server_agent_id - else: - edge_id = -1 - user_name = None - while register_try_count < 5: - try: - edge_id, user_name, extra_url = runner.bind_account_and_device_id( - service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, unique_device_id, args.os_name - ) - if edge_id > 0: - runner.edge_id = edge_id - break - except SystemExit as e: - click.echo("Your account does not exist. 
Please make sure your account correct.") - os.system("fedml logout -s") - return - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - register_try_count += 1 - time.sleep(3) - continue - - if edge_id <= 0: - click.echo("") - click.echo("[8] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return - setattr(args, "server_id", edge_id) - runner.args = args - runner.edge_id = edge_id - init_logs(args, edge_id) - logging.info("args {}".format(args)) - - # Log arguments and binding results. - logging.info("login: unique_device_id = %s" % str(unique_device_id)) - logging.info("login: server_id = %s" % str(edge_id)) - runner.unique_device_id = unique_device_id - runner.user_name = "cloud_agent" if user_name is None else user_name - ServerConstants.save_runner_infos(args.current_device_id + "." + args.os_name, edge_id) - - # Setup MQTT connection for communication with the FedML server. - try: - runner.setup_agent_mqtt_connection(service_config) - except Exception as e: - login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log") - with open(login_exit_file, "w") as f: - f.writelines(f"{os.getpid()}.") - runner.stop_agent() - raise e - - # Start mqtt looper - runner.start_agent_mqtt_loop() - - -def __login_as_cloud_server(args, userid, version): - setattr(args, "account_id", userid) - setattr(args, "current_running_dir", ServerConstants.get_fedml_home_dir()) - - sys_name = platform.system() - if sys_name == "Darwin": - sys_name = "MacOS" - setattr(args, "os_name", sys_name) - setattr(args, "version", version) - setattr(args, "log_file_dir", ServerConstants.get_log_file_dir()) - if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0": - setattr(args, "current_device_id", args.device_id) - else: - setattr(args, "current_device_id", FedMLServerRunner.get_device_id()) - setattr(args, "config_version", version) - setattr(args, "cloud_region", "") - - # Create server runner for communication with the FedML client. - runner = FedMLServerRunner(args) - runner.run_as_cloud_server = True - - # Fetch configs from the MLOps config server. 
- service_config = dict() - config_try_count = 0 - edge_id = 0 - while config_try_count < 5: - try: - mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs() - service_config["mqtt_config"] = mqtt_config - service_config["s3_config"] = s3_config - service_config["ml_ops_config"] = mlops_config - service_config["docker_config"] = docker_config - runner.agent_config = service_config - log_server_url = mlops_config.get("LOG_SERVER_URL", None) - if log_server_url is not None: - setattr(args, "log_server_url", log_server_url) - setattr(runner.args, "log_server_url", log_server_url) - break - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - config_try_count += 1 - time.sleep(3) - continue - - if config_try_count >= 5: - click.echo("") - click.echo("[9] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return - - # Build unique device id - if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0": - unique_device_id = args.current_device_id - else: - unique_device_id = args.current_device_id + "@" + args.os_name + ".Public.Server" - - # Bind account id to FedML® Nexus AI Platform - register_try_count = 0 - edge_id = -1 - user_name = None - while register_try_count < 5: - try: - edge_id, user_name, extra_url = runner.bind_account_and_device_id( - service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, unique_device_id, args.os_name - ) - if edge_id > 0: - runner.edge_id = edge_id - break - except SystemExit as e: - click.echo("Your account does not exist. Please make sure your account correct.") - os.system("fedml logout -s") - return - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - register_try_count += 1 - time.sleep(3) - continue - - if edge_id <= 0: - click.echo("") - click.echo("[10] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return - setattr(args, "server_id", edge_id) - runner.args = args - runner.edge_id = edge_id - runner.user_name = "cloud_server" if user_name is None else user_name - init_logs(args, edge_id) - - # Log arguments and binding results. - logging.info("login: unique_device_id = %s" % str(unique_device_id)) - logging.info("login: server_id = %s" % str(edge_id)) - ServerConstants.save_runner_infos(args.current_device_id + "." + args.os_name, edge_id) - - # Echo results - print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!") - print( - "Your unique device ID is " - + str(unique_device_id) - + "\n" - ) - - # Setup MQTT connection for communication with the FedML server. 
- try: - runner.setup_agent_mqtt_connection(service_config) - except Exception as e: - login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log") - with open(login_exit_file, "w") as f: - f.writelines(f"{os.getpid()}.") - runner.stop_agent() - raise e - - # Start mqtt looper - runner.start_agent_mqtt_loop() - - -def init_logs(args, edge_id): - # Init runtime logs - args.log_file_dir = ServerConstants.get_log_file_dir() - args.run_id = 0 - args.role = "server" - args.edge_id = edge_id - setattr(args, "using_mlops", True) - setattr(args, "server_agent_id", edge_id) - MLOpsRuntimeLog.get_instance(args).init_logs() - - -def login(args): - if args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_LOCAL_INDEX]: - __login_as_edge_server_and_agent(args, args.user, args.version, api_key=args.api_key) - elif args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_CLOUD_AGENT_INDEX]: - __login_as_cloud_agent(args, args.user, args.version) - elif args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_CLOUD_SERVER_INDEX]: - __login_as_cloud_server(args, args.user, args.version) - elif args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_GPU_MASTER_SERVER_INDEX]: - __login_as_edge_server_and_agent(args, args.user, args.version, api_key=args.api_key, - use_extra_device_id_suffix=".Edge.GPU.MasterServer", role=args.role) +from fedml.computing.scheduler.master.master_agent import FedMLLaunchMasterAgent def logout(): - ServerConstants.cleanup_run_process(None) - sys_utils.cleanup_all_fedml_server_api_processes() + FedMLLaunchMasterAgent.logout() if __name__ == "__main__": @@ -432,7 +36,9 @@ def logout(): fedml.set_local_on_premise_platform_port(args.local_on_premise_platform_port) fedml.set_env_version(args.version) + master_agent = FedMLLaunchMasterAgent() if args.type == 'login': - login(args) + master_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id, + os_name=args.os_name, role=args.role) else: - logout() + master_agent.logout() diff --git a/python/fedml/computing/scheduler/master/server_runner.py b/python/fedml/computing/scheduler/master/server_runner.py deleted file mode 100755 index 0442c99972..0000000000 --- a/python/fedml/computing/scheduler/master/server_runner.py +++ /dev/null @@ -1,2767 +0,0 @@ -import base64 -import copy -import json -import logging -import platform -import queue -import sys - -import multiprocessing -from multiprocessing import Process, Queue, Value, Array -import os -import shutil -import stat -import subprocess -import threading - -import time -import traceback -import urllib -import uuid -import zipfile -from os import listdir -from urllib.parse import urljoin, urlparse - -import requests - -import fedml -from ..comm_utils.job_cleanup import JobCleanup -from ..scheduler_core.scheduler_matcher import SchedulerMatcher -from ..comm_utils.constants import SchedulerConstants -from ..comm_utils.job_utils import JobRunnerUtils -from ..comm_utils.run_process_utils import RunProcessUtils -from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog - -from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager -from ..comm_utils.yaml_utils import load_yaml_config -from ..slave.client_constants import ClientConstants -from ..master.server_constants import ServerConstants - -from ....core.mlops.mlops_metrics import MLOpsMetrics - -from ....core.mlops.mlops_configs import MLOpsConfigs -from ....core.mlops.mlops_runtime_log_daemon import 
MLOpsRuntimeLogDaemon -from ....core.mlops.mlops_status import MLOpsStatus -from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program -from ..comm_utils import sys_utils -from .server_data_interface import FedMLServerDataInterface -from ....core.mlops.mlops_utils import MLOpsUtils -from ..scheduler_entry.constants import Constants -from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner -from ..model_scheduler.device_model_cards import FedMLModelCards -from ..model_scheduler import device_client_constants -from ..scheduler_core.log_manager import LogsManager -from ..scheduler_core.metrics_manager import MetricsManager -from ..scheduler_core.master_api_daemon import MasterApiDaemon -from fedml.utils.debugging import debug -from ..scheduler_core.message_center import FedMLMessageCenter - - -class RunnerError(Exception): - """ Runner stopped. """ - pass - - -class RunnerCompletedError(Exception): - """ Runner completed. """ - pass - - -class FedMLServerRunner(FedMLMessageCenter): - FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-" - debug_cloud_server = False - - def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0): - super().__init__() - self.master_api_daemon = None - self.run_stop_process = None - self.run_stop_process_map = dict() - self.run_edge_id_status_queue_map = dict() - self.run_metrics_queue_map = dict() - self.run_events_queue_map = dict() - self.run_artifacts_queue_map = dict() - self.run_logs_queue_map = dict() - self.async_check_timeout = 0 - self.enable_async_cluster = False - self.origin_fedml_config_object = None - self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT - self.local_api_process = None - self.run_process_event = None - self.run_process_event_map = dict() - self.run_process_completed_event = None - self.run_process_completed_event_map = dict() - self.run_process_event_map_for_stop = dict() - self.edge_device_info_queue = None - self.run_edge_device_info_queue_map = dict() - self.run_edge_device_info_queue_map_for_stop = dict() - self.run_edge_device_info_global_queue = None - self.run_edge_device_info_global_queue_for_stop = None - self.run_process = None - self.run_process_map = dict() - self.start_request_json = None - self.server_docker_image = None - self.cloud_server_name = None - self.run_as_cloud_agent = False - self.run_as_cloud_server = False - self.run_as_edge_server_and_agent = False - self.run_as_cloud_server_and_agent = False - self.fedml_packages_base_dir = None - self.fedml_packages_unzip_dir = None - self.mqtt_mgr = None - self.running_request_json = dict() - self.run_id = run_id - self.unique_device_id = None - self.edge_id = edge_id - self.server_agent_id = 0 - if request_json is not None: - self.server_agent_id = request_json.get("server_id", 0) - self.process = None - self.args = args - self.request_json = copy.deepcopy(request_json) - self.version = args.version - self.device_id = args.device_id - self.cur_dir = os.path.split(os.path.realpath(__file__))[0] - if args.current_running_dir is not None: - self.cur_dir = args.current_running_dir - - image_version = self.version - if image_version == "local": - image_version = "dev" - self.server_docker_base_image = "/fedml-device-image:" + image_version - - self.agent_config = agent_config - self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data") - self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data") - self.fedml_data_dir = self.fedml_data_base_package_dir - 
self.fedml_config_dir = os.path.join("/", "fedml", "conf") - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = { - "${FEDSYS.RUN_ID}": "", - "${FEDSYS.PRIVATE_LOCAL_DATA}": "", - "${FEDSYS.CLIENT_ID_LIST}": "", - "${FEDSYS.SYNTHETIC_DATA_URL}": "", - "${FEDSYS.IS_USING_LOCAL_DATA}": "", - "${FEDSYS.CLIENT_NUM}": "", - "${FEDSYS.CLIENT_INDEX}": "", - "${FEDSYS.CLIENT_OBJECT_LIST}": "", - "${FEDSYS.LOG_SERVER_URL}": "", - } - - self.mlops_metrics = None - self.client_agent_active_list = dict() - self.server_active_list = dict() - self.run_status = None - self.ntp_offset = MLOpsUtils.get_ntp_offset() - self.runner_list = dict() - self.enable_simulation_cloud_agent = False - self.use_local_process_as_cloud_server = False - - self.model_device_server = None - self.run_model_device_ids = dict() - self.run_edge_ids = dict() - self.master_api_process = None - - self.subscribed_topics = list() - self.user_name = None - self.message_center = None - - def build_dynamic_constrain_variables(self, run_id, run_config): - data_config = run_config.get("data_config", {}) - server_edge_id_list = self.request_json["edgeids"] - is_using_local_data = 0 - private_data_dir = data_config.get("privateLocalData", "") - synthetic_data_url = data_config.get("syntheticDataUrl", "") - edges = self.request_json["edges"] - # if private_data_dir is not None \ - # and len(str(private_data_dir).strip(' ')) > 0: - # is_using_local_data = 1 - if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0: - params_config = run_config.get("parameters", None) - private_data_dir = ServerConstants.get_data_dir() - if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0: - synthetic_data_url = private_data_dir - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(server_edge_id_list).replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data) - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list) - client_objects = str(json.dumps(edges)) - client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"') - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][ - "LOG_SERVER_URL" - ] - - def unzip_file(self, zip_file, unzip_file_path) -> str: - unziped_file_name = "" - if zipfile.is_zipfile(zip_file): - with zipfile.ZipFile(zip_file, "r") as zipf: - zipf.extractall(unzip_file_path) - unziped_file_name = zipf.namelist()[0] - else: - raise Exception("Invalid zip file {}".format(zip_file)) - - return unziped_file_name - - def package_download_progress(self, count, blksize, filesize): - self.check_runner_stop_event() - - downloaded = count * blksize - downloaded = filesize if downloaded > filesize else downloaded - progress = (downloaded / filesize * 100) if filesize != 0 else 0 - progress_int = int(progress) - downloaded_kb = format(downloaded / 1024, '.2f') - - # since this hook funtion is stateless, we need a state to avoid printing progress repeatly - if count == 0: - self.prev_download_progress = 0 - if progress_int != self.prev_download_progress and 
progress_int % 5 == 0: - self.prev_download_progress = progress_int - logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) - - def retrieve_and_unzip_package(self, package_name, package_url): - local_package_path = ServerConstants.get_package_download_dir() - os.makedirs(local_package_path, exist_ok=True) - filename, filename_without_extension, file_extension = ServerConstants.get_filename_and_extension(package_url) - local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}") - if os.path.exists(local_package_file): - os.remove(local_package_file) - package_url_without_query_path = urljoin(package_url, urlparse(package_url).path) - urllib.request.urlretrieve(package_url_without_query_path, local_package_file, - reporthook=self.package_download_progress) - unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(), - f"unzip_fedml_run_{self.run_id}_{filename_without_extension}") - try: - shutil.rmtree(unzip_package_path, ignore_errors=True) - except Exception as e: - pass - - package_dir_name = self.unzip_file(local_package_file, unzip_package_path) # Using unziped folder name - unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name) - - logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format( - local_package_file, unzip_package_path, unzip_package_full_path)) - - return unzip_package_full_path - - def update_local_fedml_config(self, run_id, run_config): - packages_config = run_config["packages_config"] - - # Copy config file from the client - server_package_name = packages_config.get("server", None) - server_package_url = packages_config.get("serverUrl", None) - unzip_package_path = self.retrieve_and_unzip_package(server_package_name, server_package_url) - self.fedml_packages_unzip_dir = unzip_package_path - fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") - - # Load the above config to memory - config_from_container = load_yaml_config(fedml_local_config_file) - container_entry_file_config = config_from_container["entry_config"] - container_dynamic_args_config = config_from_container["dynamic_args"] - entry_file = container_entry_file_config["entry_file"] - conf_file = container_entry_file_config["conf_file"] - self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT) - full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file)) - - # Dynamically build constrain variable with realtime parameters from server - self.build_dynamic_constrain_variables(run_id, run_config) - - # Update entry arguments value with constrain variable values with realtime parameters from server - # currently we support the following constrain variables: - # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow - # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client - # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow - # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server, - # if this value is not null, the client will download data from this URL to use it as - # federated training data set - # ${FEDSYS_IS_USING_LOCAL_DATA}: whether use private local data as federated training data set - # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}" - for constrain_variable_key, constrain_variable_value in 
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items(): - for argument_key, argument_value in container_dynamic_args_config.items(): - if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0: - replaced_argument_value = str(argument_value).replace( - constrain_variable_key, str(constrain_variable_value) - ) - container_dynamic_args_config[argument_key] = replaced_argument_value - - # Merge all container new config sections as new config dictionary - package_conf_object = dict() - package_conf_object["entry_config"] = container_entry_file_config - package_conf_object["dynamic_args"] = container_dynamic_args_config - package_conf_object["dynamic_args"]["config_version"] = self.args.config_version - container_dynamic_args_config["mqtt_config_path"] = os.path.join( - unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"]) - ) - container_dynamic_args_config["s3_config_path"] = os.path.join( - unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"]) - ) - log_file_dir = ServerConstants.get_log_file_dir() - os.makedirs(log_file_dir, exist_ok=True) - package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir - - # Save new config dictionary to local file - fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") - ServerConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file) - - # Build dynamic arguments and set arguments to fedml config object - if not self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path): - return None, None - - return unzip_package_path, package_conf_object - - def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir): - fedml_conf_file = package_conf_object["entry_config"]["conf_file"] - fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep) - fedml_conf_path = os.path.join(base_dir, "fedml", "config", - os.path.basename(fedml_conf_file_processed)) - fedml_conf_object = load_yaml_config(fedml_conf_path) - self.origin_fedml_config_object = fedml_conf_object.copy() - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - - # Replace local fedml config objects with parameters from MLOps web - parameters_object = run_config.get("parameters", None) - if parameters_object is not None: - for config_k, config_v in fedml_conf_object.items(): - parameter_v = parameters_object.get(config_k, None) - if parameter_v is not None: - fedml_conf_object[config_k] = parameter_v - parameters_object.pop(config_k) - - for config_k, config_v in parameters_object.items(): - fedml_conf_object[config_k] = config_v - - package_dynamic_args = package_conf_object["dynamic_args"] - if fedml_conf_object.get("comm_args", None) is not None: - fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"] - fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"] - fedml_conf_object["common_args"]["using_mlops"] = True - if fedml_conf_object.get("train_args", None) is not None: - fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"] - fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"] - fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"]) - fedml_conf_object["train_args"]["client_num_per_round"] = 
int(package_dynamic_args["client_num_in_total"])
-            fedml_conf_object["train_args"]["server_id"] = self.edge_id
-            fedml_conf_object["train_args"]["server_agent_id"] = self.request_json.get("cloud_agent_id", self.edge_id)
-            fedml_conf_object["train_args"]["group_server_id_list"] = self.request_json.get("group_server_id_list",
-                                                                                            list())
-        if fedml_conf_object.get("device_args", None) is not None:
-            fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"])
-        # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"]
-        if fedml_conf_object.get("tracking_args", None) is not None:
-            fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"]
-            fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"]
-
-        bootstrap_script_path = None
-        env_args = fedml_conf_object.get("environment_args", None)
-        if env_args is not None:
-            bootstrap_script_file = env_args.get("bootstrap", None)
-            if bootstrap_script_file is not None:
-                bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep)
-                if platform.system() == 'Windows':
-                    # replace the .sh suffix with .bat (rstrip would strip a trailing character set, not the suffix)
-                    bootstrap_script_file = os.path.splitext(bootstrap_script_file)[0] + '.bat'
-                if bootstrap_script_file is not None:
-                    bootstrap_script_dir = os.path.join(base_dir, "fedml", os.path.dirname(bootstrap_script_file))
-                    bootstrap_script_path = os.path.join(
-                        bootstrap_script_dir, os.path.basename(bootstrap_script_file)
-                    )
-        # try:
-        #     os.makedirs(package_dynamic_args["data_cache_dir"], exist_ok=True)
-        # except Exception as e:
-        #     pass
-        fedml_conf_object["dynamic_args"] = package_dynamic_args
-
-        ServerConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path)
-
-        is_bootstrap_run_ok = True
-        try:
-            if bootstrap_script_path is not None:
-                if os.path.exists(bootstrap_script_path):
-                    bootstrap_stat = os.stat(bootstrap_script_path)
-                    if platform.system() == 'Windows':
-                        os.chmod(bootstrap_script_path,
-                                 bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
-                        bootstrap_scripts = "{}".format(bootstrap_script_path)
-                    else:
-                        os.chmod(bootstrap_script_path,
-                                 bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH)
-                        bootstrap_scripts = "cd {}; ./{}".format(bootstrap_script_dir,
-                                                                 os.path.basename(bootstrap_script_file))
-                    bootstrap_scripts = str(bootstrap_scripts).replace('\\', os.sep).replace('/', os.sep)
-                    logging.info("Bootstrap scripts are being executed...")
-                    shell_cmd_list = list()
-                    shell_cmd_list.append(bootstrap_scripts)
-                    process, error_list = ServerConstants.execute_commands_with_live_logs(
-                        shell_cmd_list, callback=self.callback_run_bootstrap)
-
-                    ret_code, out, err = process.returncode, None, None
-                    if ret_code is None or ret_code <= 0:
-                        if error_list is not None and len(error_list) > 0:
-                            is_bootstrap_run_ok = False
-                        else:
-                            if out is not None:
-                                out_str = sys_utils.decode_our_err_result(out)
-                                if out_str != "":
-                                    logging.info("{}".format(out_str))
-
-                            sys_utils.log_return_info(bootstrap_script_file, 0)
-
-                            is_bootstrap_run_ok = True
-                    else:
-                        if err is not None:
-                            err_str = sys_utils.decode_our_err_result(err)
-                            if err_str != "":
-                                logging.error("{}".format(err_str))
-
-                        sys_utils.log_return_info(bootstrap_script_file, ret_code)
-
-                        is_bootstrap_run_ok = False
-        except Exception as e:
-            logging.error("Bootstrap scripts error: {}".format(traceback.format_exc()))
-
-            is_bootstrap_run_ok = False
-
-        return is_bootstrap_run_ok
-
-    def callback_run_bootstrap(self,
job_pid): - ServerConstants.save_bootstrap_process(self.run_id, job_pid) - - @debug - def run( - self, process_event, completed_event, edge_id_status_queue=None, - edge_device_info_queue=None, run_metrics_queue=None, - run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None, - message_center_queue=None, edge_device_info_global_queue=None - ): - print(f"Server runner process id {os.getpid()}, run id {self.run_id}") - - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - self.run_process_completed_event = completed_event - try: - MLOpsUtils.set_ntp_offset(self.ntp_offset) - - self.rebuild_message_center(message_center_queue) - - self.run_impl(edge_id_status_queue, edge_device_info_queue, run_metrics_queue, - run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue) - except RunnerError: - logging.info("Runner stopped.") - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - except RunnerCompletedError: - logging.info("Runner completed.") - except Exception as e: - logging.error("Runner exits with exceptions. {}".format(traceback.format_exc())) - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - finally: - logging.info("Release resources.") - self._process_run_metrics_queue(run_metrics_queue) - self._process_run_logs_queue(run_logs_queue) - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) - if self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - ServerConstants.cleanup_run_process(self.run_id) - ServerConstants.cleanup_learning_process(self.run_id) - ServerConstants.cleanup_bootstrap_process(self.run_id) - - def check_runner_stop_event(self): - if self.run_process_event is not None and self.run_process_event.is_set(): - logging.info("Received stopping event.") - raise RunnerError("Runner stopped") - - if self.run_process_completed_event is not None and self.run_process_completed_event.is_set(): - logging.info("Received completed event.") - raise RunnerCompletedError("Runner completed") - - def deploy_model(self, serving_devices, request_json, run_id): - run_config = request_json["run_config"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_type = job_yaml.get("job_type", None) - job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - if job_type == Constants.JOB_TASK_TYPE_DEPLOY or job_type == Constants.JOB_TASK_TYPE_SERVE: - computing = job_yaml.get("computing", {}) - num_gpus = computing.get("minimum_num_gpus", 1) - serving_args = run_params.get("serving_args", {}) - model_id = serving_args.get("model_id", None) - model_name = serving_args.get("model_name", None) - model_version = serving_args.get("model_version", None) - model_storage_url = serving_args.get("model_storage_url", None) - endpoint_name = serving_args.get("endpoint_name", None) - endpoint_id = serving_args.get("endpoint_id", None) - random = serving_args.get("random", "") - random_out = sys_utils.random2(random, "FEDML@9999GREAT") - random_list = 
random_out.split("FEDML@") - device_type = device_client_constants.ClientConstants.login_role_list[ - device_client_constants.ClientConstants.LOGIN_MODE_FEDML_CLOUD_INDEX] - FedMLModelCards.get_instance().deploy_model( - model_name, device_type, json.dumps(serving_devices), - "", random_list[1], None, - in_model_id=model_id, in_model_version=model_version, - endpoint_name=endpoint_name, endpoint_id=endpoint_id, run_id=run_id) - - @debug - def run_impl( - self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue, - run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue - ): - run_id = self.request_json["runId"] - run_config = self.request_json["run_config"] - data_config = run_config["data_config"] - edge_ids = self.request_json["edgeids"] - - self.check_runner_stop_event() - - self.run_id = run_id - self.args.run_id = self.run_id - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - # report server running status - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - logging.info("Detect all status of Edge ids: " + str(edge_ids)) - - status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status( - edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue, - callback_when_edges_ready=self.send_training_request_to_edges) - logging.info(f"Status OK: {status_ok}, Active edge info dict: {active_edge_info_dict}, " - f"inactivate edges: {inactivate_edges}") - if not status_ok: - logging.error(f"Status of edge device is not OK. Active edge info dict: {active_edge_info_dict}, " - f"Inactivate edges: {inactivate_edges}") - return - - if not self.should_continue_run_job(run_id): - if FedMLServerRunner.debug_cloud_server: - while True: - time.sleep(30) - # Check if the run status is normal - self.aggregate_run_status_metrics_logs( - run_id, edge_ids, edge_id_status_queue, edge_device_info_queue, - edge_device_info_global_queue, - run_metrics_queue, run_logs_queue) - return - - # Start the server job - self._start_runner_process(run_id, self.request_json, is_server_job=True) - - # Check if the run status is normal - self.aggregate_run_status_metrics_logs( - run_id, edge_ids, edge_id_status_queue, edge_device_info_queue, - edge_device_info_global_queue, - run_metrics_queue, run_logs_queue) - - def aggregate_run_status_metrics_logs( - self, run_id, edge_id_list, edge_id_status_queue, edge_device_info_queue, - edge_device_info_global_queue, run_metrics_queue, run_logs_queue): - total_sleep_seconds = 0 - sleep_seconds = 3 - allowed_status_check_sleep_seconds = 60 * 25 - server_id = self.edge_id - normal_response_status_list = [ - ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_TRAINING, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING - ] - edges_id_status_timeout_map = dict() - number_of_failed_edges = 0 - number_of_finished_edges = 0 - number_of_killed_edges = 0 - running_edges_list = list() - inactivate_edge_list = list() - current_edge_id_status_map = dict() - - while True: - self.check_runner_stop_event() - - # Process run metrics - 
self._process_run_metrics_queue(run_metrics_queue) - - # Process run logs - self._process_run_logs_queue(run_logs_queue) - - # Fetch edge id and status from the edge id status queue - while True: - try: - queue_item = edge_id_status_queue.get(block=False, timeout=3) - if queue_item is not None: - current_edge_id_status_map.update(queue_item) - except queue.Empty as e: # If queue is empty, then break loop - break - - # Calc the total completed device number - server_id = current_edge_id_status_map.get("server", 0) - running_edges_list.clear() - number_of_failed_edges = 0 - number_of_finished_edges = 0 - number_of_killed_edges = 0 - for edge_id_item, status_item in current_edge_id_status_map.items(): - if edge_id_item == "server": - continue - - if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: - number_of_failed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - number_of_finished_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: - number_of_killed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE: - continue - - running_edges_list.append(edge_id_item) - - # Process the no response edges and accumulate the counter. - for edge_id_item in edge_id_list: - status_dict = edges_id_status_timeout_map.get(str(edge_id_item)) - status_item = current_edge_id_status_map.get(str(edge_id_item)) - if status_item is None: - continue - if status_dict is None: - status_dict = {"status": status_item, "count": 0} - else: - if status_item in normal_response_status_list: - status_dict["count"] = 0 - else: - status_dict["count"] += 1 - edges_id_status_timeout_map[str(edge_id_item)] = status_dict - - # If the completed device number is equal total device number, then break - if len(running_edges_list) <= 0 and len(current_edge_id_status_map.keys()) == len(edge_id_list) + 1: - break - - # Calc the timeout value to wait to device killed. - self.check_runner_stop_event() - time.sleep(sleep_seconds) - total_sleep_seconds += sleep_seconds - no_response_edge_ids = list() - for no_res_edge, no_res_status in edges_id_status_timeout_map.items(): - if no_res_status.get("count") * sleep_seconds > allowed_status_check_sleep_seconds: - no_response_edge_ids.append(no_res_edge) - - # If timeout, then report killed device status - if len(no_response_edge_ids) > 0: - for edge_id_item in no_response_edge_ids: - self.mlops_metrics.report_client_id_status( - edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED, - server_id=self.edge_id, run_id=self.run_id) - - # Check if we can get the response device info from edge devices - # and set the inactive edges to killed status. 
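# A minimal, self-contained sketch of the non-blocking drain pattern this
# aggregation loop applies to the status, metrics and logs queues above
# (the helper name `drain_queue` is illustrative and not part of this module):
import queue
from multiprocessing import Queue

def drain_queue(q):
    """Return every item currently in the queue without blocking."""
    items = []
    while True:
        try:
            items.append(q.get(block=False))
        except queue.Empty:
            break
    return items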
-            self.check_runner_stop_event()
-            given_edge_ids = list(set(edge_id_list) - set(inactivate_edge_list))
-            status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status(
-                edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue,
-                need_to_trigger_exception=False, status_timeout=60,
-                given_edge_ids=given_edge_ids, callback_when_detecting=self.callback_when_detecting_on_aggregation,
-                args_for_callback_when_detecting=(run_metrics_queue, run_logs_queue)
-            )
-            if not status_ok:
-                inactivate_edge_list.extend(inactivate_edges)
-                for edge_id_item in inactivate_edges:
-                    self.mlops_metrics.report_client_id_status(
-                        edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE,
-                        server_id=self.edge_id, run_id=self.run_id)
-
-        # Calc the final run status based on the completed device numbers and fault tolerance parameters.
-        enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id)
-        running_edges_list = list(set(running_edges_list))
-        status_to_report = self.calculate_server_status(
-            run_id, len(edge_id_list), number_of_failed_edges, number_of_finished_edges,
-            number_of_killed_edges, running_edges_list, enable_fault_tolerance=enable_fault_tolerance,
-            fault_tolerance_rate=fault_tolerance_rate)
-        if status_to_report is not None:
-            logging.info(
-                f"Run completed when aggregating status, metrics and logs, will report status {status_to_report}")
-            self.mlops_metrics.report_server_id_status(
-                self.run_id, status_to_report, edge_id=self.edge_id,
-                server_id=self.edge_id, server_agent_id=self.edge_id)
-
-    def callback_when_detecting_on_aggregation(self, detecting_args):
-        # Process run metrics
-        self._process_run_metrics_queue(detecting_args[0])
-
-        # Process run logs
-        self._process_run_logs_queue(detecting_args[1])
-
-    def _process_run_metrics_queue(self, run_metrics_queue):
-        # Fetch metrics from the run metrics queue
-        while True:
-            try:
-                metrics_item = run_metrics_queue.get(block=False, timeout=3)
-                MetricsManager.get_instance().save_metrics(metrics_item)
-                metric_json = json.loads(metrics_item)
-                if metric_json.get("is_endpoint", False):
-                    metric_json.pop("is_endpoint")
-                    self.mlops_metrics.report_endpoint_metric({}, payload=json.dumps(metric_json))
-                else:
-                    self.mlops_metrics.report_server_training_metric({}, payload=metrics_item)
-            except queue.Empty as e:  # If queue is empty, then break loop
-                break
-
-    def _process_run_logs_queue(self, run_logs_queue):
-        # Fetch logs from the run logs queue
-        while True:
-            try:
-                logs_item = run_logs_queue.get(block=False, timeout=3)
-                LogsManager.save_logs(logs_item)
-            except queue.Empty as e:  # If queue is empty, then break loop
-                break
-
-    def run_server_job_impl(self, process_event, completed_event, edge_id_status_queue=None,
-                            edge_device_info_queue=None, run_metrics_queue=None,
-                            run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None,
-                            message_center_queue=None, edge_device_info_global_queue=None):
-        print(f"Server runner process id {os.getpid()}, run id {self.run_id}")
-
-        if platform.system() != "Windows":
-            os.setsid()
-
-        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
-        os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning')
-
-        self.run_process_event = process_event
-        self.run_process_completed_event = completed_event
-
-        MLOpsUtils.set_ntp_offset(self.ntp_offset)
-
-        self.rebuild_message_center(message_center_queue)
-
-        run_id = self.request_json["runId"]
-        run_config = self.request_json["run_config"]
-
data_config = run_config["data_config"] - edge_ids = self.request_json["edgeids"] - - self.check_runner_stop_event() - - # get training params - private_local_data_dir = data_config.get("privateLocalData", "") - is_using_local_data = 0 - # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0: - # is_using_local_data = 1 - - # start a run according to the hyper-parameters - # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id) - fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data") - fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config") - if is_using_local_data: - fedml_local_data_dir = private_local_data_dir - self.fedml_data_dir = self.fedml_data_local_package_dir - - self.check_runner_stop_event() - - logging.info("download packages and run the bootstrap script...") - - # update local config with real time parameters from server and dynamically replace variables value - unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config) - if unzip_package_path is None or fedml_config_object is None: - logging.info("failed to update local fedml config.") - self.check_runner_stop_event() - self.cleanup_run_when_starting_failed() - self.send_training_stop_request_to_edges_when_exception(edge_ids, payload=self.start_request_json, - run_id=run_id) - return - - logging.info("cleanup the previous aggregation process and check downloaded packages...") - - entry_file_config = fedml_config_object["entry_config"] - dynamic_args_config = fedml_config_object["dynamic_args"] - entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep) - entry_file = os.path.basename(entry_file) - conf_file = entry_file_config["conf_file"] - conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep) - ServerConstants.cleanup_learning_process(run_id) - self.check_runner_stop_event() - if not os.path.exists(unzip_package_path): - logging.info("failed to unzip file.") - self.check_runner_stop_event() - self.cleanup_run_when_starting_failed() - self.send_training_stop_request_to_edges_when_exception(edge_ids, payload=self.start_request_json, - run_id=run_id) - return - os.chdir(os.path.join(unzip_package_path, "fedml")) - - self.check_runner_stop_event() - - logging.info("starting the server user process...") - - entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file) - conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file) - logging.info(" ") - logging.info(" ") - logging.info("====Your Run Logs Begin===") - process, is_launch_task, error_list = self.execute_job_task(entry_file_full_path, conf_file_full_path, run_id) - logging.info("====Your Run Logs End===") - logging.info(" ") - logging.info(" ") - - ret_code, out, err = process.returncode, None, None - is_run_ok = sys_utils.is_runner_finished_normally(process.pid) - if is_launch_task: - is_run_ok = True - if error_list is not None and len(error_list) > 0: - is_run_ok = False - if ret_code is None or ret_code <= 0: - self.check_runner_stop_event() - - if is_run_ok: - if out is not None: - out_str = sys_utils.decode_our_err_result(out) - if out_str != "": - logging.info("{}".format(out_str)) - - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - if is_launch_task: - sys_utils.log_return_info(f"job {run_id}", 0) - else: - 
sys_utils.log_return_info(entry_file, 0) - else: - is_run_ok = False - - if not is_run_ok: - # If the run status is killed or finished, then return with the normal state. - current_job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id) - if current_job is not None and (current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED or - current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED): - return - - self.check_runner_stop_event() - - logging.error("failed to run the aggregation process...") - - if err is not None: - err_str = sys_utils.decode_our_err_result(err) - if err_str != "": - logging.error("{}".format(err_str)) - - if is_launch_task: - sys_utils.log_return_info(f"job {run_id}", ret_code) - else: - sys_utils.log_return_info(entry_file, ret_code) - - self.send_training_stop_request_to_edges_when_exception(edge_ids, run_id=run_id) - - def init_job_task(self, request_json): - run_id = request_json["runId"] - run_config = request_json["run_config"] - edge_ids = request_json["edgeids"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", None) - server_id = request_json["server_id"] - if self.run_as_cloud_agent: - server_id = self.edge_id - - self.setup_listeners_for_edge_status(run_id, edge_ids, server_id) - self.setup_listener_for_run_metrics(run_id) - self.setup_listener_for_run_logs(run_id) - - def should_continue_run_job(self, run_id): - run_config = self.request_json["run_config"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_yaml_default_none = run_params.get("job_yaml", None) - framework_type = job_yaml.get("framework_type", None) - job_type = job_yaml.get("job_type", None) - job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - if job_yaml_default_none is not None: - if job_type == Constants.JOB_TASK_TYPE_FEDERATE: - return True - - if framework_type is None or framework_type != Constants.JOB_FRAMEWORK_TYPE_FEDML: - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - return False - - return True - - def execute_job_task(self, entry_file_full_path, conf_file_full_path, run_id): - run_config = self.request_json["run_config"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_yaml_default_none = run_params.get("job_yaml", None) - job_api_key = job_yaml.get("run_api_key", None) - job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key - assigned_gpu_ids = run_params.get("gpu_ids", None) - framework_type = job_yaml.get("framework_type", None) - job_type = job_yaml.get("job_type", None) - job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - conf_file_object = load_yaml_config(conf_file_full_path) - entry_args_dict = conf_file_object.get("fedml_entry_args", {}) - entry_args = entry_args_dict.get("arg_items", None) - - executable_interpreter = ClientConstants.CLIENT_SHELL_PS \ - if platform.system() == ClientConstants.PLATFORM_WINDOWS else ClientConstants.CLIENT_SHELL_BASH - - if job_yaml_default_none is None: - # Generate the job executing commands for previous federated learning (Compatibility) - python_program = get_python_program() - logging.info("Run the server: {} {} --cf {} --rank 0 --role server".format( - python_program, 
entry_file_full_path, conf_file_full_path)) - entry_command = f"{python_program} {entry_file_full_path} --cf " \ - f"{conf_file_full_path} --rank 0 --role server" - shell_cmd_list = [entry_command] - - # Run the job executing commands for previous federated learning (Compatibility) - process, error_list = ClientConstants.execute_commands_with_live_logs( - shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False) - is_launch_task = False - else: - self.check_runner_stop_event() - - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - # Generate the job executing commands - job_executing_commands = JobRunnerUtils.generate_job_execute_commands( - run_id=self.run_id, edge_id=self.edge_id, version=self.version, package_type=self.package_type, - executable_interpreter=executable_interpreter, entry_file_full_path=entry_file_full_path, - conf_file_object=conf_file_object, entry_args=entry_args, assigned_gpu_ids=assigned_gpu_ids, - job_api_key=job_api_key, client_rank=0) - - # Run the job executing commands - logging.info(f"Run the server job with job id {self.run_id}, device id {self.edge_id}.") - process, error_list = ServerConstants.execute_commands_with_live_logs( - job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor) - is_launch_task = True - - return process, is_launch_task, error_list - - def callback_start_fl_job(self, job_pid): - ServerConstants.save_learning_process(self.run_id, job_pid) - self.mlops_metrics.report_sys_perf( - self.args, self.agent_config["mqtt_config"], job_process_id=job_pid) - - def start_job_perf(self, job_pid): - ServerConstants.save_learning_process(self.run_id, job_pid) - self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid) - - def job_error_processor(self, error_list): - self.check_runner_stop_event() - - error_str = "\n".join(error_list) - raise Exception(f"Error occurs when running the job... 
{error_str}") - - def process_job_status(self, run_id, edge_id, status): - number_of_failed_edges = 0 - number_of_finished_edges = 0 - number_of_killed_edges = 0 - edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {}) - server_id = edge_id_status_dict.get("server", 0) - enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id) - running_edges_list = list() - for edge_id_item, status_item in edge_id_status_dict.items(): - if edge_id_item == "server": - continue - - if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: - number_of_failed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - number_of_finished_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: - number_of_killed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE: - continue - - running_edges_list.append(edge_id_item) - - # Report client status - edge_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else status - self.mlops_metrics.report_client_training_status(edge_id, edge_status, run_id=run_id) - self.mlops_metrics.report_client_device_status_to_web_ui(edge_id, edge_status, run_id=run_id) - - # Report server status based on the fault tolerance model and parameters - edge_nums = len(edge_id_status_dict.keys()) - 1 - status_to_report = self.calculate_server_status( - run_id, edge_nums, number_of_failed_edges, number_of_finished_edges, number_of_killed_edges, - running_edges_list, enable_fault_tolerance=enable_fault_tolerance, - fault_tolerance_rate=fault_tolerance_rate) - if status_to_report is not None: - logging.info(f"Run completed when processing edge status, will report status {status_to_report}") - self.report_server_status(run_id, server_id, status_to_report) - - def calculate_server_status( - self, run_id, total_edge_nums, number_of_failed_edges, number_of_finished_edges, - number_of_killed_edges, running_edges_list, enable_fault_tolerance=False, - fault_tolerance_rate=0.8 - ): - # Report server status based on the fault tolerance model and parameters - actual_failed_rate = number_of_failed_edges / total_edge_nums - all_edges_run_completed = True if len(running_edges_list) <= 0 else False - if all_edges_run_completed: - status_to_report = None - if enable_fault_tolerance: - if actual_failed_rate >= fault_tolerance_rate: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED - self.send_training_stop_request_to_edges_when_exception( - running_edges_list, run_id=run_id, status=status_to_report) - return status_to_report - else: - if number_of_killed_edges == total_edge_nums: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED - else: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED - else: - if number_of_failed_edges > 0: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED - elif number_of_finished_edges == total_edge_nums: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED - elif number_of_killed_edges == total_edge_nums: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED - - return status_to_report - - def parse_fault_tolerance_params(self, run_id): - run_json = self.running_request_json.get(str(run_id), 
None) - if run_json is None: - run_json = self.request_json - run_config = run_json.get("run_config", {}) - run_params = run_config.get("parameters", {}) - common_args = run_params.get("common_args", {}) - enable_fault_tolerance = common_args.get("enable_fault_tolerance", False) - fault_tolerance_rate = common_args.get("fault_tolerance_rate", 0) - return enable_fault_tolerance, fault_tolerance_rate - - def report_server_status(self, run_id, server_id, status): - self.mlops_metrics.report_server_id_status(run_id, status, edge_id=self.edge_id, - server_id=server_id, server_agent_id=self.edge_id) - - def stop_run_when_starting_failed(self): - edge_id_list = self.request_json["edgeids"] - run_id = self.request_json.get("run_id", 0) - logging.error("edge ids {}".format(str(edge_id_list))) - - payload = self.running_request_json.get(str(run_id)) - if payload is not None: - self.send_training_stop_request_to_edges(edge_id_list, payload=json.dumps(payload), run_id=run_id) - - # logging.info("Stop run successfully when starting failed.") - - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - def cleanup_run_when_finished(self, should_send_server_id_status=True): - # logging.info("Cleanup run successfully when finished.") - - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id - ) - - if should_send_server_id_status: - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - ServerConstants.cleanup_learning_process(self.run_id) - ServerConstants.cleanup_bootstrap_process(self.run_id) - - try: - local_package_path = ServerConstants.get_package_download_dir() - for package_file in listdir(local_package_path): - if os.path.basename(package_file).startswith("run_" + str(self.run_id)): - shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True) - except Exception as e: - pass - - def cleanup_run_when_starting_failed( - self, status=ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, should_send_server_id_status=True): - # logging.info("Cleanup run successfully when starting failed.") - - self.mlops_metrics.report_server_training_status( - self.run_id, status, edge_id=self.edge_id) - - if should_send_server_id_status: - self.mlops_metrics.report_server_id_status( - self.run_id, status, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - ServerConstants.cleanup_learning_process(self.run_id) - ServerConstants.cleanup_bootstrap_process(self.run_id) - - try: - local_package_path = ServerConstants.get_package_download_dir() - for package_file in listdir(local_package_path): - if os.path.basename(package_file).startswith("run_" + str(self.run_id)): - shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True) - except Exception as e: - pass - - def should_process_async_cluster(self): - run_config = self.request_json.get("run_config", {}) - run_params = run_config.get("parameters", {}) - common_args = run_params.get("common_args", {}) - self.enable_async_cluster = 
common_args.get("enable_async_cluster", False) - self.async_check_timeout = common_args.get("async_check_timeout", 0) - if self.enable_async_cluster: - return True, self.async_check_timeout - - return False, self.async_check_timeout - - @debug - def detect_edges_status( - self, edge_device_info_queue, edge_device_info_global_queue=None, callback_when_edges_ready=None, status_timeout=None, - need_to_trigger_exception=True, status_check_context=None, given_edge_ids=None, - callback_when_detecting=None, args_for_callback_when_detecting=None - ): - run_id = self.request_json["runId"] - run_id_str = str(run_id) - edge_id_list = self.request_json["edgeids"] - if given_edge_ids is not None: - edge_id_list = given_edge_ids - - # Init realtime status of all edges - run_edges_realtime_status = dict() - run_edges_realtime_status[run_id_str] = dict() - - edge_info_global_dict = dict() - if edge_device_info_global_queue is not None: - for edge_info_global in edge_device_info_global_queue: - edge_info_id = edge_info_global.get("edge_id") - edge_info_global_dict[edge_info_id] = edge_info_global - - # Send status message to all edges - allowed_cache_edge_status_time = 60 - for edge_id in edge_id_list: - # Check if the edge status was filled allowed_cache_edge_status_time seconds ago, - # if so no more checking message would be sent. - edge_info = edge_info_global_dict.get(edge_id, None) - if edge_info is not None: - timestamp = edge_info.get("timestamp", None) - time_interval = time.time() - timestamp - if time_interval <= allowed_cache_edge_status_time: - continue - - self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context) - time.sleep(3) - - total_sleep_seconds = 0 - status_check_sleep_seconds = 10 - allowed_status_check_sleep_seconds = 60 * 2 if status_timeout is None else status_timeout - allowed_status_check_sleep_seconds_for_async = 30 - inactivate_edges = list() - active_edge_info_dict = dict() - while True: - if callback_when_detecting is not None: - callback_when_detecting(args_for_callback_when_detecting) - - # Fetch edge info from the edge status queue, which will be added to realtime status map - while True: - self.check_runner_stop_event() - - try: - edge_info = edge_device_info_queue.get(block=False, timeout=1) - if edge_info is not None: - edge_id = edge_info.get("edge_id", None) - if edge_id is not None: - run_edges_realtime_status[run_id_str][edge_id] = edge_info - except queue.Empty as e: # If queue is empty, then break loop - break - - self.check_runner_stop_event() - - # Check all edges which don't send response status successfully - # and retry to send the status checking message. - active_edges_count = 0 - inactivate_edges.clear() - active_edge_info_dict.clear() - for edge_id in edge_id_list: - edge_info_dict = run_edges_realtime_status.get(run_id_str, {}) - edge_info = edge_info_dict.get(edge_id, None) - edge_info = edge_info_dict.get(str(edge_id), None) if edge_info is None else edge_info - if edge_info is not None: - active_edges_count += 1 - active_edge_info_dict[str(edge_id)] = edge_info - else: - # Check if the edge status was filled allowed_cache_edge_status_time seconds ago, - # if so no more checking message would be sent. 
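# Standalone sketch of the freshness test described in the comment above;
# the 60-second default mirrors `allowed_cache_edge_status_time`, and the
# helper name `is_edge_info_fresh` is illustrative only:
import time

def is_edge_info_fresh(edge_info, max_age_seconds=60):
    """True if the cached edge info was reported within the allowed window."""
    timestamp = edge_info.get("timestamp")
    return timestamp is not None and (time.time() - timestamp) <= max_age_seconds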
- edge_info = edge_info_global_dict.get(edge_id, None) - if edge_info is not None: - timestamp = edge_info.get("timestamp", None) - time_interval = time.time() - timestamp - if time_interval <= allowed_cache_edge_status_time: - active_edges_count += 1 - active_edge_info_dict[str(edge_id)] = edge_info - continue - - inactivate_edges.append(edge_id) - self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context) - - # If all edges are ready then send the starting job message to them - if active_edges_count == len(edge_id_list): - logging.info(f"All edges are ready. Active edge id list is as follows. {active_edge_info_dict}") - if callback_when_edges_ready is not None: - logging.info("All edges are ready. Start to process the callback function.") - callback_when_edges_ready(active_edge_info_dict=active_edge_info_dict) - else: - logging.info("All edges are ready. No callback function to process.") - break - else: - logging.info(f"All edges are not ready. Active edge id list: {active_edge_info_dict}, " - f"Inactive edge id list: {inactivate_edges}") - - # Check if runner needs to stop and sleep specific time - self.check_runner_stop_event() - time.sleep(status_check_sleep_seconds) - total_sleep_seconds += status_check_sleep_seconds - - # Check if the status response message has timed out to receive - if total_sleep_seconds >= allowed_status_check_sleep_seconds: - # If so, send failed message to MLOps and send exception message to all edges. - logging.error(f"There are inactive edge devices. " - f"Inactivate edge id list is as follows. {inactivate_edges}") - if need_to_trigger_exception: - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.server_agent_id) - self.send_training_stop_request_to_edges_when_exception(edge_id_list, - payload=json.dumps(self.request_json), - run_id=run_id) - return False, active_edge_info_dict, inactivate_edges - - # If we enable the mode for async cluster, then sleep some time and send messages to all clients. 
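# Standalone sketch of the async-cluster wait implemented just below: once a
# 30-second grace period has elapsed, the runner sleeps out the remainder of
# the user-configured `async_check_timeout` and then dispatches the training
# request anyway (the function name here is illustrative):
import time

def async_cluster_wait(total_sleep_seconds, async_timeout, grace_period=30):
    """Block until the async-cluster dispatch may proceed."""
    if total_sleep_seconds >= grace_period and async_timeout > grace_period:
        time.sleep(async_timeout - grace_period)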
- if callback_when_edges_ready is not None: - should_async, async_timeout = self.should_process_async_cluster() - if should_async and total_sleep_seconds >= allowed_status_check_sleep_seconds_for_async: - if async_timeout > allowed_status_check_sleep_seconds_for_async: - time.sleep(async_timeout - allowed_status_check_sleep_seconds_for_async) - self.send_training_request_to_edges() - return True, active_edge_info_dict, inactivate_edges - - return True, active_edge_info_dict, inactivate_edges - - def send_status_check_msg(self, run_id, edge_id, server_id, context=None): - topic_get_model_device_id = "server/client/request_device_info/" + str(edge_id) - payload = {"server_id": server_id, "run_id": run_id} - if context is not None: - payload["context"] = context - self.message_center.send_message(topic_get_model_device_id, json.dumps(payload)) - - @debug - def send_training_request_to_edges(self, active_edge_info_dict=None): - run_id = self.request_json["runId"] - edge_id_list = self.request_json["edgeids"] - run_config = self.request_json.get("run_config", {}) - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_yaml_default_none = run_params.get("job_yaml", None) - computing = job_yaml.get("computing", {}) - request_num_gpus = computing.get("minimum_num_gpus", None) - job_gpu_id_list = self.request_json.get("job_gpu_id_list", None) - - logging.info("Send training request to Edge ids: " + str(edge_id_list)) - - should_match_gpu = False - if job_yaml_default_none is not None and request_num_gpus is not None and \ - int(request_num_gpus) > 0 and active_edge_info_dict is not None: - should_match_gpu = True - SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True) - - # Match and assign gpus to each device - assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices( - request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list) - if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None: - # If no resources available, send failed message to MLOps and send exception message to all edges. - gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges( - active_edge_info_dict, should_print=True) - err_info = f"No resources available." \ - f"Total available GPU count {gpu_available_count} is less than " \ - f"request GPU count {request_num_gpus}" - logging.error(err_info) - - # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the - # status from running to failed. 
- self.mlops_metrics.report_server_training_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id - ) - - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.server_agent_id) - self.send_training_stop_request_to_edges_when_exception(edge_id_list, - payload=json.dumps(self.request_json), - run_id=run_id) - - serving_args = job_yaml.get("serving_args", {}) - endpoint_id = serving_args.get("endpoint_id", None) - if endpoint_id is not None: - fedml.mlops.log_endpoint_status( - endpoint_id, device_client_constants.ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - fedml.mlops.log_run_log_lines( - endpoint_id, 0, [err_info], - log_source=device_client_constants.ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT - ) - return - - # Generate master node addr and port - master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list, - active_edge_info_dict) - - # Generate new edge id list after matched - edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict) - if len(edge_id_list) <= 0: - gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges( - active_edge_info_dict, should_print=True) - logging.error(f"Request parameter for GPU num is invalid." - f"Total available GPU count {gpu_available_count}." - f"Request GPU num {request_num_gpus}") - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.server_agent_id) - self.send_training_stop_request_to_edges_when_exception(edge_id_list, - payload=json.dumps(self.request_json), - run_id=run_id) - return - - if should_match_gpu: - # Report gpu num and related infos to MLOps. 
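# For illustration only, the payload published to "compute/mlops/endpoint"
# below takes this shape (every value here is invented):
#   {"endpoint_id": 100,
#    "endpoint_info": [{"machine_id": 201, "endpoint_gpu_count": 2,
#                       "master_deploy_id": 301, "slave_deploy_id": 302}]}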
- serving_args = job_yaml.get("serving_args", {}) - endpoint_id = serving_args.get("endpoint_id", None) - if endpoint_id is not None: - endpoint_info = list() - for edge_id_item, gpu_num in assigned_gpu_num_dict.items(): - edge_info = active_edge_info_dict.get(str(edge_id_item), {}) - endpoint_info.append({ - "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num, - "master_deploy_id": edge_info.get("master_device_id", 0), - "slave_deploy_id": edge_info.get("slave_device_id", 0)}) - topic_name = f"compute/mlops/endpoint" - endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info} - print(f"endpoint_info_json {endpoint_info_json}") - self.message_center.send_message(topic_name, json.dumps(endpoint_info_json)) - - client_rank = 1 - for edge_id in edge_id_list: - topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train" - logging.info("start_train: send topic " + topic_start_train + " to client...") - request_json = self.request_json - request_json["client_rank"] = client_rank - client_rank += 1 - - if active_edge_info_dict is not None: - edge_info = active_edge_info_dict.get(str(edge_id), {}) - model_master_device_id = edge_info.get("master_device_id", None) - model_slave_device_id = edge_info.get("slave_device_id", None) - model_slave_device_id_list = edge_info.get("slave_device_id_list", None) - - if should_match_gpu: - request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler( - edge_id, edge_id_list, master_node_addr, master_node_port, - assigned_gpu_num_dict, assigned_gpu_ids_dict, - model_master_device_id=model_master_device_id, - model_slave_device_id=model_slave_device_id, - model_slave_device_id_list=model_slave_device_id_list - ) - - self.message_center.send_message(topic_start_train, json.dumps(request_json)) - - def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id): - self.client_agent_active_list[f"{run_id}"] = dict() - self.client_agent_active_list[f"{run_id}"][f"server"] = server_id - for edge_id in edge_ids: - self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status" - self.add_message_listener(edge_status_topic, self.callback_edge_status) - self.subscribe_msg(edge_status_topic) - - def remove_listeners_for_edge_status(self, edge_ids=None): - if edge_ids is None: - edge_ids = self.request_json["edgeids"] - - for edge_id in edge_ids: - edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status" - self.unsubscribe_msg(edge_status_topic) - - def setup_listener_for_run_metrics(self, run_id): - metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}" - self.add_message_listener(metric_topic, self.callback_run_metrics) - self.subscribe_msg(metric_topic) - - def remove_listener_for_run_metrics(self, run_id): - metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}" - self.unsubscribe_msg(metric_topic) - - def setup_listener_for_run_logs(self, run_id): - logs_topic = f"fedml_slave/fedml_master/logs/{run_id}" - self.add_message_listener(logs_topic, self.callback_run_logs) - self.subscribe_msg(logs_topic) - - def remove_listener_for_run_logs(self, run_id): - logs_topic = f"fedml_slave/fedml_master/logs/{run_id}" - self.unsubscribe_msg(logs_topic) - - def callback_run_logs(self, topic, payload): - run_id = str(topic).split('/')[-1] - run_id_str = str(run_id) - if self.run_logs_queue_map.get(run_id_str) is None: - self.run_logs_queue_map[run_id_str] = Queue() 
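# The run-keyed queue maps above are all lazily created with the same
# check-then-create idiom; `dict.setdefault` is a compact standalone
# equivalent, shown for illustration only:
#     self.run_logs_queue_map.setdefault(run_id_str, Queue()).put(payload)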
- self.run_logs_queue_map[run_id_str].put(payload) - - def callback_run_metrics(self, topic, payload): - print(f"callback_run_metrics topic {topic}, payload {payload}") - run_id = str(topic).split('/')[-1] - run_id_str = str(run_id) - if self.run_metrics_queue_map.get(run_id_str) is None: - self.run_metrics_queue_map[run_id_str] = Queue() - self.run_metrics_queue_map[run_id_str].put(payload) - - def callback_edge_status(self, topic, payload): - payload_json = json.loads(payload) - run_id = payload_json.get("run_id", None) - edge_id = payload_json.get("edge_id", None) - status = payload_json.get("status", None) - if run_id is not None and edge_id is not None: - active_item_dict = self.client_agent_active_list.get(f"{run_id}", None) - if active_item_dict is None: - return - self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = status - - if self.run_edge_id_status_queue_map.get(f"{run_id}") is None: - self.run_edge_id_status_queue_map[f"{run_id}"] = Queue() - self.run_edge_id_status_queue_map[f"{run_id}"].put(self.client_agent_active_list[f"{run_id}"]) - - self.process_job_status(run_id, edge_id, status) - - def ota_upgrade(self, payload, request_json): - run_id = request_json["runId"] - force_ota = False - ota_version = None - - try: - run_config = request_json.get("run_config", None) - parameters = run_config.get("parameters", None) - common_args = parameters.get("common_args", None) - force_ota = common_args.get("force_ota", False) - ota_version = common_args.get("ota_version", None) - except Exception as e: - pass - - if force_ota and ota_version is not None: - should_upgrade = True if ota_version != fedml.__version__ else False - upgrade_version = ota_version - else: - try: - fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version) - except Exception as e: - return - - should_upgrade = False if fedml_is_latest_version else True - upgrade_version = remote_ver - - if should_upgrade: - job_obj = FedMLServerDataInterface.get_instance().get_job_by_id(run_id) - if job_obj is None: - FedMLServerDataInterface.get_instance(). 
\ - save_started_job(run_id, self.edge_id, time.time(), - ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, - ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, - payload) - - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - logging.info(f"Upgrade to version {upgrade_version} ...") - - sys_utils.do_upgrade(self.version, upgrade_version) - - raise Exception("Restarting after upgraded...") - - def callback_start_train(self, topic=None, payload=None): - print("callback_start_train: ") - try: - MLOpsConfigs.fetch_all_configs() - except Exception as e: - pass - - # get training params - if self.run_as_cloud_server: - message_bytes = payload.encode("ascii") - base64_bytes = base64.b64decode(message_bytes) - payload = base64_bytes.decode("ascii") - - # [NOTES] Example Request JSON: https://fedml-inc.larksuite.com/wiki/ScnIwUif9iupbjkYS0LuBrd6sod#WjbEdhYrvogmlGxKTOGu98C6sSb - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - - # Process the log - run_id = request_json["runId"] - run_id_str = str(run_id) - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - # Start log processor for current run - self.args.run_id = run_id - self.args.edge_id = self.edge_id - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( - run_id, self.edge_id, SchedulerConstants.get_log_source(request_json)) - logging.info("start the log processor.") - elif self.run_as_cloud_agent: - # Start log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( - run_id, request_json.get("server_id", "0"), SchedulerConstants.get_log_source(request_json) - ) - elif self.run_as_cloud_server: - self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id) - run_id = request_json["runId"] - run_id_str = str(run_id) - - # Start log processor for current run - self.args.run_id = run_id - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( - run_id, self.edge_id, SchedulerConstants.get_log_source(request_json)) - - logging.info("callback_start_train payload: {}".format(payload)) - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - if not self.run_as_cloud_agent and not self.run_as_cloud_server: - self.ota_upgrade(payload, request_json) - - self.start_request_json = payload - self.run_id = run_id - ServerConstants.save_runner_infos(self.args.device_id + "." 
+ self.args.os_name, self.edge_id, run_id=run_id) - - # Start server with multiprocessing mode - self.request_json = request_json - self.running_request_json[run_id_str] = request_json - edge_id_list = request_json.get("edgeids", list()) - self.run_edge_ids[run_id_str] = edge_id_list - - logging.info("subscribe the client exception message.") - - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - self.init_job_task(request_json) - - self.args.run_id = run_id - - self._start_runner_process(run_id, request_json) - - ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - elif self.run_as_cloud_agent: - self.init_job_task(request_json) - - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config - ) - server_runner.run_as_cloud_agent = self.run_as_cloud_agent - server_runner.start_request_json = json.dumps(request_json) - self.run_process_event_map[run_id_str] = multiprocessing.Event() - self.run_process_event_map[run_id_str].clear() - server_runner.run_process_event = self.run_process_event_map[run_id_str] - - if not self.use_local_process_as_cloud_server: - self.run_process_map[run_id_str] = Process(target=server_runner.start_cloud_server_process_entry) - self.run_process_map[run_id_str].start() - else: - message_bytes = json.dumps(self.request_json).encode("ascii") - base64_bytes = base64.b64encode(message_bytes) - runner_cmd_encoded = base64_bytes.decode("ascii") - logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded)) - - cloud_device_id = request_json.get("cloudServerDeviceId", "0") - - self.run_process_map[run_id_str] = Process( - target=FedMLServerRunner.start_local_cloud_server, - args=(run_id_str, self.args.user, self.version, cloud_device_id, runner_cmd_encoded)) - self.run_process_map[run_id_str].start() - time.sleep(1) - - ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - elif self.run_as_cloud_server: - self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id) - self.start_request_json = json.dumps(request_json) - run_id = request_json["runId"] - run_id_str = str(run_id) - - self.init_job_task(request_json) - - self.args.run_id = run_id - - self._start_runner_process(run_id, request_json) - # ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - - @staticmethod - def start_local_cloud_server(run_id, user, version, cloud_device_id, runner_cmd_encoded): - print(f"start cloud server, device id {cloud_device_id}, runner cmd {runner_cmd_encoded}") - if not FedMLServerRunner.debug_cloud_server: - pip_source_dir = os.path.dirname(__file__) - login_cmd = os.path.join(pip_source_dir, "server_login.py") - run_cmd = f"{get_python_program()} -W ignore {login_cmd} -t login -r cloud_server -u {str(user)} " \ - f"-v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}" - os.system(run_cmd) - - def _start_runner_process(self, run_id, request_json, is_server_job=False): - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config - ) - run_id_str = str(run_id) - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - server_runner.edge_id = self.edge_id - server_runner.server_agent_id = self.server_agent_id - server_runner.start_request_json = json.dumps(request_json) - self.run_process_event_map[run_id_str] = multiprocessing.Event() - self.run_process_event_map[run_id_str].clear() - 
server_runner.run_process_event = self.run_process_event_map[run_id_str] - self.run_process_completed_event_map[run_id_str] = multiprocessing.Event() - self.run_process_completed_event_map[run_id_str].clear() - server_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str] - if self.run_edge_id_status_queue_map.get(run_id_str, None) is None: - self.run_edge_id_status_queue_map[run_id_str] = Queue() - if self.run_edge_device_info_queue_map.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map[run_id_str] = Queue() - if self.run_metrics_queue_map.get(run_id_str, None) is None: - self.run_metrics_queue_map[run_id_str] = Queue() - if self.run_events_queue_map.get(run_id_str, None) is None: - self.run_events_queue_map[run_id_str] = Queue() - if self.run_artifacts_queue_map.get(run_id_str, None) is None: - self.run_artifacts_queue_map[run_id_str] = Queue() - if self.run_logs_queue_map.get(run_id_str, None) is None: - self.run_logs_queue_map[run_id_str] = Queue() - # if self.run_edge_device_info_global_queue is None: - # self.run_edge_device_info_global_queue = Array('i', list()) - server_runner.edge_id_status_queue = self.run_edge_id_status_queue_map[run_id_str] - server_runner.edge_device_info_queue = self.run_edge_device_info_queue_map[run_id_str] - self.run_process_map[run_id_str] = Process( - target=server_runner.run if not is_server_job else server_runner.run_server_job_impl, args=( - self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str], - self.run_edge_id_status_queue_map[run_id_str], self.run_edge_device_info_queue_map[run_id_str], - self.run_metrics_queue_map[run_id_str], self.run_events_queue_map[run_id_str], - self.run_artifacts_queue_map[run_id_str], self.run_logs_queue_map[run_id_str], - self.message_center.get_message_queue(), - self.run_edge_device_info_global_queue - ) - ) - self.run_process_map[run_id_str].start() - ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - - def start_cloud_server_process_entry(self): - try: - self.start_cloud_server_process() - except Exception as e: - pass - - def start_cloud_server_process(self): - run_config = self.request_json["run_config"] - packages_config = run_config["packages_config"] - self.start_cloud_server(packages_config) - - def start_cloud_server(self, packages_config): - server_id = self.request_json["server_id"] - self.cloud_server_name = FedMLServerRunner.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) + "-" + str(server_id) - self.server_docker_image = ( - self.agent_config["docker_config"]["registry_server"] - + self.agent_config["docker_config"]["registry_dir"] - + self.server_docker_base_image - ) - - logging.info("docker image {}".format(self.server_docker_image)) - # logging.info("file_sys_driver {}".format(self.agent_config["docker_config"]["file_sys_driver"])) - - registry_secret_cmd = ( - "kubectl create namespace fedml-devops-aggregator-" - + self.version - + ";kubectl -n fedml-devops-aggregator-" - + self.version - + " delete secret secret-" - + self.cloud_server_name - + " ;kubectl create secret docker-registry secret-" - + self.cloud_server_name - + " --docker-server=" - + self.agent_config["docker_config"]["registry_server"] - + " --docker-username=" - + self.agent_config["docker_config"]["user_name"] - + " --docker-password=$(aws ecr-public get-login-password --region " - + self.agent_config["docker_config"]["public_cloud_region"] - + ")" - + " --docker-email=fedml@fedml.ai -n fedml-devops-aggregator-" - + 
self.version - ) - logging.info("Create secret cmd: " + registry_secret_cmd) - os.system(registry_secret_cmd) - - message_bytes = json.dumps(self.request_json).encode("ascii") - base64_bytes = base64.b64encode(message_bytes) - runner_cmd_encoded = base64_bytes.decode("ascii") - logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded)) - # logging.info("runner_cmd_decoded: {}".format(base64.b64decode(runner_cmd_encoded).decode())) - cur_dir = os.path.dirname(__file__) - run_deployment_cmd = ( - "export FEDML_AGGREGATOR_NAME=" - + self.cloud_server_name - + ";export FEDML_AGGREGATOR_SVC=" - + self.cloud_server_name - + ";export FEDML_AGGREGATOR_VERSION=" - + self.version - + ';export FEDML_AGGREGATOR_IMAGE_PATH="' - + self.server_docker_image - + '"' - + ";export FEDML_CONF_ID=" - + self.cloud_server_name - + ";export FEDML_DATA_PV_ID=" - + self.cloud_server_name - + ";export FEDML_DATA_PVC_ID=" - + self.cloud_server_name - + ";export FEDML_REGISTRY_SECRET_SUFFIX=" - + self.cloud_server_name - + ";export FEDML_ACCOUNT_ID=0" - + ";export FEDML_SERVER_DEVICE_ID=" - + self.request_json.get("cloudServerDeviceId", "0") - + ";export FEDML_VERSION=" - + self.version - + ";export FEDML_PACKAGE_NAME=" - + packages_config.get("server", "") - + ";export FEDML_PACKAGE_URL=" - + packages_config.get("serverUrl", "") - + ";export FEDML_RUNNER_CMD=" - + runner_cmd_encoded - + ";envsubst < " - + os.path.join(cur_dir, "templates", "fedml-server-deployment.yaml") - + " | kubectl apply -f - " - ) - logging.info("FedMLServerRunner.run with k8s: " + run_deployment_cmd) - os.system(run_deployment_cmd) - - def stop_cloud_server(self): - self.cloud_server_name = FedMLServerRunner.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) \ - + "-" + str(self.edge_id) - self.server_docker_image = ( - self.agent_config["docker_config"]["registry_server"] - + self.agent_config["docker_config"]["registry_dir"] - + self.server_docker_base_image - ) - delete_deployment_cmd = ( - "export FEDML_AGGREGATOR_NAME=" - + self.cloud_server_name - + ";export FEDML_AGGREGATOR_SVC=" - + self.cloud_server_name - + ";export FEDML_AGGREGATOR_VERSION=" - + self.version - + ';export FEDML_AGGREGATOR_IMAGE_PATH="' - + self.server_docker_image - + '"' - + ";export FEDML_CONF_ID=" - + self.cloud_server_name - + ";export FEDML_DATA_PV_ID=" - + self.cloud_server_name - + ";export FEDML_DATA_PVC_ID=" - + self.cloud_server_name - + ";export FEDML_REGISTRY_SECRET_SUFFIX=" - + self.cloud_server_name - + ";kubectl -n fedml-devops-aggregator-" - + self.version - + " delete deployment " - + self.cloud_server_name - + ";kubectl -n fedml-devops-aggregator-" - + self.version - + " delete svc " - + self.cloud_server_name - + ";kubectl -n fedml-devops-aggregator-" - + self.version - + " delete secret secret-" - + self.cloud_server_name - ) - logging.info("FedMLServerRunner.stop_run with k8s: " + delete_deployment_cmd) - os.system(delete_deployment_cmd) - - def setup_message_center(self): - if self.message_center is not None: - return - - self.message_center = FedMLMessageCenter(agent_config=self.agent_config) - self.message_center.start_sender() - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.message_center) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = self.edge_id - self.mlops_metrics.server_agent_id = self.server_agent_id - - def rebuild_message_center(self, message_center_queue): - self.message_center = FedMLMessageCenter(message_queue=message_center_queue) - - if 
self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.message_center) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = self.edge_id - self.mlops_metrics.server_agent_id = self.server_agent_id - - def release_message_center(self): - try: - if self.message_center is not None: - self.message_center.stop() - self.message_center = None - - except Exception as e: - logging.error( - f"Failed to release the message center with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - def send_training_stop_request_to_edges( - self, edge_id_list, payload=None, run_id=0): - if payload is None: - payload_obj = {"runId": run_id, "edgeids": edge_id_list} - else: - payload_obj = json.loads(payload) - - for edge_id in edge_id_list: - topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train" - logging.info("stop_train: send topic " + topic_stop_train) - self.message_center.send_message(topic_stop_train, json.dumps(payload_obj)) - - def send_training_stop_request_to_specific_edge(self, edge_id, payload): - topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train" - logging.info("stop_train: send topic " + topic_stop_train) - self.message_center.send_message(topic_stop_train, payload) - - def send_training_stop_request_to_cloud_server(self, edge_id, payload): - topic_stop_train = "mlops/flserver_agent_" + str(edge_id) + "/stop_train" - logging.info("stop_train: send topic " + topic_stop_train) - self.message_center.send_message(topic_stop_train, payload) - - def send_training_stop_request_to_edges_when_exception( - self, edge_id_list, payload=None, run_id=0, server_id=None, status=None): - if payload is None: - payload_obj = {"runId": run_id, "edgeids": edge_id_list} - if server_id is not None: - payload_obj["serverId"] = server_id - else: - payload_obj = json.loads(payload) - payload_obj["run_status"] = ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION if status is None else status - topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train" - self.callback_stop_train(topic_stop_train, json.dumps(payload_obj), use_payload=payload_obj) - - def callback_stop_train(self, topic, payload, use_payload=None): - # logging.info("callback_stop_train: topic = %s, payload = %s" % (topic, payload)) - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json.get("runId", None) - if run_id is None: - run_id = request_json.get("id", None) - - edge_id_list = request_json["edgeids"] - server_id = request_json.get("serverId", None) - if server_id is None: - server_id = request_json.get("server_id", None) - - if run_id is None or server_id is None: - logging.info("Invalid JSON payload: runId or serverId is missing!") - return - - # logging.info("Stop run with multiprocessing.") - - # Stop server with multiprocessing mode - run_id_str = str(run_id) - stop_request_json = self.running_request_json.get(run_id_str, None) - if stop_request_json is None: - stop_request_json = request_json - if use_payload is not None: - stop_request_json = use_payload - - if self.run_process_event_map.get(run_id_str) is not None: - self.run_process_event_map.get(run_id_str).set() - - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config, -
edge_id=self.edge_id - ) - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - self.run_process_event_map_for_stop[run_id_str] = multiprocessing.Event() - if self.run_edge_id_status_queue_map.get(run_id_str, None) is None: - self.run_edge_id_status_queue_map[run_id_str] = Queue() - if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue() - # if self.run_edge_device_info_global_queue_for_stop is None: - # self.run_edge_device_info_global_queue_for_stop = Array('i', list()) - - self.run_stop_process_map[run_id_str] = Process( - target=server_runner.run_stop, args=( - self.run_process_event_map_for_stop[run_id_str], - self.run_edge_id_status_queue_map[run_id_str], - self.run_edge_device_info_queue_map_for_stop[run_id_str], - self.run_edge_device_info_global_queue_for_stop, - self.message_center.get_message_queue(), - ) - ) - self.run_stop_process_map[run_id_str].start() - elif self.run_as_cloud_agent: - self.send_training_stop_request_to_cloud_server(server_id, payload) - return - elif self.run_as_cloud_server: - # if not self.use_local_process_as_cloud_server: - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config, - edge_id=server_id - ) - server_runner.run_as_cloud_agent = self.run_as_cloud_agent - self.run_process_event_map_for_stop[run_id_str] = multiprocessing.Event() - if self.run_edge_id_status_queue_map.get(run_id_str, None) is None: - self.run_edge_id_status_queue_map[run_id_str] = Queue() - if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue() - # if self.run_edge_device_info_global_queue_for_stop is None: - # self.run_edge_device_info_global_queue_for_stop = Array('i', list()) - - self.run_stop_process_map[run_id_str] = Process( - target=server_runner.run_stop, args=( - self.run_process_event_map_for_stop[run_id_str], - self.run_edge_id_status_queue_map[run_id_str], - self.run_edge_device_info_queue_map_for_stop[run_id_str], - self.run_edge_device_info_global_queue_for_stop, - self.message_center.get_message_queue(), - ) - ) - self.run_stop_process_map[run_id_str].start() - return - - if self.running_request_json.get(run_id_str, None) is not None: - self.running_request_json.pop(run_id_str) - - if self.run_process_map.get(run_id_str, None) is not None: - self.run_process_map.pop(run_id_str) - - def run_stop(self, process_event, edge_id_status_queue, edge_device_info_queue, - edge_device_info_global_queue, message_center_queue): - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - try: - MLOpsUtils.set_ntp_offset(self.ntp_offset) - - self.rebuild_message_center(message_center_queue) - - self.run_stop_impl(edge_id_status_queue, edge_device_info_queue, edge_device_info_global_queue) - except Exception as e: - logging.error("Stop runner exits with exceptions. 
{}".format(traceback.format_exc())) - finally: - logging.info("Release resources.") - - def run_stop_impl(self, edge_id_status_queue, edge_device_info_queue, edge_device_info_global_queue): - run_id_str = str(self.run_id) - edge_id_list = self.request_json["edgeids"] - - # Detect running status of all edges - status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status( - edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue, - status_timeout=120, need_to_trigger_exception=False, - status_check_context=SchedulerConstants.STATUS_CHECK_FRO_RUN_STOP_CONTEXT) - - # Send the training stopping request to running edges. - for edge_id_item, _ in active_edge_info_dict.items(): - self.send_training_stop_request_to_specific_edge(edge_id_item, json.dumps(self.request_json)) - time.sleep(0.2) - time.sleep(3) - - total_sleep_seconds = 0 - allowed_status_check_sleep_seconds = 60 - server_id = self.edge_id - running_edges_list = list() - current_edge_id_status_map = dict() - - while True: - # Fetch edge id and status from the edge id status queue - while True: - try: - queue_item = edge_id_status_queue.get(block=False, timeout=3) - if queue_item is not None: - current_edge_id_status_map.update(queue_item) - except queue.Empty as e: # If queue is empty, then break loop - break - - # Calc the total killed device number - running_edges_list.clear() - number_of_failed_edges = 0 - number_of_finished_edges = 0 - number_of_killed_edges = 0 - for edge_id_item, status_item in current_edge_id_status_map.items(): - if edge_id_item == "server": - continue - - if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: - number_of_failed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - number_of_finished_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: - number_of_killed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE: - continue - - running_edges_list.append(edge_id_item) - - # If the killed device number is equal total device number, then break - if len(running_edges_list) <= 0 and len(current_edge_id_status_map.keys()) == len(edge_id_list) + 1: - break - - # Calc the timeout value to wait to device killed. 
- time.sleep(3) - total_sleep_seconds += 3 - if total_sleep_seconds < allowed_status_check_sleep_seconds: - continue - - # If timeout, then report killed device status - no_response_edges = list(set(edge_id_list) - set(running_edges_list)) - if len(no_response_edges) <= 0: - break - for edge_id_item in no_response_edges: - self.mlops_metrics.report_client_id_status( - edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED, - server_id=self.edge_id, run_id=self.run_id) - - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) - elif self.run_as_cloud_agent: - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, server_id) - - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - def set_run_status(self, run_id, status, running_request_json): - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=running_request_json, agent_config=self.agent_config - ) - server_runner.edge_id = self.edge_id - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - server_runner.run_status = status - server_runner.message_center = self.message_center - server_runner.mlops_metrics = self.mlops_metrics - server_runner.cleanup_client_with_status() - - def callback_runner_id_status(self, topic, payload): - # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload)) - # logging.info( - # f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - # ) - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json["run_id"] - status = request_json["status"] - edge_id = request_json["edge_id"] - server_id = request_json.get("server_id", None) - run_id_str = str(run_id) - - if ( - status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED - or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED - or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED - ): - completed_event = self.run_process_completed_event_map.get(run_id_str, None) - if completed_event is not None: - completed_event.set() - - FedMLServerDataInterface.get_instance().save_job_status(run_id, self.edge_id, status, status) - - # Stop server with multiprocessing mode - running_request_json = self.running_request_json.get(run_id_str, None) - if running_request_json is None: - running_request_json = request_json - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - self.set_run_status(run_id, status, running_request_json) - - run_process = self.run_process_map.get(run_id_str, None) - if run_process is not None: - if run_process.pid is not None: - RunProcessUtils.kill_process(run_process.pid) - - self.run_process_map.pop(run_id_str) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - elif self.run_as_cloud_agent: - pass - elif self.run_as_cloud_server: - self.set_run_status(run_id, status, running_request_json) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - if self.use_local_process_as_cloud_server: - # RunProcessUtils.kill_process(os.getpid()) - 
cloud_server_process = self.run_process_map.get(run_id_str, None) - if cloud_server_process is not None: - RunProcessUtils.kill_process(cloud_server_process.pid) - else: - self.stop_cloud_server() - - if self.run_process_map.get(run_id_str, None) is not None: - self.run_process_map.pop(run_id_str) - - self.remove_listener_for_run_metrics(self.run_id) - self.remove_listener_for_run_logs(self.run_id) - elif ( - status == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION - ): - request_json = self.running_request_json.get(run_id_str, None) - if request_json is not None: - edge_id_list = request_json.get("edgeids", list()) - server_id = request_json.get("serverId", None) - server_id = request_json.get("server_id", None) if server_id is None else server_id - self.send_training_stop_request_to_edges_when_exception( - edge_id_list, run_id=run_id, server_id=server_id, - status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - - FedMLServerDataInterface.get_instance().save_job_status(run_id, self.edge_id, status, status) - else: - request_json = self.running_request_json.get(run_id_str, None) - if request_json is None: - request_json = self.start_request_json - self.mlops_metrics.report_server_training_status( - run_id, status, edge_id=self.edge_id, running_json=json.dumps(request_json)) - - def cleanup_client_with_status(self): - if self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - # logging.info("received to finished status.") - self.cleanup_run_when_finished(should_send_server_id_status=False) - elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: - # logging.info("received to failed status.") - self.cleanup_run_when_starting_failed(should_send_server_id_status=False) - elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: - # logging.info("received to failed status.") - self.cleanup_run_when_starting_failed( - status=self.run_status, should_send_server_id_status=False) - - def callback_report_current_status(self, topic, payload): - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - request_json = json.loads(payload) - if self.run_as_edge_server_and_agent: - self.send_agent_active_msg() - elif self.run_as_cloud_agent: - self.send_agent_active_msg() - elif self.run_as_cloud_server: - pass - - @staticmethod - def process_ota_upgrade_msg(): - os.system("pip install -U fedml") - - def callback_server_ota_msg(self, topic, payload): - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - request_json = json.loads(payload) - cmd = request_json["cmd"] - - if cmd == ServerConstants.FEDML_OTA_CMD_UPGRADE: - try: - self.process_ota_upgrade_msg() - # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start() - raise Exception("After upgraded, restart runner...") - except Exception as e: - pass - elif cmd == ServerConstants.FEDML_OTA_CMD_RESTART: - raise Exception("Restart runner...") - - def callback_response_device_info(self, topic, payload): - # Parse payload - payload_json = json.loads(payload) - run_id = payload_json.get("run_id", 0) - context = payload_json.get("context", None) - master_device_id = payload_json.get("master_device_id", 0) - slave_device_id = payload_json.get("slave_device_id", 0) - slave_device_id_list = payload_json.get("slave_device_id_list", 0) - edge_id = payload_json.get("edge_id", 0) - device_info = payload_json.get("edge_info", 0) - device_info["master_device_id"] = master_device_id - device_info["slave_device_id"] = slave_device_id - 
device_info["slave_device_id_list"] = slave_device_id_list - run_id_str = str(run_id) - - # Put device info into a multiprocessing queue so master runner checks if all edges are ready - if context is None: - if self.run_edge_device_info_queue_map.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map[run_id_str] = Queue() - self.run_edge_device_info_queue_map[run_id_str].put(device_info) - - # if self.run_edge_device_info_global_queue is None: - # self.run_edge_device_info_global_queue = Array('i', list()) - # - # self.run_edge_device_info_global_queue[len(self.run_edge_device_info_global_queue)] = \ - # {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info} - - self.check_model_device_ready_and_deploy(run_id, master_device_id, slave_device_id, - slave_device_id_list=slave_device_id_list) - elif context == SchedulerConstants.STATUS_CHECK_FRO_RUN_STOP_CONTEXT: - if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue() - self.run_edge_device_info_queue_map_for_stop[run_id_str].put(device_info) - - # if self.run_edge_device_info_global_queue_for_stop is None: - # self.run_edge_device_info_global_queue_for_stop = Array('i', list()) - # - # self.run_edge_device_info_global_queue_for_stop[len(self.run_edge_device_info_global_queue_for_stop)] = \ - # {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info} - - def check_model_device_ready_and_deploy(self, run_id, master_device_id, slave_device_id, slave_device_id_list=None): - request_json = self.running_request_json.get(str(run_id), None) - if request_json is None: - return - run_config = request_json["run_config"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_type = job_yaml.get("job_type", None) - job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - if job_type != Constants.JOB_TASK_TYPE_DEPLOY and job_type != Constants.JOB_TASK_TYPE_SERVE: - return - - # Init model device ids for each run - run_id_str = str(run_id) - if self.run_model_device_ids.get(run_id_str, None) is None: - self.run_model_device_ids[run_id_str] = list() - - # Append master device and slave devices to the model devices map - self.run_model_device_ids[run_id_str].append({"master_device_id": master_device_id, - "slave_device_id": slave_device_id}) - model_device_ids = self.run_model_device_ids.get(run_id_str, None) - if model_device_ids is None: - return - - # Check if all model devices are ready - if len(model_device_ids) != len(self.run_edge_ids.get(run_id_str, list())): - return - - # Generate model master ids and model slave device ids - device_master_ids = list() - device_slave_ids = list() - for device_ids in model_device_ids: - model_master_id = device_ids.get("master_device_id") - model_slave_id = device_ids.get("slave_device_id") - device_master_ids.append(model_master_id) - device_slave_ids.append(model_slave_id) - - if len(device_master_ids) <= 0: - return - - # Generate serving devices for deploying - serving_devices = list() - serving_devices.append(device_master_ids[0]) - serving_devices.extend(device_slave_ids) - - # Start to deploy the model - self.deploy_model(serving_devices, request_json, run_id=run_id) - - def callback_request_device_info_from_mlops(self, topic, payload): - self.response_device_info_to_mlops(topic, payload) - - def response_device_info_to_mlops(self, topic, payload): - response_topic = 
f"master_agent/mlops/response_device_info" - payload_json = json.loads(payload) - need_gpu_info = payload_json.get("need_gpu_info", False) - if self.mlops_metrics is not None: - if not need_gpu_info: - response_payload = { - "run_id": self.run_id, - "master_agent_device_id": self.edge_id, - "fedml_version": fedml.__version__ - } - else: - total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, \ - gpu_cores_total, gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = \ - sys_utils.get_sys_realtime_stats() - gpu_available_ids = JobRunnerUtils.get_instance().get_available_gpu_id_list(self.edge_id) - gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids) - gpu_cores_available = len(gpu_available_ids) - response_payload = { - "run_id": self.run_id, - "master_agent_device_id": self.edge_id, - "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2), - "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "cpuUtilization": round(cup_utilization, 2), - "cpuCores": cpu_cores, - "gpuCoresTotal": gpu_cores_total, - "gpuCoresAvailable": gpu_cores_available, - "networkTraffic": sent_bytes + recv_bytes, - "timestamp": int(MLOpsUtils.get_ntp_time()), - "fedml_version": fedml.__version__ - } - self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload)) - - @staticmethod - def get_device_id(): - device_file_path = os.path.join(ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME) - file_for_device_id = os.path.join(device_file_path, "devices.id") - if not os.path.exists(device_file_path): - os.makedirs(device_file_path) - elif os.path.exists(file_for_device_id): - with open(file_for_device_id, 'r', encoding='utf-8') as f: - device_id_from_file = f.readline() - if device_id_from_file is not None and device_id_from_file != "": - return device_id_from_file - - if platform.system() == "Darwin": - cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \ - "|awk -F':' '{print $2}' " - device_id = os.popen(cmd_get_serial_num).read() - device_id = device_id.replace('\n', '').replace(' ', '') - if device_id is None or device_id == "": - device_id = hex(uuid.getnode()) - else: - device_id = "0x" + device_id - else: - if "nt" in os.name: - - def get_uuid(): - guid = "" - try: - cmd = "wmic csproduct get uuid" - guid = str(subprocess.check_output(cmd)) - pos1 = guid.find("\\n") + 2 - guid = guid[pos1:-15] - except Exception as ex: - pass - return str(guid) - - device_id = str(get_uuid()) - elif "posix" in os.name: - device_id = sys_utils.get_device_id_in_docker() - if device_id is None: - device_id = hex(uuid.getnode()) - else: - device_id = sys_utils.run_subprocess_open( - "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split() - ) - device_id = hex(device_id) - - if device_id is not None and device_id != "": - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - else: - device_id = hex(uuid.uuid4()) - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - - return device_id - - def bind_account_and_device_id(self, url, account_id, device_id, os_name, api_key="", role=None): - if role is None: - role = "edge_server" - if self.run_as_edge_server_and_agent: - role = "edge_server" - elif 
self.run_as_cloud_agent: - role = "cloud_agent" - elif self.run_as_cloud_server: - role = "cloud_server" - - ip = requests.get('https://checkip.amazonaws.com').text.strip() - fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \ - cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \ - gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info() - host_name = sys_utils.get_host_name() - json_params = { - "accountid": account_id, - "deviceid": device_id, - "type": os_name, - "state": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, - "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, - "processor": cpu_info, - "core_type": cpu_info, - "network": "", - "role": role, - "os_ver": os_ver, - "memory": total_mem, - "ip": ip, - "api_key": api_key, - "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver, - "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver, - "mpi_installed": mpi_installed, "cpu_usage": cpu_usage, - "available_mem": available_mem, "total_mem": total_mem, - "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name} - } - if gpu_count > 0: - if gpu_total_mem is not None: - json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem - else: - json_params["gpu"] = gpu_info if gpu_info is not None else "" - json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else "" - if gpu_available_mem is not None: - json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem - if gpu_total_mem is not None: - json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem - - json_params["extra_infos"]["gpu_count"] = gpu_count - json_params["extra_infos"]["gpu_vendor"] = gpu_vendor - json_params["extra_infos"]["gpu_device_name"] = gpu_device_name - - gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count) - gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0 - gpu_list = sys_utils.get_gpu_list() - json_params["extra_infos"]["gpu_available_count"] = gpu_available_count - json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list - json_params["extra_infos"]["gpu_list"] = gpu_list - else: - json_params["gpu"] = "None" - json_params["extra_infos"]["gpu_available_count"] = 0 - json_params["extra_infos"]["gpu_available_id_list"] = [] - json_params["extra_infos"]["gpu_list"] = [] - - _, cert_path = MLOpsConfigs.get_request_params() - if cert_path is not None: - try: - requests.session().verify = cert_path - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - except requests.exceptions.SSLError as err: - MLOpsConfigs.install_root_ca_file() - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - else: - response = requests.post(url, json=json_params, headers={"Connection": "close"}) - edge_id = -1 - user_name = None - extra_url = None - if response.status_code != 200: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - pass - else: - # print("url = {}, response = {}".format(url, response)) - status_code = response.json().get("code") - if status_code == "SUCCESS": - edge_id = response.json().get("data").get("id") - user_name = response.json().get("data").get("userName", None) - extra_url = 
response.json().get("data").get("url", None) - if edge_id is None or edge_id <= 0: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - else: - if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR: - raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR) - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - return -1, None, None - return edge_id, user_name, extra_url - - def fetch_configs(self): - return MLOpsConfigs.fetch_all_configs() - - def send_agent_active_msg(self): - active_topic = "flserver_agent/active" - status = MLOpsStatus.get_instance().get_server_agent_status(self.edge_id) - if ( - status is not None - and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE - and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - ): - return - - if self.run_as_cloud_agent: - status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - else: - try: - current_job = FedMLServerDataInterface.get_instance().get_job_by_id(self.run_id) - except Exception as e: - current_job = None - if current_job is None: - if status is not None and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE: - status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - else: - return - else: - status = ServerConstants.get_device_state_from_run_edge_state(current_job.status) - active_msg = {"ID": self.edge_id, "status": status} - MLOpsStatus.get_instance().set_server_agent_status(self.edge_id, status) - if self.mqtt_mgr is not None: - self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg)) - else: - self.send_message_json(active_topic, json.dumps(active_msg)) - - def recover_start_train_msg_after_upgrading(self): - try: - current_job = FedMLServerDataInterface.get_instance().get_current_job() - if current_job is not None and \ - current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING: - logging.info("start training after upgrading.") - server_agent_id = self.edge_id - topic_start_train = "mlops/flserver_agent_" + str(server_agent_id) + "/start_train" - self.callback_start_train(topic_start_train, current_job.running_json) - except Exception as e: - logging.info("Failed to recover the start_train message after upgrading: {}".format(traceback.format_exc())) - - def on_agent_mqtt_connected(self, mqtt_client_object): - # The MQTT message topic format is as follows: <sender>/<receiver>/<action> - - # Setup MQTT message listener for starting training - server_agent_id = self.edge_id - topic_start_train = "mlops/flserver_agent_" + str(server_agent_id) + "/start_train" - self.add_message_listener(topic_start_train, self.callback_start_train) - self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center) - - # Setup MQTT message listener for stopping training - topic_stop_train = "mlops/flserver_agent_" + str(server_agent_id) + "/stop_train" - self.add_message_listener(topic_stop_train, self.callback_stop_train) - self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center) - - # Setup MQTT message listener for server status switching - topic_server_status = "fl_server/flserver_agent_" + str(server_agent_id) + "/status" - self.add_message_listener(topic_server_status, self.callback_runner_id_status) - self.mqtt_mgr.add_message_listener(topic_server_status, self.listener_message_dispatch_center) - - # Setup MQTT message listener for reporting the current device status.
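Each registration in on_agent_mqtt_connected (continued below) pairs the agent's own dispatch table with the underlying MQTT manager and then subscribes with QoS 2. FedML wraps paho-mqtt in its MqttManager; a bare paho-mqtt sketch of the same per-topic dispatch looks roughly like this (the broker address, client id, and the agent id in the topic are placeholders, and the paho-mqtt 1.x callback API is assumed):

    import json
    import paho.mqtt.client as mqtt

    def on_start_train(client, userdata, msg):
        # Per-topic callback: decode the JSON payload the way callback_start_train does.
        request = json.loads(msg.payload.decode("utf-8"))
        print("start_train for run", request.get("runId"))

    def on_connect(client, userdata, flags, rc):
        topic = "mlops/flserver_agent_1/start_train"  # "1" stands in for the real agent id
        client.message_callback_add(topic, on_start_train)  # route this topic to its handler
        client.subscribe(topic, qos=2)  # QoS 2, as the agent uses for control topics

    client = mqtt.Client(client_id="FedML_ServerAgent_demo")  # paho-mqtt 1.x constructor
    client.on_connect = on_connect
    # client.connect("broker.example.com", 1883, keepalive=180)
    # client.loop_forever()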
- topic_report_status = "mlops/report_device_status" - self.add_message_listener(topic_report_status, self.callback_report_current_status) - self.mqtt_mgr.add_message_listener(topic_report_status, self.listener_message_dispatch_center) - - # Setup MQTT message listener for OTA messages from MLOps. - topic_ota_msg = "mlops/flserver_agent_" + str(server_agent_id) + "/ota" - self.add_message_listener(topic_ota_msg, self.callback_server_ota_msg) - self.mqtt_mgr.add_message_listener(topic_ota_msg, self.listener_message_dispatch_center) - - # Setup MQTT message listener for device info responses from the client. - topic_response_device_info = "client/server/response_device_info/" + str(self.edge_id) - self.add_message_listener(topic_response_device_info, self.callback_response_device_info) - self.mqtt_mgr.add_message_listener(topic_response_device_info, self.listener_message_dispatch_center) - - # Setup MQTT message listener for device info requests from MLOps. - topic_request_device_info_from_mlops = f"mlops/master_agent/request_device_info/{self.edge_id}" - self.add_message_listener(topic_request_device_info_from_mlops, self.callback_request_device_info_from_mlops) - self.mqtt_mgr.add_message_listener( - topic_request_device_info_from_mlops, self.listener_message_dispatch_center) - - # Subscribe to the topics for starting training, stopping training, and fetching client status. - mqtt_client_object.subscribe(topic_start_train, qos=2) - mqtt_client_object.subscribe(topic_stop_train, qos=2) - mqtt_client_object.subscribe(topic_server_status, qos=2) - mqtt_client_object.subscribe(topic_report_status, qos=2) - mqtt_client_object.subscribe(topic_ota_msg, qos=2) - mqtt_client_object.subscribe(topic_response_device_info, qos=2) - mqtt_client_object.subscribe(topic_request_device_info_from_mlops, qos=2) - - self.subscribed_topics.clear() - self.subscribed_topics.append(topic_start_train) - self.subscribed_topics.append(topic_stop_train) - self.subscribed_topics.append(topic_server_status) - self.subscribed_topics.append(topic_report_status) - self.subscribed_topics.append(topic_ota_msg) - self.subscribed_topics.append(topic_response_device_info) - self.subscribed_topics.append(topic_request_device_info_from_mlops) - - # Broadcast the first active message.
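The active-message broadcast on the next line is one half of the agent's liveness protocol; the other half is the MQTT last will registered in setup_agent_mqtt_connection further below, which makes the broker announce an OFFLINE status if the agent dies without a clean disconnect. A hedged paho-mqtt sketch of that pairing (the edge id and the status strings are placeholders for the real ServerConstants values):

    import json
    import paho.mqtt.client as mqtt

    EDGE_ID = 1  # placeholder edge id
    client = mqtt.Client(client_id="FedML_ServerAgent_demo")  # paho-mqtt 1.x constructor

    # Registered before connect(): the broker publishes this message on the agent's
    # behalf if the connection drops without a clean disconnect.
    client.will_set("flserver_agent/last_will_msg",
                    payload=json.dumps({"ID": EDGE_ID, "status": "OFFLINE"}),
                    qos=2, retain=False)

    # client.connect("broker.example.com", 1883, keepalive=180)
    # While alive, the agent announces itself explicitly on the active topic:
    # client.publish("flserver_agent/active", json.dumps({"ID": EDGE_ID, "status": "IDLE"}))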
- self.send_agent_active_msg() - - if self.run_as_cloud_server: - # Start the FedML server - self.callback_start_train(payload=self.args.runner_cmd) - - # Echo results - MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout() - print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!") - print( - "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is " - + str(self.unique_device_id) - ) - MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=True) - - # Start the message center for listener - self.start_listener(sender_message_queue=self.message_center.get_message_queue(), - agent_config=self.agent_config) - - def on_agent_mqtt_disconnected(self, mqtt_client_object): - MLOpsStatus.get_instance().set_server_agent_status( - self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE - ) - - def setup_agent_mqtt_connection(self, service_config): - # Setup MQTT connection - self.mqtt_mgr = MqttManager( - service_config["mqtt_config"]["BROKER_HOST"], - service_config["mqtt_config"]["BROKER_PORT"], - service_config["mqtt_config"]["MQTT_USER"], - service_config["mqtt_config"]["MQTT_PWD"], - service_config["mqtt_config"]["MQTT_KEEPALIVE"], - f"FedML_ServerAgent_Daemon_@{self.user_name}@_@{self.args.current_device_id}@_@{str(uuid.uuid4())}@", - "flserver_agent/last_will_msg", - json.dumps({"ID": self.edge_id, "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE}) - ) - - # Init local database - FedMLServerDataInterface.get_instance().create_job_table() - - # Start the message center to process edge related messages. - self.setup_message_center() - - server_api_cmd = "fedml.computing.scheduler.master.server_api:api" - server_api_pids = RunProcessUtils.get_pid_from_cmd_line(server_api_cmd) - if server_api_pids is None or len(server_api_pids) <= 0: - # Start local API services - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - python_program = get_python_program() - self.local_api_process = ServerConstants.exec_console_with_script( - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - python_program, server_api_cmd, ServerConstants.LOCAL_SERVER_API_PORT, - fedml_base_dir - ), - should_capture_stdout=False, - should_capture_stderr=False - ) - # if self.local_api_process is not None and self.local_api_process.pid is not None: - # print(f"Server local API process id {self.local_api_process.pid}") - - # Setup MQTT connected listener - self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected) - self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected) - self.mqtt_mgr.connect() - - # Report the IDLE status to MLOps - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, edge_id=self.edge_id) - MLOpsStatus.get_instance().set_server_agent_status( - self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - ) - - # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor() - - self.mlops_metrics.stop_device_realtime_perf() - self.mlops_metrics.report_device_realtime_perf(self.args, service_config["mqtt_config"], is_client=False) - - if not self.run_as_cloud_server: - self.recover_start_train_msg_after_upgrading() - - JobCleanup.get_instance().sync_data_on_startup(self.edge_id, is_client=False) - - self.master_api_daemon = MasterApiDaemon() - self.master_api_process = 
Process(target=self.master_api_daemon.run) - self.master_api_process.start() - - # if self.model_device_server is None: - # self.model_device_server = FedMLModelDeviceServerRunner(self.args, self.args.current_device_id, - # self.args.os_name, self.args.is_from_docker, - # self.agent_config) - # self.model_device_server.start() - - def start_agent_mqtt_loop(self): - # Start MQTT message loop - try: - self.mqtt_mgr.loop_forever() - except Exception as e: - if str(e) == "Restarting after upgraded...": - logging.info("Restarting after upgraded...") - else: - logging.info("Server tracing: {}".format(traceback.format_exc())) - - finally: - login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log") - with open(login_exit_file, "w") as f: - f.writelines(f"{os.getpid()}.") - - self.stop_agent() - - time.sleep(5) - sys_utils.cleanup_all_fedml_server_login_processes( - ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False) - sys.exit(1) - - def stop_agent(self): - if self.run_process_event is not None: - self.run_process_event.set() - - if self.mqtt_mgr is not None: - try: - for topic in self.subscribed_topics: - self.mqtt_mgr.unsubscribe_msg(topic) - except Exception as e: - pass - - self.mqtt_mgr.loop_stop() - self.mqtt_mgr.disconnect() - self.release_message_center() - - def get_runner(self): - runner = FedMLServerRunner( - self.args, run_id=self.run_id, request_json=self.request_json, - agent_config=self.agent_config - ) - runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - runner.edge_id = self.edge_id - runner.server_agent_id = self.server_agent_id - runner.start_request_json = self.start_request_json - runner.unique_device_id = self.unique_device_id - runner.user_name = self.user_name - runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - runner.run_as_cloud_agent = self.run_as_cloud_agent - runner.run_as_cloud_server = self.run_as_cloud_server - return runner diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py deleted file mode 100755 index e82e8c5542..0000000000 --- a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py +++ /dev/null @@ -1,1335 +0,0 @@ -import json -import logging -import multiprocessing -import sys - -from multiprocessing import Process -import os -import platform -import shutil -import subprocess -import threading - -import time -import traceback -import urllib -import uuid -import zipfile -from urllib.parse import urlparse, urljoin - -import requests -import docker - -import fedml -from fedml import mlops -from fedml.computing.scheduler.model_scheduler.device_model_msg_object import FedMLModelMsgObject -from fedml.core.distributed.communication.s3.remote_storage import S3Storage -from .device_model_cache import FedMLModelCache -from ..comm_utils import sys_utils, security_utils - -from ..comm_utils.container_utils import ContainerUtils - -from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog - -from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager -from ..comm_utils.yaml_utils import load_yaml_config -from .device_client_constants import ClientConstants - -from ....core.mlops.mlops_metrics import MLOpsMetrics - -from ....core.mlops.mlops_configs import MLOpsConfigs -from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon -from ....core.mlops.mlops_status import MLOpsStatus -from ..comm_utils.sys_utils import 
get_sys_runner_info, get_python_program -from .device_model_deployment import start_deployment, run_http_inference_with_curl_request -from .device_client_data_interface import FedMLClientDataInterface -from ....core.mlops.mlops_utils import MLOpsUtils -from ..comm_utils.job_utils import JobRunnerUtils -from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils -from .device_mqtt_inference_protocol import FedMLMqttInference -from .device_model_db import FedMLModelDatabase -from ..comm_utils.constants import SchedulerConstants -from fedml.computing.scheduler.comm_utils.job_monitor import JobMonitor - - -class RunnerError(Exception): - """ Runner failed. """ - pass - - -class RunnerCompletedError(Exception): - """ Runner completed. """ - pass - - -class FedMLClientRunner: - FEDML_BOOTSTRAP_RUN_OK = "[FedML]Bootstrap Finished" - - def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0): - self.local_api_process = None - self.run_process_event = None - self.run_process_event_map = dict() - self.run_process_completed_event = None - self.run_process_completed_event_map = dict() - self.run_inference_event_map = dict() - self.run_inference_response_map = dict() - self.run_process_map = dict() - self.device_status = None - self.current_training_status = None - self.mqtt_mgr = None - self.client_mqtt_mgr = None - self.client_mqtt_is_connected = False - self.client_mqtt_lock = None - self.edge_id = edge_id - self.run_id = run_id - self.unique_device_id = None - self.args = args - self.request_json = request_json - self.version = args.version - self.device_id = args.device_id - self.cur_dir = os.path.split(os.path.realpath(__file__))[0] - if args.current_running_dir is not None: - self.cur_dir = args.current_running_dir - self.sudo_cmd = "" - self.is_mac = False - if platform.system() == "Darwin": - self.is_mac = True - - self.agent_config = agent_config - self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data") - self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data") - self.fedml_data_dir = self.fedml_data_base_package_dir - self.fedml_config_dir = os.path.join("/", "fedml", "conf") - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {} - - self.mlops_metrics = None - self.client_active_list = dict() - self.infer_host = "127.0.0.1" - self.redis_addr = "local" - self.redis_port = "6379" - self.redis_password = "fedml_default" - self.model_is_from_open = False - - self.model_runner_mapping = dict() - self.ntp_offset = MLOpsUtils.get_ntp_offset() - self.running_request_json = dict() - self.endpoint_inference_runners = dict() - self.mqtt_inference_obj = None - - self.subscribed_topics = list() - self.user_name = None - - def unzip_file(self, zip_file, unzip_file_path) -> str: - unziped_file_name = "" - if zipfile.is_zipfile(zip_file): - with zipfile.ZipFile(zip_file, "r") as zipf: - zipf.extractall(unzip_file_path) - unziped_file_name = zipf.namelist()[0] - else: - raise Exception("Invalid zip file {}".format(zip_file)) - - return unziped_file_name - - def retrieve_and_unzip_package(self, package_name, package_url): - local_package_path = ClientConstants.get_model_package_dir() - os.makedirs(local_package_path, exist_ok=True) - filename, filename_without_extension, file_extension = ClientConstants.get_filename_and_extension(package_url) - local_package_file = os.path.join(local_package_path, - f"fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}") - if os.path.exists(local_package_file): - 
os.remove(local_package_file) - logging.info("Download from package_url {}".format(package_url)) - - package_url_without_query_path = urljoin(package_url, urlparse(package_url).path) - urllib.request.urlretrieve(package_url_without_query_path, local_package_file, - reporthook=self.package_download_progress) - unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(), - f"unzip_fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}") - try: - shutil.rmtree(unzip_package_path, ignore_errors=True) - except Exception as e: - pass - - package_dir_name = self.unzip_file(local_package_file, unzip_package_path) # Use the unzipped folder name - unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name) - model_bin_file = os.path.join(unzip_package_path, "fedml_model.bin") - - logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format( - local_package_file, unzip_package_path, unzip_package_full_path)) - - return unzip_package_full_path, model_bin_file - - def retrieve_binary_model_file(self, package_name, package_url): - local_package_path = ClientConstants.get_model_package_dir() - if not os.path.exists(local_package_path): - os.makedirs(local_package_path, exist_ok=True) - unzip_package_path = ClientConstants.get_model_dir() - local_package_file = "{}".format(os.path.join(local_package_path, package_name)) - if os.path.exists(local_package_file): - os.remove(local_package_file) - package_url_without_query_path = urljoin(package_url, urlparse(package_url).path) - urllib.request.urlretrieve(package_url_without_query_path, local_package_file, - reporthook=self.package_download_progress) - - unzip_package_path = os.path.join(unzip_package_path, package_name) - if not os.path.exists(unzip_package_path): - os.makedirs(unzip_package_path, exist_ok=True) - dst_model_file = os.path.join(unzip_package_path, package_name) - if os.path.exists(local_package_file): - shutil.copy(local_package_file, dst_model_file) - - return unzip_package_path, dst_model_file - - def package_download_progress(self, count, blksize, filesize): - self.check_runner_stop_event() - - downloaded = count * blksize - downloaded = filesize if downloaded > filesize else downloaded - progress = (downloaded / filesize * 100) if filesize != 0 else 0 - progress_int = int(progress) - downloaded_kb = format(downloaded / 1024, '.2f') - - # Since this hook function is stateless, we keep a state variable to avoid printing the progress repeatedly - if count == 0: - self.prev_download_progress = 0 - if progress_int != self.prev_download_progress and progress_int % 5 == 0: - self.prev_download_progress = progress_int - logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) - - def build_dynamic_constrain_variables(self, run_id, run_config): - pass - - def update_local_fedml_config(self, run_id, model_config, model_config_parameters): - model_name = model_config["model_name"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - inference_engine = model_config.get("inference_engine", 0) - inference_end_point_id = run_id - - # Retrieve the model package or the binary model file.
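package_download_progress above is the reporthook that urllib.request.urlretrieve calls with (count, block_size, total_size); because urlretrieve treats it as a plain callable, any "log only every 5%" throttling has to keep its own state, which is what the comment inside the hook is getting at. A self-contained sketch of the same idea (the URL and filename are placeholders):

    import urllib.request

    class DownloadProgress:
        """Stateful reporthook for urllib.request.urlretrieve."""

        def __init__(self):
            self.last_logged = -1

        def __call__(self, count, block_size, total_size):
            # urlretrieve passes the block count, block size, and total size in bytes.
            if total_size <= 0:
                return
            percent = min(int(count * block_size * 100 / total_size), 100)
            if percent != self.last_logged and percent % 5 == 0:  # throttle to 5% steps
                self.last_logged = percent
                print(f"downloaded {percent}%")

    # urllib.request.urlretrieve("https://example.com/model.zip",
    #                            "model.zip", reporthook=DownloadProgress())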
- if self.model_is_from_open: - unzip_package_path, model_bin_file = self.retrieve_binary_model_file(model_name, model_storage_url) - else: - unzip_package_path, model_bin_file = self.retrieve_and_unzip_package(model_name, model_storage_url) - - # Load the config to memory - package_conf_object = {} - fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml") - if os.path.exists(fedml_local_config_file): - package_conf_object = load_yaml_config(fedml_local_config_file) - else: - if model_config_parameters is not None: - logging.warning(f"The fedml_local_config_file {fedml_local_config_file} does not exist, will \ - create a new one with the model_config_parameters from json.") - package_conf_object = model_config_parameters - with open(fedml_local_config_file, 'w') as f: - json.dump(package_conf_object, f) - else: - logging.info(f"The fedml_local_config_file {fedml_local_config_file} does not exist,\ - and the model_config_parameters is None.") - logging.info("The package_conf_object is {}".format(package_conf_object)) - - return unzip_package_path, model_bin_file, package_conf_object - - def build_dynamic_args(self, run_config, package_conf_object, base_dir): - pass - - def download_model_package(self, package_name, package_url): - # Copy config file from the client - unzip_package_path = self.retrieve_and_unzip_package( - package_name, package_url - ) - - return unzip_package_path - - def run(self, process_event, completed_event): - # print(f"Model worker runner process id {os.getpid()}, run id {self.run_id}") - - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - self.run_process_completed_event = completed_event - run_id = self.request_json.get("end_point_id") - - try: - FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir()) - FedMLModelDatabase.get_instance().create_table() - - MLOpsUtils.set_ntp_offset(self.ntp_offset) - self.setup_client_mqtt_mgr() - - if not self.run_impl(): - logging.info( - f"[endpoint/device][{run_id}/{self.edge_id}] Release gpu resource when the worker deployment returned false.") - self.release_gpu_ids(run_id) - except RunnerError: - logging.info("Runner stopped.") - logging.info( - f"[endpoint/device][{run_id}/{self.edge_id}] Release gpu resource when the worker deployment stopped.") - self.release_gpu_ids(run_id) - self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED) - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - except RunnerCompletedError: - logging.info( - f"[endpoint/device][{run_id}/{self.edge_id}] Release gpu resource when the worker deployment completed.") - self.release_gpu_ids(run_id) - logging.info("Runner completed.") - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - except Exception as e: - logging.error("Runner exits with exceptions. 
{}".format(traceback.format_exc())) - self.cleanup_run_when_starting_failed() - self.mlops_metrics.client_send_exit_train_msg( - run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - logging.info( - f"[endpoint/device][{run_id}/{self.edge_id}] Release gpu resource when the worker deployment occurred exceptions.") - self.release_gpu_ids(run_id) - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - time.sleep(2) - sys.exit(1) - finally: - logging.info("Release resources.") - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - if self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - self.release_client_mqtt_mgr() - - def release_gpu_ids(self, run_id): - JobRunnerUtils.get_instance().release_gpu_ids(run_id, self.edge_id) - - def check_runner_stop_event(self): - if self.run_process_event.is_set(): - logging.info("Received stopping event.") - raise RunnerError("Runner stopped") - - if self.run_process_completed_event is not None and self.run_process_completed_event.is_set(): - logging.info("Received completed event.") - raise RunnerCompletedError("Runner completed") - - def inference_run(self): - # run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ - # model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ - # inference_end_point_id, use_gpu, memory_size, model_version = self.parse_model_run_params(self.request_json) - # - # inference_client = FedMLModelServingClient(self.args, - # end_point_name, - # model_name, - # model_version, - # inference_request=self.request_json) - # inference_client.run() - pass - - def run_impl(self): - run_id = self.request_json["end_point_id"] - end_point_name = self.request_json["end_point_name"] - token = self.request_json["token"] - user_id = self.request_json["user_id"] - user_name = self.request_json["user_name"] - device_ids = self.request_json["device_ids"] - device_objs = self.request_json["device_objs"] - master_ip = self.request_json["master_node_ip"] - - model_config = self.request_json["model_config"] - model_name = model_config["model_name"] - model_id = model_config["model_id"] - model_version = model_config["model_version"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - model_config_parameters = self.request_json["parameters"] - - inference_port = model_config_parameters.get("worker_internal_port", - ClientConstants.MODEL_INFERENCE_DEFAULT_PORT) - inference_port_external = model_config_parameters.get("worker_external_port", inference_port) - - if "using_triton" in model_config_parameters and model_config_parameters["using_triton"]: - inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON - else: - inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT - - logging.info("[Critical] The inference_engine is: {}".format(inference_engine)) - - self.model_is_from_open = True if model_config.get("is_from_open", 0) == 1 else False - if self.model_is_from_open: - model_net_url = model_config["model_net_url"] - inference_end_point_id = run_id - use_gpu = "gpu" # TODO: Get GPU from device infos - memory_size = "4096m" # TODO: Get Memory size for each instance - - self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id) - - self.check_runner_stop_event() - - 
logging.info("model deployment request: {}".format(self.request_json)) - - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - # Initiate an FedMLInferenceClient object - # client_runner = FedMLClientRunner( - # self.args, edge_id=self.edge_id, run_id=self.run_id, request_json=self.request_json, - # agent_config=self.agent_config - # ) - # inference_process = Process(target=client_runner.inference_run) - # inference_process.start() - - self.mlops_metrics.report_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING, - is_from_model=True, running_json=json.dumps(self.request_json), run_id=run_id) - - self.mlops_metrics.report_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, - is_from_model=True, run_id=run_id) - - self.check_runner_stop_event() - - # update local config with real time parameters from server and dynamically replace variables value - logging.info("download and unzip model to local...") - unzip_package_path, model_bin_file, fedml_config_object = \ - self.update_local_fedml_config(run_id, model_config, model_config_parameters) - if unzip_package_path is None or fedml_config_object is None: - logging.info("failed to update local fedml config.") - self.check_runner_stop_event() - self.cleanup_run_when_starting_failed() - self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - return False - - logging.info("check downloaded packages...") - if not os.path.exists(unzip_package_path): - logging.info("failed to unzip file.") - self.check_runner_stop_event() - self.cleanup_run_when_starting_failed() - self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - return False - - # download model net and load into the torch model - model_from_open = None - self.model_is_from_open = None - if self.model_is_from_open: - logging.info("process the model net from open...") - self.check_runner_stop_event() - s3_config = self.agent_config.get("s3_config", None) - if s3_config is not None and model_net_url is not None and model_net_url != "": - s3_client = S3Storage(s3_config) - url_parsed = urlparse(model_net_url) - path_list = url_parsed.path.split("/") - if len(path_list) > 0: - model_key = path_list[-1] - model_from_open = s3_client.read_model_net(model_key, - ClientConstants.get_model_cache_dir()) - - model_input_size, model_input_type = mlops.get_training_model_input_info(model_net_url, s3_config) - if model_input_size is not None and model_input_type is not None: - model_config_parameters["input_size"] = model_input_size - model_config_parameters["input_types"] = model_input_type - logging.info( - f"model input size {model_input_size}, input type {model_input_type} from the open platform.") - - logging.info("Check if need update / removing existed container...") - if "diff_devices" in self.request_json and str(self.edge_id) in self.request_json["diff_devices"] and \ - self.request_json["diff_devices"][str(self.edge_id)] == ClientConstants.DEVICE_DIFF_REPLACE_OPERATION: - self.handle_replaced_device() - - logging.info("start the model deployment...") - self.check_runner_stop_event() - running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ - "", "", model_version, {}, {} - try: - client_ip = self.get_ip_address(self.request_json) - running_model_name, inference_output_url, inference_model_version, model_metadata, 
model_config = \ - start_deployment( - inference_end_point_id, end_point_name, model_id, model_version, - unzip_package_path, model_bin_file, model_name, inference_engine, - ClientConstants.INFERENCE_HTTP_PORT, - ClientConstants.INFERENCE_GRPC_PORT, - ClientConstants.INFERENCE_METRIC_PORT, - use_gpu, memory_size, - ClientConstants.INFERENCE_CONVERTOR_IMAGE, - ClientConstants.INFERENCE_SERVER_IMAGE, - client_ip, - self.model_is_from_open, model_config_parameters, - model_from_open, - token, - master_ip, self.edge_id, master_device_id=device_ids[0]) - except Exception as e: - inference_output_url = "" - logging.error(f"Exception at deployment: {traceback.format_exc()}") - - if inference_output_url == "": - logging.error("failed to deploy the model...") - - result_payload = self.send_deployment_results( - end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, - model_id, model_name, inference_output_url, inference_model_version, inference_port, - inference_engine, model_metadata, model_config) - - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.broadcast_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=self.run_id) - - self.mlops_metrics.client_send_exit_train_msg( - run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - - # After sending the deployment status, we should wait for the master to delete the deployment status - status_payload = self.send_deployment_status( - end_point_name, self.edge_id, model_id, model_name, model_version, inference_output_url, - ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, inference_port=inference_port) - - return False - else: - logging.info("finished deployment, continue to send results to master...") - status_payload = self.send_deployment_status( # Send Master the external port - end_point_name, self.edge_id, model_id, model_name, model_version, inference_output_url, - ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, inference_port=inference_port_external) - result_payload = self.send_deployment_results( - end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, - model_id, model_name, inference_output_url, model_version, inference_port_external, - inference_engine, model_metadata, model_config) - - if inference_port_external != inference_port: # For Worker, use internal port - logging.info("inference_port_external {} != inference_port {}".format( - inference_port_external, inference_port)) - status_payload = self.construct_deployment_status( - end_point_name, self.edge_id, model_id, model_name, model_version, inference_output_url, - ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, inference_port=inference_port) - result_payload = self.construct_deployment_results( - end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, - model_id, model_name, inference_output_url, model_version, inference_port, - inference_engine, model_metadata, model_config) - - FedMLModelDatabase.get_instance().set_deployment_result( - run_id, end_point_name, model_name, model_version, self.edge_id, json.dumps(result_payload)) - - FedMLModelDatabase.get_instance().set_deployment_status( - run_id, end_point_name, model_name, model_version, self.edge_id, json.dumps(status_payload)) - - time.sleep(1) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.broadcast_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - 
is_from_model=True, run_id=self.run_id) - return True - - def handle_replaced_device(self): - """ - Strategy-1: - (1) clean local records (2) find and clean current container using diff_version: {device_id: old_version} - """ - end_point_id = self.request_json["end_point_id"] - end_point_name = self.request_json["end_point_name"] - model_config = self.request_json["model_config"] - model_name = model_config["model_name"] - model_id = model_config["model_id"] - new_model_version = model_config["model_version"] - old_model_version = self.request_json["diff_version"][str(self.edge_id)] - - logging.info(f"[endpoint/device][{end_point_id}/{self.edge_id}] " - f"Start to handle replaced device {self.edge_id} to new version {new_model_version}." - f"which originally has old version {old_model_version}.") - - try: - JobRunnerUtils.get_instance().release_gpu_ids(end_point_id, self.edge_id) - - # Instead of deleting the records, need to change the job status to "UPGRADING" - FedMLClientDataInterface.get_instance().save_job_status( - end_point_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING - ) - - FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id( - end_point_id, end_point_name, model_name, self.edge_id) - - ClientConstants.remove_deployment( - end_point_name, model_name, old_model_version, - end_point_id, model_id, edge_id=self.edge_id) - except Exception as e: - # TODO: 1. Check this release action cause the resource seized by other run - # 2. If this atomic op failed, should rolling back - logging.info(f"Exception when removing deployment {traceback.format_exc()}") - pass - - def construct_deployment_results(self, end_point_name, device_id, model_status, - model_id, model_name, model_inference_url, - model_version, inference_port, inference_engine, - model_metadata, model_config): - deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name, - "model_id": model_id, "model_name": model_name, - "model_url": model_inference_url, "model_version": model_version, - "port": inference_port, - "inference_engine": inference_engine, - "model_metadata": model_metadata, - "model_config": model_config, - "model_status": model_status, - "inference_port": inference_port} - return deployment_results_payload - - def construct_deployment_status(self, end_point_name, device_id, - model_id, model_name, model_version, - model_inference_url, model_status, - inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT): - deployment_status_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name, - "device_id": device_id, - "model_id": model_id, "model_name": model_name, - "model_version": model_version, - "model_url": model_inference_url, "model_status": model_status, - "inference_port": inference_port} - return deployment_status_payload - - def send_deployment_results(self, end_point_name, device_id, model_status, - model_id, model_name, model_inference_url, - model_version, inference_port, inference_engine, - model_metadata, model_config): - deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(device_id) - deployment_results_payload = self.construct_deployment_results( - end_point_name, device_id, model_status, - model_id, model_name, model_inference_url, - model_version, inference_port, inference_engine, - model_metadata, model_config) - - logging.info("[client] send_deployment_results: topic {}, payload 
{}.".format(deployment_results_topic, - deployment_results_payload)) - self.client_mqtt_mgr.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload)) - return deployment_results_payload - - def send_deployment_status(self, end_point_name, device_id, - model_id, model_name, model_version, - model_inference_url, model_status, - inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT): - deployment_status_topic = "model_device/model_device/return_deployment_status/{}".format(device_id) - deployment_status_payload = self.construct_deployment_status( - end_point_name, device_id, - model_id, model_name, model_version, - model_inference_url, model_status, - inference_port=inference_port) - - logging.info("[client] send_deployment_status: topic {}, payload {}.".format(deployment_status_topic, - deployment_status_payload)) - self.client_mqtt_mgr.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload)) - return deployment_status_payload - - def reset_devices_status(self, edge_id, status): - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = edge_id - self.mlops_metrics.broadcast_client_training_status( - edge_id, status, is_from_model=True, run_id=self.run_id) - - def cleanup_run_when_starting_failed(self): - logging.info("Cleanup run successfully when starting failed.") - - self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - - time.sleep(2) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - def cleanup_run_when_finished(self): - logging.info("Cleanup run successfully when finished.") - - self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED) - - time.sleep(2) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - def on_client_mqtt_disconnected(self, mqtt_client_object): - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_lock.acquire() - self.client_mqtt_is_connected = False - self.client_mqtt_lock.release() - - def on_client_mqtt_connected(self, mqtt_client_object): - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - - self.mlops_metrics.set_messenger(self.client_mqtt_mgr) - self.mlops_metrics.run_id = self.run_id - - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_lock.acquire() - self.client_mqtt_is_connected = True - self.client_mqtt_lock.release() - - def setup_client_mqtt_mgr(self): - if self.client_mqtt_mgr is not None: - return - - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_mgr = MqttManager( - self.agent_config["mqtt_config"]["BROKER_HOST"], - self.agent_config["mqtt_config"]["BROKER_PORT"], - self.agent_config["mqtt_config"]["MQTT_USER"], - self.agent_config["mqtt_config"]["MQTT_PWD"], - self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"], - "FedML_ModelClientAgent_Metrics_@{}@_{}_{}_{}".format(self.user_name, self.args.current_device_id, - str(os.getpid()), - str(uuid.uuid4())) - ) - - self.client_mqtt_mgr.add_connected_listener(self.on_client_mqtt_connected) - self.client_mqtt_mgr.add_disconnected_listener(self.on_client_mqtt_disconnected) - self.client_mqtt_mgr.connect() - self.client_mqtt_mgr.loop_start() - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.client_mqtt_mgr) - self.mlops_metrics.run_id = self.run_id 
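# setup_client_mqtt_mgr() gives each runner process its own broker session: the client id
# embeds the user name, device id, process id, and a fresh UUID, so concurrent runs do not
# kick one another off the MQTT broker (duplicate client ids would). A minimal sketch of
# that naming scheme (illustrative helper, not part of this module):
#
#     import os
#     import uuid
#
#     def make_client_id(prefix: str, user_name: str, device_id: str) -> str:
#         # pid + uuid4 keep the id unique per process and per connection attempt
#         return "{}_@{}@_{}_{}_{}".format(prefix, user_name, device_id, os.getpid(), uuid.uuid4())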
- - def release_client_mqtt_mgr(self): - try: - if self.client_mqtt_mgr is not None: - self.client_mqtt_mgr.loop_stop() - self.client_mqtt_mgr.disconnect() - - self.client_mqtt_lock.acquire() - if self.client_mqtt_mgr is not None: - self.client_mqtt_is_connected = False - self.client_mqtt_mgr = None - self.client_mqtt_lock.release() - except Exception: - pass - - def ota_upgrade(self, payload, request_json): - run_id = request_json["end_point_id"] - force_ota = False - ota_version = None - - try: - parameters = request_json.get("parameters", None) - common_args = parameters.get("common_args", None) - force_ota = common_args.get("force_ota", False) - ota_version = common_args.get("ota_version", None) - except Exception as e: - pass - - if force_ota and ota_version is not None: - should_upgrade = True if ota_version != fedml.__version__ else False - upgrade_version = ota_version - else: - try: - fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version) - except Exception as e: - return - - should_upgrade = False if fedml_is_latest_version else True - upgrade_version = remote_ver - - if should_upgrade: - FedMLClientDataInterface.get_instance(). \ - save_started_job(run_id, self.edge_id, time.time(), - ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, - payload) - - logging.info(f"Upgrade to version {upgrade_version} ...") - - sys_utils.do_upgrade(self.version, upgrade_version) - - raise Exception("Restarting after upgraded...") - - def callback_start_deployment(self, topic, payload): - """ - topic: model_ops/model_device/start_deployment/model-agent-device-id - payload: {"model_name": "image-model", "model_storage_url":"s3-url", - "instance_scale_min":1, "instance_scale_max":3, "inference_engine":"onnx (or tensorrt)"} - """ - # get deployment params - request_json = json.loads(payload) - run_id = request_json["end_point_id"] - token = request_json["token"] - user_id = request_json["user_id"] - user_name = request_json["user_name"] - device_ids = request_json["device_ids"] - device_objs = request_json["device_objs"] - - model_config = request_json["model_config"] - model_name = model_config["model_name"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - inference_engine = model_config.get("inference_engine", 0) - inference_end_point_id = run_id - - try: - MLOpsConfigs.fetch_all_configs() - except Exception as e: - pass - - # Start log processor for current run - run_id = inference_end_point_id - self.args.run_id = run_id - self.args.edge_id = self.edge_id - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source( - ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT) - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id) - - self.ota_upgrade(payload, request_json) - - # Start client with multiprocessing mode - request_json["run_id"] = run_id - run_id_str = str(run_id) - self.request_json = request_json - self.running_request_json[run_id_str] = request_json - client_runner = FedMLClientRunner( - self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id - ) - client_runner.infer_host = self.get_ip_address(request_json) - self.run_process_event_map[run_id_str] = multiprocessing.Event() - 
self.run_process_event_map[run_id_str].clear() - client_runner.run_process_event = self.run_process_event_map[run_id_str] - self.run_process_completed_event_map[run_id_str] = multiprocessing.Event() - self.run_process_completed_event_map[run_id_str].clear() - client_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str] - self.model_runner_mapping[run_id_str] = client_runner - self.run_id = run_id - self.run_process_map[run_id_str] = Process(target=client_runner.run, args=( - self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str] - )) - # client_runner.run() - self.run_process_map[run_id_str].start() - ClientConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - ClientConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id) - - def set_runner_stopped_event(self, run_id): - run_id_str = str(run_id) - client_runner = self.model_runner_mapping.get(run_id_str, None) - if client_runner is not None: - if client_runner.run_process_event is not None: - client_runner.run_process_event.set() - self.model_runner_mapping.pop(run_id_str) - - def set_runner_completed_event(self, run_id): - run_id_str = str(run_id) - client_runner = self.model_runner_mapping.get(run_id_str, None) - if client_runner is not None: - if client_runner.run_process_completed_event is not None: - client_runner.run_process_completed_event.set() - self.model_runner_mapping.pop(run_id_str) - - def callback_delete_deployment(self, topic, payload): - logging.info("callback_delete_deployment: topic = %s, payload = %s" % (topic, payload)) - - # Parse payload as the model message object. - model_msg_object = FedMLModelMsgObject(topic, payload) - - try: - ClientConstants.remove_deployment( - model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version, - model_msg_object.run_id, model_msg_object.model_id, edge_id=self.edge_id) - except Exception as e: - logging.info(f"Exception when removing deployment {traceback.format_exc()}") - pass - - self.set_runner_stopped_event(model_msg_object.run_id) - - logging.info(f"[endpoint/device][{model_msg_object.run_id}/{self.edge_id}] " - f"Release gpu resource when the worker deployment deleted.") - JobRunnerUtils.get_instance().release_gpu_ids(model_msg_object.run_id, self.edge_id) - - if self.running_request_json.get(str(model_msg_object.run_id)) is not None: - try: - self.running_request_json.pop(str(model_msg_object.run_id)) - except Exception as e: - pass - - FedMLClientDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) - FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id( - model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, - self.edge_id) - - def exit_run_with_exception_entry(self): - try: - self.setup_client_mqtt_mgr() - self.exit_run_with_exception() - except Exception as e: - self.release_client_mqtt_mgr() - sys.exit(1) - finally: - self.release_client_mqtt_mgr() - - def exit_run_with_exception(self): - logging.info("Exit run successfully.") - - ClientConstants.cleanup_learning_process(self.run_id) - ClientConstants.cleanup_run_process(self.run_id) - - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=self.run_id) - - time.sleep(1) - - def callback_exit_train_with_exception(self, topic, payload): - request_json = json.loads(payload) - is_retain = 
request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json.get("runId", None) - if run_id is None: - run_id = request_json.get("run_id", None) - if run_id is None: - run_id = request_json.get("id", None) - - if run_id is None: - return - - # Stop client with multiprocessing mode - self.request_json = request_json - client_runner = FedMLClientRunner( - self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id - ) - try: - Process(target=client_runner.exit_run_with_exception_entry).start() - except Exception as e: - pass - - def cleanup_client_with_status(self): - self.setup_client_mqtt_mgr() - - if self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED: - self.cleanup_run_when_finished() - elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED: - self.cleanup_run_when_starting_failed() - - self.release_client_mqtt_mgr() - - def callback_runner_id_status(self, topic, payload): - # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload)) - - request_json = json.loads(payload) - run_id = request_json["run_id"] - edge_id = request_json["edge_id"] - status = request_json["status"] - - self.save_training_status(edge_id, status) - - if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED: - # Stop client with multiprocessing mode - self.request_json = request_json - client_runner = FedMLClientRunner( - self.args, - edge_id=self.edge_id, - request_json=request_json, - agent_config=self.agent_config, - run_id=run_id, - ) - client_runner.device_status = status - status_process = Process(target=client_runner.cleanup_client_with_status) - status_process.start() - status_process.join(15) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id) - - def callback_report_current_status(self, topic, payload): - self.send_agent_active_msg() - - @staticmethod - def process_ota_upgrade_msg(): - os.system("pip install -U fedml") - - def callback_client_ota_msg(self, topic, payload): - request_json = json.loads(payload) - cmd = request_json["cmd"] - - if cmd == ClientConstants.FEDML_OTA_CMD_UPGRADE: - FedMLClientRunner.process_ota_upgrade_msg() - # Process(target=FedMLClientRunner.process_ota_upgrade_msg).start() - raise Exception("After upgraded, restart runner...") - elif cmd == ClientConstants.FEDML_OTA_CMD_RESTART: - raise Exception("Restart runner...") - - def save_training_status(self, edge_id, training_status): - self.current_training_status = training_status - ClientConstants.save_training_infos(edge_id, training_status) - - @staticmethod - def get_device_id(): - device_file_path = os.path.join(ClientConstants.get_data_dir(), - ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME) - file_for_device_id = os.path.join(device_file_path, "devices.id") - if not os.path.exists(device_file_path): - os.makedirs(device_file_path) - elif os.path.exists(file_for_device_id): - with open(file_for_device_id, 'r', encoding='utf-8') as f: - device_id_from_file = f.readline() - if device_id_from_file is not None and device_id_from_file != "": - return device_id_from_file - - if platform.system() == "Darwin": - cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \ - "|awk -F':' '{print $2}' " - device_id = os.popen(cmd_get_serial_num).read() - device_id = device_id.replace('\n', '').replace(' ', '') - if 
device_id is None or device_id == "": - device_id = hex(uuid.getnode()) - else: - device_id = "0x" + device_id - else: - if "nt" in os.name: - - def get_uuid(): - guid = "" - try: - cmd = "wmic csproduct get uuid" - guid = str(subprocess.check_output(cmd)) - pos1 = guid.find("\\n") + 2 - guid = guid[pos1:-15] - except Exception as ex: - pass - return str(guid) - - device_id = str(get_uuid()) - logging.info(device_id) - elif "posix" in os.name: - device_id = sys_utils.get_device_id_in_docker() - if device_id is None: - device_id = hex(uuid.getnode()) - else: - device_id = sys_utils.run_subprocess_open( - "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split() - ) - device_id = hex(device_id) - - if device_id is not None and device_id != "": - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - else: - device_id = hex(uuid.uuid4()) - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - - return device_id - - def get_ip_address(self, request_json): - # OPTION 1: Use local ip - ip = ClientConstants.get_local_ip() - - # OPTION 2: Auto detect public ip - if "parameters" in request_json and \ - ClientConstants.AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \ - request_json["parameters"][ClientConstants.AUTO_DETECT_PUBLIC_IP]: - ip = ClientConstants.get_public_ip() - logging.info("Auto detect public ip for worker: " + ip) - - # OPTION 3: Use user indicated ip - if self.infer_host is not None and self.infer_host != "127.0.0.1" and self.infer_host != "localhost": - ip = self.infer_host - - return ip - - def bind_account_and_device_id(self, url, account_id, device_id, os_name, role="md.on_premise_device"): - ip = requests.get('https://checkip.amazonaws.com').text.strip() - fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \ - cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \ - gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info() - host_name = sys_utils.get_host_name() - json_params = { - "accountid": account_id, - "deviceid": device_id, - "state": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - "type": os_name, - "processor": cpu_info, - "core_type": cpu_info, - "network": "", - "role": role, - "os_ver": os_ver, - "memory": total_mem, - "ip": ip, - "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver, - "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver, - "mpi_installed": mpi_installed, "cpu_usage": cpu_usage, - "available_mem": available_mem, "total_mem": total_mem, - "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name} - } - if gpu_count > 0: - if gpu_total_mem is not None: - json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem - else: - json_params["gpu"] = gpu_info if gpu_info is not None else "" - json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else "" - if gpu_available_mem is not None: - json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem - if gpu_total_mem is not None: - json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem - - json_params["extra_infos"]["gpu_count"] = gpu_count - json_params["extra_infos"]["gpu_vendor"] = gpu_vendor - json_params["extra_infos"]["gpu_device_name"] = gpu_device_name - - gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count) - 
gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0 - gpu_list = sys_utils.get_gpu_list() - json_params["extra_infos"]["gpu_available_count"] = gpu_available_count - json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list - json_params["extra_infos"]["gpu_list"] = gpu_list - else: - json_params["gpu"] = "None" - json_params["extra_infos"]["gpu_available_count"] = 0 - json_params["extra_infos"]["gpu_available_id_list"] = [] - json_params["extra_infos"]["gpu_list"] = [] - - _, cert_path = MLOpsConfigs.get_request_params() - if cert_path is not None: - try: - requests.session().verify = cert_path - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - except requests.exceptions.SSLError as err: - MLOpsConfigs.install_root_ca_file() - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - else: - response = requests.post(url, json=json_params, headers={"Connection": "close"}) - edge_id = -1 - user_name = None - extra_url = None - if response.status_code != 200: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - pass - else: - # print("url = {}, response = {}".format(url, response)) - status_code = response.json().get("code") - if status_code == "SUCCESS": - edge_id = response.json().get("data").get("id") - user_name = response.json().get("data").get("userName", None) - extra_url = response.json().get("data").get("url", None) - if edge_id is None or edge_id <= 0: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - else: - if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR: - raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR) - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - return -1, None, None - return edge_id, user_name, extra_url - - def fetch_configs(self): - return MLOpsConfigs.fetch_all_configs() - - def send_agent_active_msg(self): - active_topic = "flclient_agent/active" - status = MLOpsStatus.get_instance().get_client_agent_status(self.edge_id) - if ( - status is not None - and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE - and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE - ): - return - - try: - current_job = FedMLClientDataInterface.get_instance().get_job_by_id(self.run_id) - except Exception as e: - current_job = None - if current_job is None: - if status is not None and status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE: - status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE - else: - return - else: - status = ClientConstants.get_device_state_from_run_edge_state(current_job.status) - active_msg = {"ID": self.edge_id, "status": status} - MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, status) - self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg)) - - def recover_start_deployment_msg_after_upgrading(self): - try: - current_job = FedMLClientDataInterface.get_instance().get_current_job() - if current_job is not None and \ - current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING: - logging.info("start deployment after upgrading.") - topic_start_deployment = 
"model_ops/model_device/start_deployment/{}".format(str(self.edge_id)) - self.callback_start_deployment(topic_start_deployment, current_job.running_json) - except Exception as e: - logging.info("recover starting deployment message after upgrading: {}".format(traceback.format_exc())) - - def on_agent_mqtt_connected(self, mqtt_client_object): - # The MQTT message topic format is as follows: // - - # Setup MQTT message listener for starting deployment - topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_start_deployment, self.callback_start_deployment) - - # Setup MQTT message listener for delete deployment - topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_delete_deployment, self.callback_delete_deployment) - - # Setup MQTT message listener for running failed - topic_exit_train_with_exception = "flserver_agent/" + str(self.edge_id) + "/exit_train_with_exception" - self.mqtt_mgr.add_message_listener(topic_exit_train_with_exception, self.callback_exit_train_with_exception) - - # Setup MQTT message listener for client status switching - topic_client_status = "fl_client/flclient_agent_" + str(self.edge_id) + "/status" - self.mqtt_mgr.add_message_listener(topic_client_status, self.callback_runner_id_status) - - # Setup MQTT message listener to report current device status. - topic_report_status = "mlops/report_device_status" - self.mqtt_mgr.add_message_listener(topic_report_status, self.callback_report_current_status) - - # Setup MQTT message listener to OTA messages from the MLOps. - topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota" - self.mqtt_mgr.add_message_listener(topic_ota_msg, self.callback_client_ota_msg) - - if self.mqtt_inference_obj is None: - self.mqtt_inference_obj = FedMLMqttInference(agent_config=self.agent_config, mqtt_mgr=self.mqtt_mgr) - self.mqtt_inference_obj.setup_listener_for_endpoint_inference_request(self.edge_id) - - # Subscribe topics for starting deployment, stopping deployment and fetching client status. - mqtt_client_object.subscribe(topic_start_deployment, qos=2) - mqtt_client_object.subscribe(topic_delete_deployment, qos=2) - mqtt_client_object.subscribe(topic_client_status, qos=2) - mqtt_client_object.subscribe(topic_report_status, qos=2) - mqtt_client_object.subscribe(topic_exit_train_with_exception, qos=2) - mqtt_client_object.subscribe(topic_ota_msg, qos=2) - - self.subscribed_topics.clear() - self.subscribed_topics.append(topic_start_deployment) - self.subscribed_topics.append(topic_delete_deployment) - self.subscribed_topics.append(topic_client_status) - self.subscribed_topics.append(topic_report_status) - self.subscribed_topics.append(topic_exit_train_with_exception) - self.subscribed_topics.append(topic_ota_msg) - - # Broadcast the first active message. 
- self.send_agent_active_msg() - - # Echo results - # print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!") - # print( - # "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is " - # + str(self.unique_device_id) - # + "\n" - # ) - - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - def on_agent_mqtt_disconnected(self, mqtt_client_object): - MLOpsStatus.get_instance().set_client_agent_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE - ) - - try: - if self.mqtt_inference_obj is not None: - self.mqtt_inference_obj.remove_listener_for_endpoint_inference_request(self.edge_id) - except Exception as e: - pass - - def setup_agent_mqtt_connection(self, service_config): - # Setup MQTT connection - self.mqtt_mgr = MqttManager( - service_config["mqtt_config"]["BROKER_HOST"], - service_config["mqtt_config"]["BROKER_PORT"], - service_config["mqtt_config"]["MQTT_USER"], - service_config["mqtt_config"]["MQTT_PWD"], - service_config["mqtt_config"]["MQTT_KEEPALIVE"], - "FedML_ModelClientAgent_Daemon_@" + self.user_name + "@_" + self.args.current_device_id + str(uuid.uuid4()), - "flclient_agent/last_will_msg", - json.dumps({"ID": self.edge_id, "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE}) - ) - self.agent_config = service_config - - # Init local database - FedMLClientDataInterface.get_instance().create_job_table() - try: - FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir()) - FedMLModelDatabase.get_instance().create_table() - except Exception as e: - pass - - client_api_cmd = "fedml.computing.scheduler.model_scheduler.device_client_api:api" - client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd) - if client_api_pids is None or len(client_api_pids) <= 0: - # Start local API services - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - python_program = get_python_program() - self.local_api_process = ClientConstants.exec_console_with_script( - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - python_program, client_api_cmd, - ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir - ), - should_capture_stdout=False, - should_capture_stderr=False - ) - # if self.local_api_process is not None and self.local_api_process.pid is not None: - # print(f"Model worker local API process id {self.local_api_process.pid}") - - # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor() - - # Setup MQTT connected listener - self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected) - self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected) - self.mqtt_mgr.connect() - - self.setup_client_mqtt_mgr() - self.mlops_metrics.report_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, is_from_model=True) - MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE) - - self.recover_start_deployment_msg_after_upgrading() - - def stop_agent(self): - if self.run_process_event is not None: - self.run_process_event.set() - - if self.mqtt_mgr is not None: - try: - for topic in self.subscribed_topics: - self.mqtt_mgr.unsubscribe_msg(topic) - except Exception as e: - pass - - self.mqtt_mgr.loop_stop() - self.mqtt_mgr.disconnect() - - self.release_client_mqtt_mgr() - - def start_agent_mqtt_loop(self, 
should_exit_sys=False): - # Start MQTT message loop - try: - self.mqtt_mgr.loop_forever() - except Exception as e: - if str(e) == "Restarting after upgraded...": - logging.info("Restarting after upgraded...") - else: - logging.info("Client tracing: {}".format(traceback.format_exc())) - finally: - self.stop_agent() - - if should_exit_sys: - time.sleep(5) - sys.exit(1) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py index 9a997a80e2..f637ccde1d 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py @@ -18,6 +18,11 @@ class FedMLModelCache(Singleton): FEDML_MODEL_DEVICE_INFO_TAG = "FEDML_MODEL_DEVICE_INFO_TAG-" FEDML_MODEL_END_POINT_TOKEN_TAG = "FEDML_MODEL_END_POINT_TOKEN_TAG-" FEDML_MODEL_ROUND_ROBIN_PREVIOUS_DEVICE_TAG = "FEDML_MODEL_ROUND_ROBIN_PREVIOUS_DEVICE_TAG-" + FEDML_MODEL_ENDPOINT_REPLICA_NUM_TAG = "FEDML_MODEL_ENDPOINT_REPLICA_NUM_TAG-" + + # On the worker + FEDML_MODEL_REPLICA_GPU_IDS_TAG = "FEDML_MODEL_REPLICA_GPU_IDS_TAG-" + FEDML_KEY_COUNT_PER_SCAN = 1000 def __init__(self): @@ -85,16 +90,16 @@ def get_instance(redis_addr="local", redis_port=6379): return FedMLModelCache() def set_deployment_result(self, end_point_id, end_point_name, - model_name, model_version, device_id, deployment_result): - result_dict = {"cache_device_id": device_id, "result": deployment_result} + model_name, model_version, device_id, deployment_result, replica_no): + result_dict = {"cache_device_id": device_id, "cache_replica_no": replica_no, "result": deployment_result} try: - # Delete old result + # Delete old result using (e_id, end_point_name, model_name, device_id, replica_no) # In this list, find the result's complete record, delete it. 
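# Each element of this Redis list is a JSON record keyed by the (device_id, replica_no)
# pair, i.e. {"cache_device_id": ..., "cache_replica_no": ..., "result": ...}. A minimal
# sketch of the matching rule applied below (illustrative helper; the real code also
# tolerates double-encoded strings via get_result_item_info()):
#
#     import json
#
#     def same_replica(item: str, device_id, replica_no) -> bool:
#         rec = json.loads(item)
#         return rec["cache_device_id"] == device_id and rec["cache_replica_no"] == replica_no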
result_list = self.redis_connection.lrange( self.get_deployment_result_key(end_point_id, end_point_name, model_name), 0, -1) for result_item in result_list: - result_device_id, result_payload = self.get_result_item_info(result_item) - if result_device_id == device_id: + res_device_id, res_replica_no, res_payload = self.get_result_item_info(result_item) + if res_device_id == device_id and res_replica_no == replica_no: self.redis_connection.lrem( self.get_deployment_result_key(end_point_id, end_point_name, model_name), 0, result_item) @@ -105,18 +110,20 @@ def set_deployment_result(self, end_point_id, end_point_name, pass self.model_deployment_db.set_deployment_result(end_point_id, end_point_name, model_name, model_version, - device_id, deployment_result) + device_id, deployment_result, replica_no) def set_deployment_status(self, end_point_id, end_point_name, - model_name, model_version, device_id, deployment_status): + model_name, model_version, device_id, deployment_status, replica_no): status_dict = {"cache_device_id": device_id, "status": deployment_status} try: - self.redis_connection.rpush(self.get_deployment_status_key(end_point_id, end_point_name, model_name), json.dumps(status_dict)) + # rpush could tolerate the same e_id, d_id with different r_no + self.redis_connection.rpush(self.get_deployment_status_key(end_point_id, end_point_name, model_name), + json.dumps(status_dict)) except Exception as e: pass self.model_deployment_db.set_deployment_status(end_point_id, end_point_name, model_name, model_version, - device_id, deployment_status) + device_id, deployment_status, replica_no) def delete_deployment_status(self, element: str, end_point_id, end_point_name, model_name): self.redis_connection.lrem(self.get_deployment_status_key(end_point_id, end_point_name, model_name), @@ -131,10 +138,32 @@ def delete_deployment_result(self, element: str, end_point_id, end_point_name, m except Exception as e: pass + def delete_deployment_result_with_device_id_and_replica_no(self, end_point_id, end_point_name, model_name, + device_id, replica_no_to_delete): + result_item_found = None + + result_list = self.get_deployment_result_list( + end_point_id, end_point_name, model_name) + + for result_item in result_list: + cache_device_id, cache_replica_no, result_payload = ( + self.get_result_item_info(result_item)) + + if str(cache_device_id) == str(device_id) and cache_replica_no == replica_no_to_delete: + result_item_found = result_item + break + + # Delete the replica element + if result_item_found is not None: + self.delete_deployment_result( + result_item_found, end_point_id, end_point_name, model_name) + def get_deployment_result_list(self, end_point_id, end_point_name, model_name): try: - result_list = self.redis_connection.lrange(self.get_deployment_result_key(end_point_id, end_point_name, model_name), 0, -1) + result_list = self.redis_connection.lrange( + self.get_deployment_result_key(end_point_id, end_point_name, model_name), 0, -1) except Exception as e: + logging.info(e) result_list = None if result_list is None or len(result_list) <= 0: @@ -144,13 +173,14 @@ def get_deployment_result_list(self, end_point_id, end_point_name, model_name): self.redis_connection.rpush(self.get_deployment_result_key(end_point_id, end_point_name, model_name), json.dumps(result)) except Exception as e: + logging.info(e) pass return result_list def delete_deployment_result(self, element: str, end_point_id, end_point_name, model_name): self.redis_connection.lrem(self.get_deployment_result_key(end_point_id, 
                                                            end_point_name, model_name), 0, element)
-        device_id, _ = self.get_result_item_info(element)
+        device_id, _, _ = self.get_result_item_info(element)
         self.model_deployment_db.delete_deployment_result(device_id, end_point_id, end_point_name, model_name)
 
     def get_deployment_result_list_size(self, end_point_id, end_point_name, model_name):
@@ -192,50 +222,43 @@ def get_result_item_info(self, result_item):
         result_item_json = json.loads(result_item)
         if isinstance(result_item_json, str):
             result_item_json = json.loads(result_item_json)
+
         device_id = result_item_json["cache_device_id"]
+        replica_no = result_item_json["cache_replica_no"]
+
         if isinstance(result_item_json["result"], str):
             result_payload = json.loads(result_item_json["result"])
         else:
             result_payload = result_item_json["result"]
-        return device_id, result_payload
+        return device_id, replica_no, result_payload
 
     def get_idle_device(self, end_point_id, end_point_name,
                         model_name, model_version,
-                        check_end_point_status=True):
-        # Find all deployed devices
-        try:
-            status_list = self.get_deployment_status_list(end_point_id, end_point_name, model_name)   # DEPLOYMENT_STATUS
-        except Exception as e:
-            logging.error(f"get_deployment_status_list failed {e}")
-            return None, None
+                        check_end_point_status=True, limit_specific_model_version=False):
+        # Deprecated the model status logic; query directly from the deployment result list.
+        idle_device_list = list()
 
-        if len(status_list) == 0:
-            return None, None
+        result_list = self.get_deployment_result_list(end_point_id, end_point_name, model_name)
 
-        idle_device_list = list()
-        if model_version == "latest":
-            model_version = self.get_latest_version(status_list)
-            logging.info(f"model_version {model_version}")
+        for result_item in result_list:
+            device_id, _, result_payload = self.get_result_item_info(result_item)
+            found_end_point_id = result_payload["end_point_id"]
+            found_end_point_name = result_payload["end_point_name"]
+            found_model_name = result_payload["model_name"]
+            found_model_version = result_payload["model_version"]
+
+            if (str(found_end_point_id) == str(end_point_id) and found_end_point_name == end_point_name and
+                    found_model_name == model_name and
+                    (not limit_specific_model_version or found_model_version == model_version)):
+                if "model_status" in result_payload and result_payload["model_status"] == "DEPLOYED":
+                    idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id})
+
+        logging.info(f"{len(idle_device_list)} devices have this model on them: {idle_device_list}")
 
-        # iterate all devices, find those with correct version and deployed
-        for status_item in status_list:
-            try:
-                device_id, status_payload = self.get_status_item_info(status_item)
-                logging.info(f"status_payload {status_payload}")
-                model_status = status_payload["model_status"]
-                model_version_cached = status_payload["model_version"]
-                end_point_id_cache = status_payload["end_point_id"]
-                logging.info(f"model_version {model_version}, model_version_cache {model_version_cached}")
-                if (model_version == model_version_cached or model_version == "*") and \
-                        model_status == ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
-                    idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id_cache})
-            except Exception as e:
-                logging.info(f"Get idle device list Failed: {e}, status_item {status_item}")
-                pass
         if len(idle_device_list) <= 0:
             return None, None
-        logging.info(f"{len(idle_device_list)} devices has this model on it: {idle_device_list}")
-        # Randomly shuffle
+
+        # # Randomly shuffle
         # shuffle the list of deployed devices and get the first one as the target idle device.
         # if len(idle_device_list) <= 0:
         #     return None, None
@@ -271,18 +294,17 @@ def get_idle_device(self, end_point_id, end_point_name,
 
         # Find deployment result from the target idle device.
         try:
-            result_list = self.get_deployment_result_list(end_point_id, end_point_name, model_name)
             for result_item in result_list:
-                device_id, result_payload = self.get_result_item_info(result_item)
+                device_id, _, result_payload = self.get_result_item_info(result_item)
                 found_end_point_id = result_payload["end_point_id"]
                 found_end_point_name = result_payload["end_point_name"]
-                # Check whether the end point is activated.
-                if check_end_point_status:
-                    end_point_activated = self.get_end_point_activation(found_end_point_id)
-                    if not end_point_activated:
-                        continue
+                found_model_status = result_payload["model_status"]
 
-                if found_end_point_id == idle_device_dict["end_point_id"] \
+                if found_model_status != "DEPLOYED":
+                    continue
+
+                if str(found_end_point_id) == str(idle_device_dict["end_point_id"]) \
                         and device_id == idle_device_dict["device_id"]:
                     if same_model_device_rank > 0:
                         same_model_device_rank -= 1
@@ -316,10 +338,13 @@ def get_latest_version(self, status_list):
         return latest_version
 
     def get_deployment_result_with_device_id(self, end_point_id, end_point_name, model_name, device_id):
+        """
+        TODO: Return multiple replicas' results for the same device_id
+        """
        try:
             result_list = self.get_deployment_result_list(end_point_id, end_point_name, model_name)
             for result_item in result_list:
-                result_device_id, result_payload = self.get_result_item_info(result_item)
+                result_device_id, _, result_payload = self.get_result_item_info(result_item)
                 found_end_point_id = result_payload["end_point_id"]
 
                 end_point_activated = self.get_end_point_activation(found_end_point_id)
@@ -363,9 +388,29 @@ def set_end_point_activation(self, end_point_id, end_point_name, activate_status
             pass
         self.model_deployment_db.set_end_point_activation(end_point_id, end_point_name, status)
 
+    def set_replica_gpu_ids(self, end_point_id, end_point_name, model_name, device_id, replica_no, gpu_ids):
+        # Convert the list to string
+        try:
+            self.redis_connection.set(self.get_replica_gpu_ids_key(end_point_id, end_point_name,
+                                                                   model_name, device_id, replica_no), str(gpu_ids))
+        except Exception as e:
+            print(e)
+            logging.error(e)
+
+        # TODO: Use Sqlite for the replica backup
+
+    def get_replica_gpu_ids(self, end_point_id, end_point_name, model_name, device_id, replica_no):
+        try:
+            if self.redis_connection.exists(self.get_replica_gpu_ids_key(end_point_id, end_point_name,
+                                                                         model_name, device_id, replica_no)):
+                return self.redis_connection.get(self.get_replica_gpu_ids_key(end_point_id, end_point_name,
+                                                                              model_name, device_id, replica_no))
+        except Exception as e:
+            pass
+
     def delete_end_point(self, end_point_id, end_point_name, model_name, model_version):
         try:
-            print("Will Delete the realated redis keys permanently")
+            logging.info("Will delete the related redis keys permanently")
             self.redis_connection.expire(self.get_deployment_result_key(end_point_id, end_point_name, model_name), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
             self.redis_connection.expire(self.get_deployment_status_key(end_point_id, end_point_name, model_name), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
             self.redis_connection.expire(self.get_monitor_metrics_key(end_point_id, end_point_name, model_name, model_version), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
@@ -375,29 +420,16 @@ def delete_end_point(self, end_point_id, end_point_name, model_name, model_versi
             self.redis_connection.expire(self.get_end_point_activation_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
             self.redis_connection.expire(self.get_end_point_status_key(end_point_id), ServerConstants.MODEL_CACHE_KEY_EXPIRE_TIME)
         except Exception as e:
+            logging.error(f"Error when deleting the redis keys: {e}")
             pass
 
     def get_end_point_activation(self, end_point_id):
-        status_int = -1
-        try:
-            if self.redis_connection.exists(self.get_end_point_activation_key(end_point_id)):
-                status_int = self.redis_connection.get(self.get_end_point_activation_key(end_point_id))
-        except Exception as e:
-            pass
-
-        if status_int == -1:
-            status_int = self.model_deployment_db.get_end_point_activation(end_point_id)
-            try:
-                self.redis_connection.set(self.get_end_point_activation_key(end_point_id), status_int)
-            except Exception as e:
-                pass
-
-        status = True if int(status_int) == 1 else False
-        return status
+        # [Deprecated] The activation logic has been removed.
+        return True
 
     def get_end_point_full_key_by_id(self, end_point_id):
-        # e.g. FEDML_MODEL_DEPLOYMENT_STATUS--1234-dummy_endpoint_name-dummy_model_name
-        target_prefix = f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG}-{end_point_id}-*"
+        # e.g. FEDML_MODEL_DEPLOYMENT_RESULT--1234-dummy_endpoint_name-dummy_model_name
+        target_prefix = f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_RESULT_TAG}-{end_point_id}-*"
         status_list = list()
         for key in self.redis_connection.scan_iter(target_prefix):
             status_list.append(key)
@@ -497,6 +529,19 @@ def get_end_point_token(self, end_point_id, end_point_name, model_name):
 
         return token
 
+    def get_endpoint_devices_replica_num(self, end_point_id):
+        """
+        Return an endpoint_devices_replica_num dict, e.g. {id1: 1, id2: 1}; return None if it does not exist.
+        """
+        try:
+            replica_num = self.redis_connection.get(
+                self.get_endpoint_replica_num_key(end_point_id))
+        except Exception as e:
+            replica_num = None
+        # TODO: Use Sqlite for the replica backup
+
+        return replica_num
+
     def get_deployment_result_key(self, end_point_id, end_point_name, model_name):
         return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_DEPLOYMENT_RESULT_TAG, end_point_id, end_point_name, model_name)
 
@@ -518,6 +563,14 @@ def get_deployment_token_key(self, end_point_id, end_point_name, model_name):
     def get_round_robin_prev_device(self, end_point_id, end_point_name, model_name, version):
         return "{}-{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_ROUND_ROBIN_PREVIOUS_DEVICE_TAG, end_point_id, end_point_name, model_name, version)
 
+    def get_endpoint_replica_num_key(self, end_point_id):
+        return "{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_ENDPOINT_REPLICA_NUM_TAG, end_point_id, "replica_num", "key")
+
+    @staticmethod
+    def get_replica_gpu_ids_key(end_point_id, end_point_name, model_name, device_id, replica_no):
+        return "{}-{}-{}-{}-{}-{}".format(FedMLModelCache.FEDML_MODEL_REPLICA_GPU_IDS_TAG, end_point_id,
+                                          end_point_name, model_name, device_id, replica_no)
+
     def set_monitor_metrics(self, end_point_id, end_point_name,
                             model_name, model_version,
                             total_latency, avg_latency,
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
index e11e098caf..6ee7af1cdd 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py
@@ -31,20 +31,25 @@ def set_database_base_dir(self, database_base_dir):
         self.db_base_dir = database_base_dir
 
     def set_deployment_result(self, end_point_id, end_point_name, model_name, model_version,
-                              device_id, deployment_result):
+                              device_id, deployment_result, replica_no):
         self.set_deployment_results_info(end_point_id, end_point_name, model_name, model_version,
-                                         device_id, deployment_result=deployment_result)
+                                         device_id, deployment_result=deployment_result, replica_no=replica_no)
 
     def set_deployment_status(self, end_point_id, end_point_name, model_name, model_version,
-                              device_id, deployment_status):
+                              device_id, deployment_status, replica_no):
         self.set_deployment_results_info(end_point_id, end_point_name, model_name, model_version,
-                                         device_id, deployment_status=deployment_status)
+                                         device_id, deployment_status=deployment_status, replica_no=replica_no)
 
     def get_deployment_result_list(self, end_point_id, end_point_name, model_name, model_version=None):
+        """
+        Query the deployment results from the sqlite db using e_id.
+        """
         result_list = self.get_deployment_results_info(end_point_id, end_point_name, model_name, model_version)
         ret_result_list = list()
         for result in result_list:
-            result_dict = {"cache_device_id": result.device_id, "result": result.deployment_result}
+            result_dict = {"cache_device_id": result.device_id,
+                           "cache_replica_no": result.replica_no,
+                           "result": result.deployment_result}
             ret_result_list.append(json.dumps(result_dict))
         return ret_result_list
 
@@ -58,18 +63,24 @@ def get_deployment_status_list(self, end_point_id, end_point_name, model_name, m
         return ret_status_list
 
     def get_deployment_result_with_device_id(self, end_point_id, end_point_name, model_name, device_id):
+        """
+        Return a list of replicas' results for the given end_point_id, end_point_name, model_name and device_id.
+        """
+        replica_result_list = list()
         try:
             result_list = self.get_deployment_result_list(end_point_id, end_point_name, model_name)
             for result_item in result_list:
-                result_device_id, result_payload = self.get_result_item_info(result_item)
+                result_device_id, _, result_payload = self.get_result_item_info(result_item)
                 found_end_point_id = result_payload["end_point_id"]
 
                 if str(found_end_point_id) == str(end_point_id) and str(result_device_id) == str(device_id):
-                    return result_payload
+                    replica_result_list.append(result_payload)
         except Exception as e:
-            logging.info(e)
+            # Do not interfere with other endpoints on this device.
+            logging.error(f"Error in get_deployment_result_with_device_id: {e}")
+            return None
 
-        return None
+        return replica_result_list
 
     def get_deployment_status_with_device_id(self, end_point_id, end_point_name, model_name, device_id):
         try:
@@ -124,6 +135,18 @@ def delete_deployment_result_with_device_id(self, end_point_id, end_point_name,
                  FedMLDeploymentResultInfoModel.device_id == f'{device_id}')).delete()
         self.db_connection.commit()
 
+    def delete_deployment_result_with_device_id_and_rank(self, end_point_id, end_point_name, model_name,
+                                                         device_id, replica_rank):
+        replica_no = replica_rank + 1
+        self.open_job_db()
+        self.db_connection.query(FedMLDeploymentResultInfoModel).filter(
+            and_(FedMLDeploymentResultInfoModel.end_point_id == f'{end_point_id}',
+                 FedMLDeploymentResultInfoModel.end_point_name == f'{end_point_name}',
+                 FedMLDeploymentResultInfoModel.model_name == f'{model_name}',
+                 FedMLDeploymentResultInfoModel.device_id == f'{device_id}',
+                 FedMLDeploymentResultInfoModel.replica_no == f'{replica_no}')).delete()
+        self.db_connection.commit()
+
     def delete_deployment_run_info(self, end_point_id):
         # db / table -> model-deployment.db / "deployment_run_info"
         self.open_job_db()
@@ -136,11 +159,13 @@ def get_result_item_info(self, result_item):
         if isinstance(result_item_json, dict):
             result_item_json = json.loads(result_item)
         device_id = result_item_json["cache_device_id"]
+        replica_no = result_item_json["cache_replica_no"]
+
         if isinstance(result_item_json["result"], str):
             result_payload = json.loads(result_item_json["result"])
         else:
             result_payload = result_item_json["result"]
-        return device_id, result_payload
+        return device_id, replica_no, result_payload
 
     def get_status_item_info(self, status_item):
         status_item_json = json.loads(status_item)
@@ -274,17 +299,19 @@ def get_deployment_results_info(self, end_point_id, end_point_name, model_name,
 
     def set_deployment_results_info(self, end_point_id, end_point_name,
                                     model_name, model_version, device_id,
-                                    deployment_result=None, deployment_status=None):
-        '''
-        end_point_id + device_id is unique identifier,
+                                    deployment_result=None, deployment_status=None, replica_no=None):
+        """
+        end_point_id + device_id + replica_no is the unique identifier,
         we do not allow duplicate records
-        '''
+        """
         self.open_job_db()
         result_info = self.db_connection.query(FedMLDeploymentResultInfoModel). \
             filter(and_(FedMLDeploymentResultInfoModel.end_point_id == f'{end_point_id}',
                         FedMLDeploymentResultInfoModel.end_point_name == f'{end_point_name}',
                         FedMLDeploymentResultInfoModel.model_name == f'{model_name}',
-                        FedMLDeploymentResultInfoModel.device_id == f'{device_id}')).first()
+                        FedMLDeploymentResultInfoModel.device_id == f'{device_id}',
+                        FedMLDeploymentResultInfoModel.replica_no == f'{replica_no}'
+                        )).first()
 
         # Insert
         if result_info is None:
             result_info = FedMLDeploymentResultInfoModel(end_point_id=end_point_id,
@@ -293,7 +320,9 @@ def set_deployment_results_info(self, end_point_id, end_point_name,
                                                          model_version=model_version,
                                                          device_id=device_id,
                                                          deployment_result=deployment_result,
-                                                         deployment_status=deployment_status)
+                                                         deployment_status=deployment_status,
+                                                         replica_no=replica_no
+                                                         )
             self.db_connection.add(result_info)
             self.db_connection.commit()
             return
@@ -439,6 +468,7 @@ class FedMLDeploymentResultInfoModel(Base):
     device_id = Column(TEXT)
     deployment_result = Column(TEXT)
     deployment_status = Column(TEXT)
+    replica_no = Column(TEXT)
 
 
 class FedMLDeploymentRunInfoModel(Base):
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
index 0a8c3b6ce9..3a6d891e58 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py
@@ -34,36 +34,15 @@
 
 from .device_http_inference_protocol import FedMLHttpInference
 
+from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache
 
 no_real_gpu_allocation = None
 
 
-class CPUUnpickler(pickle.Unpickler):
-    def find_class(self, module, name):
-        if module == 'torch.storage' and name == '_load_from_bytes':
-            return lambda b: torch.load(io.BytesIO(b), map_location='cpu')
-        else:
-            return super().find_class(module, name)
-
-
 def request_gpu_ids_on_deployment(edge_id, end_point_id, num_gpus=None, master_device_id=None):
     gpu_ids = None
     client_device_id = os.getenv("FEDML_CURRENT_EDGE_ID")
 
-    try:
-        ComputeCacheManager.get_instance().set_redis_params()
-        with ComputeCacheManager.get_instance().lock(
-                ComputeCacheManager.get_instance().get_gpu_cache().get_device_run_lock_key(edge_id, end_point_id)
-        ):
-            if num_gpus is None:
-                num_gpus =
ComputeCacheManager.get_instance().get_gpu_cache().get_device_run_num_gpus(edge_id, end_point_id) - num_gpus = int(num_gpus) if num_gpus is not None and str(num_gpus) != "" else 1 - gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_run_gpu_ids(edge_id, end_point_id) - except Exception as e: - logging.info(f"Execption when request gpu ids. {traceback.format_exc()}") - gpu_ids = None - raise e - if gpu_ids is None: cuda_visable_gpu_ids = JobRunnerUtils.get_instance().occupy_gpu_ids( end_point_id, num_gpus, client_device_id, inner_id=end_point_id, @@ -92,234 +71,127 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version, inference_use_gpu, inference_memory_size, inference_convertor_image, inference_server_image, infer_host, model_is_from_open, model_params, - model_from_open, token, master_ip, edge_id, master_device_id=None): + model_from_open, token, master_ip, edge_id, master_device_id=None, replica_rank=0, + gpu_per_replica=1): logging.info("Model deployment is starting...") - use_simulation_test_without_triton = False - model_metadata = {'name': inference_model_name, - 'versions': ['1'], 'platform': 'onnxruntime_onnx', - 'inputs': [{'name': 'input2', 'datatype': 'INT32', 'shape': [1, 24]}, - {'name': 'input1', 'datatype': 'FP32', 'shape': [1, 2]}], - 'outputs': [{'name': 'output', 'datatype': 'FP32', 'shape': [1]}]} - model_config = { - "platform": "onnxruntime", - "max_batch_size": 1, - "input_size": [[1, 24], [1, 2]], - "input_types": ["int", "float"], - "input": [ - { - "name": "input", - "data_type": "TYPE_FP32", - "dims": [] - } - ], - "output": [ - { - "name": "output", - "data_type": "TYPE_FP32", - "dims": [] - } - ] - } - sudo_prefix = "sudo " sys_name = platform.system() if sys_name == "Darwin": sudo_prefix = "" - num_gpus = 0 + num_gpus = gpu_per_replica # Real gpu per replica (container) gpu_ids, gpu_attach_cmd = None, "" running_model_name = ClientConstants.get_running_model_name( end_point_name, inference_model_name, model_version, end_point_id, model_id, edge_id=edge_id) - # Check whether triton server is running. 
-    triton_server_is_running = False
-    if not use_simulation_test_without_triton:
-        triton_server_container_name = "{}".format(ClientConstants.FEDML_TRITON_SERVER_CONTAINER_NAME_PREFIX)
-        if not ClientConstants.is_running_on_k8s():
-            check_triton_server_running_cmds = "{}docker ps |grep {}".format(sudo_prefix, triton_server_container_name)
-            running_process = ClientConstants.exec_console_with_script(check_triton_server_running_cmds,
-                                                                       should_capture_stdout=True,
-                                                                       should_capture_stderr=True)
-            ret_code, out, err = ClientConstants.get_console_pipe_out_err_results(running_process)
-            if out is not None:
-                out_str = sys_utils.decode_our_err_result(out)
-                if str(out_str) != "":
-                    triton_server_is_running = True
-
-    # Convert models from pytorch to onnx format
     if model_is_from_open:
-        if model_from_open is None:
-            return running_model_name, "", model_version, {}, {}
-
-        logging.info("model binary file: {}".format(model_bin_file))
-        with open(model_bin_file, 'rb') as model_pkl_file:
-            if not torch.cuda.is_available():
-                try:
-                    open_model_params = CPUUnpickler(model_pkl_file).load()
-                except Exception as ex:
-                    logging.info("load model exceptions when using CPU_Unpickler: {}".format(traceback.format_exc()))
-                    return "", "", model_version, model_metadata, model_config
-            else:
-                open_model_params = pickle.load(model_pkl_file)
-            model_from_open.load_state_dict(open_model_params)
-            model_from_open.eval()
-
-        if inference_engine == ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON:
-            logging.info("convert the onnx model when the mode is from FedML® Nexus AI Platform..")
-            logging.info("Input size {}, input types {}".format(model_params["input_size"],
-                                                                model_params["input_types"]))
-            input_size = model_params["input_size"]
-            input_types = model_params["input_types"]
-
-            dummy_input_list = []
-            for index, input_i in enumerate(input_size):
-                if input_types[index] == "int":
-                    this_input = torch.randint(0, 1, input_i).clone().detach()
-                else:
-                    this_input = torch.zeros(input_i).clone().detach()
-                dummy_input_list.append(this_input)
-
-            onnx_model_path = os.path.join(model_storage_local_path,
-                                           ClientConstants.FEDML_CONVERTED_MODEL_DIR_NAME,
-                                           running_model_name, ClientConstants.INFERENCE_MODEL_VERSION)
-            if not os.path.exists(onnx_model_path):
-                os.makedirs(onnx_model_path, exist_ok=True)
-            onnx_model_path = os.path.join(onnx_model_path, "model.onnx")
-
-            convert_model_to_onnx(model_from_open, onnx_model_path, dummy_input_list, input_size)
-        elif ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT:  # we do not convert the model to onnx in llm
-            logging.info("LLM model loaded from the open")
+        logging.error("The model is exported directly from open; currently we do not convert it to a servable format.")
+        return "", "", None, None, None
+
+    # Parse the model config file and get the necessary information for the deployment
+    model_config_path = os.path.join(model_storage_local_path, "fedml_model_config.yaml")
+    with open(model_config_path, 'r') as file:
+        config = yaml.safe_load(file)
+        # Resource related
+        use_gpu = config.get('use_gpu', True)
+        in_gpu_ids = config.get('gpu_ids', gpu_ids)
+        num_gpus_frm_yml = config.get('num_gpus', None)
+        if not use_gpu:
+            num_gpus = 0
         else:
-            raise Exception("Unsupported inference engine type: {}".format(inference_engine))
-    elif model_is_from_open == False or model_is_from_open is None:
-        model_location = os.path.join(model_storage_local_path, "fedml_model.bin")
-        try:
-            model = torch.jit.load(model_location)
-            model.eval()
-        except Exception as e:
-            logging.info(
-                "Cannot 
locate the .bin file, will read it from" - " the fedml_model_config.yaml with the key [local_model_dir] ") - model_config_path = os.path.join(model_storage_local_path, "fedml_model_config.yaml") - with open(model_config_path, 'r') as file: - config = yaml.safe_load(file) - # Resource related - use_gpu = config.get('use_gpu', False) - in_gpu_ids = config.get('gpu_ids', gpu_ids) - num_gpus = config.get('num_gpus', None) - if not use_gpu: - num_gpus = 0 - else: - if num_gpus is None: - num_gpus = len(in_gpu_ids) if in_gpu_ids is not None else 1 - usr_indicated_wait_time = config.get('deploy_timeout', 900) - usr_indicated_worker_port = config.get('worker_port', "") - if usr_indicated_worker_port == "": - usr_indicated_worker_port = os.environ.get("FEDML_WORKER_PORT", "") - shm_size = config.get('shm_size', None) - storage_opt = config.get('storage_opt', None) - tmpfs = config.get('tmpfs', None) - cpus = config.get('cpus', None) - if cpus is not None: - cpus = int(cpus) - memory = config.get('memory', None) - - if usr_indicated_worker_port == "": - usr_indicated_worker_port = None - else: - usr_indicated_worker_port = int(usr_indicated_worker_port) - - worker_port_env = os.environ.get("FEDML_WORKER_PORT", "") - worker_port_from_config = config.get('worker_port', "") - print(f"usr_indicated_worker_port {usr_indicated_worker_port}, worker port env {worker_port_env}, " - f"worker port from config {worker_port_from_config}") - - usr_indicated_retry_cnt = max(int(usr_indicated_wait_time) // 10, 1) - inference_image_name = config.get('inference_image_name', - ClientConstants.INFERENCE_SERVER_CUSTOME_IMAGE) - image_pull_policy = config.get('image_pull_policy', SchedulerConstants.IMAGE_PULL_POLICY_IF_NOT_PRESENT) - - # Source code dir, bootstrap dir, data cache dir - src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', "")) - - # Get the bootstrap and job commands inside the yaml file - bootstrap_cmds_str_frm_yaml = config.get('bootstrap', "") - job_cmds_str_frm_yaml = config.get('job', "") - - if bootstrap_cmds_str_frm_yaml != "" or job_cmds_str_frm_yaml != "": - auto_gen_bootstrap_file_name = "fedml-deploy-bootstrap-entry-auto-gen.sh" - src_bootstrap_file_path = os.path.join(model_storage_local_path, auto_gen_bootstrap_file_name) - with open(src_bootstrap_file_path, 'w') as f: - f.write("cd /home/fedml/models_serving/\n") - f.write(bootstrap_cmds_str_frm_yaml) - f.write("\n") - f.write("cd /home/fedml/models_serving/\n") - f.write(job_cmds_str_frm_yaml) - else: - src_bootstrap_file_path = "" + if num_gpus_frm_yml is not None: + num_gpus = int(num_gpus_frm_yml) + usr_indicated_wait_time = config.get('deploy_timeout', 900) + usr_indicated_worker_port = config.get('worker_port', "") + if usr_indicated_worker_port == "": + usr_indicated_worker_port = os.environ.get("FEDML_WORKER_PORT", "") + shm_size = config.get('shm_size', None) + storage_opt = config.get('storage_opt', None) + tmpfs = config.get('tmpfs', None) + cpus = config.get('cpus', None) + if cpus is not None: + cpus = int(cpus) + memory = config.get('memory', None) + + if usr_indicated_worker_port == "": + usr_indicated_worker_port = None + else: + usr_indicated_worker_port = int(usr_indicated_worker_port) + + worker_port_env = os.environ.get("FEDML_WORKER_PORT", "") + worker_port_from_config = config.get('worker_port', "") + print(f"usr_indicated_worker_port {usr_indicated_worker_port}, worker port env {worker_port_env}, " + f"worker port from config {worker_port_from_config}") + + usr_indicated_retry_cnt = 
max(int(usr_indicated_wait_time) // 10, 1) + inference_image_name = config.get('inference_image_name', + ClientConstants.INFERENCE_SERVER_CUSTOME_IMAGE) + image_pull_policy = config.get('image_pull_policy', SchedulerConstants.IMAGE_PULL_POLICY_IF_NOT_PRESENT) + + # Source code dir, bootstrap dir, data cache dir + src_code_dir = os.path.join(model_storage_local_path, config.get('source_code_dir', "")) + + # Get the bootstrap and job commands inside the yaml file + bootstrap_cmds_str_frm_yaml = config.get('bootstrap', "") + job_cmds_str_frm_yaml = config.get('job', "") + + if bootstrap_cmds_str_frm_yaml != "" or job_cmds_str_frm_yaml != "": + auto_gen_bootstrap_file_name = "fedml-deploy-bootstrap-entry-auto-gen.sh" + src_bootstrap_file_path = os.path.join(model_storage_local_path, auto_gen_bootstrap_file_name) + with open(src_bootstrap_file_path, 'w') as f: + f.write("cd /home/fedml/models_serving/\n") + f.write(bootstrap_cmds_str_frm_yaml) + f.write("\n") + f.write("cd /home/fedml/models_serving/\n") + f.write(job_cmds_str_frm_yaml) + else: + src_bootstrap_file_path = "" - data_cache_dir_input = config.get('data_cache_dir', "") - request_input_example = config.get('request_input_example', None) - extra_envs = config.get('environment_variables', None) + data_cache_dir_input = config.get('data_cache_dir', "") + request_input_example = config.get('request_input_example', None) + extra_envs = config.get('environment_variables', None) - # Serving dir inside docker - dst_model_serving_dir = "/home/fedml/models_serving" - relative_entry = config.get('entry_point') - if src_bootstrap_file_path != "": - dst_bootstrap_dir = os.path.join(dst_model_serving_dir, auto_gen_bootstrap_file_name) - else: - dst_bootstrap_dir = "" + # Serving dir inside docker + dst_model_serving_dir = "/home/fedml/models_serving" + relative_entry = config.get('entry_point') + if src_bootstrap_file_path != "": + dst_bootstrap_dir = os.path.join(dst_model_serving_dir, auto_gen_bootstrap_file_name) + else: + dst_bootstrap_dir = "" - # If using customized image, then bootstrap + job will be the entry point - enable_custom_image = config.get("enable_custom_image", False) - customized_image_entry_cmd = \ - "/bin/bash /home/fedml/models_serving/fedml-deploy-bootstrap-entry-auto-gen.sh" + # If using customized image, then bootstrap + job will be the entry point + enable_custom_image = config.get("enable_custom_image", False) + customized_image_entry_cmd = \ + "/bin/bash /home/fedml/models_serving/fedml-deploy-bootstrap-entry-auto-gen.sh" - docker_registry_user_name = config.get("docker_registry_user_name", "") - docker_registry_user_password = config.get("docker_registry_user_password", "") - docker_registry = config.get("docker_registry", "") + docker_registry_user_name = config.get("docker_registry_user_name", "") + docker_registry_user_password = config.get("docker_registry_user_password", "") + docker_registry = config.get("docker_registry", "") - port_inside_container = int(config.get("port_inside_container", 2345)) - use_triton = config.get("use_triton", False) - if use_triton: - inference_type = "triton" - else: - inference_type = "default" - - if src_code_dir == "": - raise Exception("Please indicate source_code_dir in the fedml_model_config.yaml") - if relative_entry == "": - logging.warning("You missed main_entry in the fedml_model_config.yaml") - - if num_gpus > 0: - gpu_ids, gpu_attach_cmd = request_gpu_ids_on_deployment( - edge_id, end_point_id, num_gpus=num_gpus, master_device_id=master_device_id) - - if 
inference_engine == ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON: - # configuration passed by user in the Cli - input_size = model_params["input_size"] - input_types = model_params["input_types"] - logging.info("convert the onnx model when the mode is from the general PyTorch...") - logging.info("Input size {}, input types {}".format(model_params["input_size"], - model_params["input_types"])) - dummy_input_list = [] - for index, input_i in enumerate(input_size): - if input_types[index] == "int": - this_input = torch.randint(0, 1, input_i).clone().detach() - else: - this_input = torch.zeros(input_i).clone().detach() - dummy_input_list.append(this_input) + port_inside_container = int(config.get("port_inside_container", 2345)) + use_triton = config.get("use_triton", False) + if use_triton: + inference_type = "triton" + else: + inference_type = "default" + + # Config check + if src_code_dir == "": + raise Exception("Please indicate source_code_dir in the fedml_model_config.yaml") + if relative_entry == "": + logging.warning("You missed main_entry in the fedml_model_config.yaml") - onnx_model_path = os.path.join(model_storage_local_path, - ClientConstants.FEDML_CONVERTED_MODEL_DIR_NAME, - running_model_name, ClientConstants.INFERENCE_MODEL_VERSION) - logging.info("converted onnx model path: {}".format(onnx_model_path)) - if not os.path.exists(onnx_model_path): - os.makedirs(onnx_model_path, exist_ok=True) - onnx_model_path = os.path.join(onnx_model_path, "model.onnx") + # Request the GPU ids for the deployment + if num_gpus > 0: + gpu_ids, gpu_attach_cmd = request_gpu_ids_on_deployment( + edge_id, end_point_id, num_gpus=num_gpus, master_device_id=master_device_id) - convert_model_to_onnx(model, onnx_model_path, dummy_input_list, input_size) + # set replica and their gpu ids + FedMLModelCache.get_instance().set_redis_params() + FedMLModelCache.get_instance().set_replica_gpu_ids( + end_point_id, end_point_name, inference_model_name, edge_id, replica_rank+1, gpu_ids) + logging.info("GPU ids allocated: {}".format(gpu_ids)) logging.info("move converted model to serving dir for inference...") model_serving_dir = ClientConstants.get_model_serving_dir() @@ -339,265 +211,199 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version, if not os.path.exists(dst_model_file): shutil.copyfile(src_model_file, dst_model_file) - if inference_engine == ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT: - logging.info(f"master ip: {master_ip}, worker ip: {infer_host}") - if infer_host == master_ip: - logging.info("infer_host is the same as master ip, will use 127.0.0.1 to avoid firewall issue") - infer_host = "127.0.0.1" + if inference_engine != ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT: + raise Exception(f"inference engine {inference_engine} is not supported") - try: - client = docker.from_env() - if enable_custom_image and docker_registry_user_name != "" and docker_registry_user_password != "" \ - and docker_registry != "": - client.login(username=docker_registry_user_name, password=docker_registry_user_password, - registry=docker_registry) - except Exception: - logging.error("Failed to connect to the docker daemon, please ensure that you have " - "installed Docker Desktop or Docker Engine, and the docker is running") - return "", "", None, None, None - - container_prefix = "{}".format(ClientConstants.FEDML_DEFAULT_SERVER_CONTAINER_NAME_PREFIX) + "__" + \ - security_utils.get_content_hash(running_model_name) - - same_model_container_rank = 
ContainerUtils.get_container_rank_same_model(container_prefix)
-    if same_model_container_rank == -1:
-        logging.error(f"Fail to get existed docker with {end_point_name} {inference_model_name}")
-        raise Exception("Failed to get the container rank")
-    default_server_container_name = container_prefix + "__" + str(same_model_container_rank)
+    # Get the master device id
+    logging.info(f"master ip: {master_ip}, worker ip: {infer_host}")
+    if infer_host == master_ip:
+        logging.info("infer_host is the same as master ip, will use 127.0.0.1 to avoid firewall issue")
+        infer_host = "127.0.0.1"
-    try:
-        exist_container_obj = client.containers.get(default_server_container_name)
-    except docker.errors.NotFound:
-        exist_container_obj = None
-    except docker.errors.APIError:
-        raise Exception("Failed to get the container object")
-
-    if exist_container_obj is not None:
-        client.api.remove_container(exist_container_obj.id, v=True, force=True)
-    device_requests = []
-    if no_real_gpu_allocation is not None:
-        use_gpu = not no_real_gpu_allocation
-    if use_gpu:
-        logging.info("Number of GPUs: {}".format(num_gpus))
-        if gpu_ids is not None:
-            gpu_id_list = map(lambda x: str(x), gpu_ids)
-            device_requests.append(
-                docker.types.DeviceRequest(device_ids=list(gpu_id_list), capabilities=[['gpu']]))
+    try:
+        client = docker.from_env()
+        if enable_custom_image and docker_registry_user_name != "" and docker_registry_user_password != "" \
+                and docker_registry != "":
+            client.login(username=docker_registry_user_name, password=docker_registry_user_password,
+                         registry=docker_registry)
+    except Exception:
+        logging.error("Failed to connect to the docker daemon, please ensure that you have "
+                      "installed Docker Desktop or Docker Engine, and that Docker is running")
+        return "", "", None, None, None
+
+    container_prefix = ("{}".format(ClientConstants.FEDML_DEFAULT_SERVER_CONTAINER_NAME_PREFIX) + "__" +
+                        security_utils.get_content_hash(running_model_name))
+
+    default_server_container_name = container_prefix + "__" + str(replica_rank)
+
+    try:
+        exist_container_obj = client.containers.get(default_server_container_name)
+    except docker.errors.NotFound:
+        exist_container_obj = None
+    except docker.errors.APIError:
+        raise Exception("Failed to get the container object")
+
+    # Allocate the GPU
+    # TODO: Make sure there is no GPU competition among the replicas of a single deployment
+    if exist_container_obj is not None:
+        client.api.remove_container(exist_container_obj.id, v=True, force=True)
+    device_requests = []
+    if no_real_gpu_allocation is not None:
+        use_gpu = not no_real_gpu_allocation
+    if use_gpu:
+        logging.info("Number of GPUs: {}".format(num_gpus))
+        if gpu_ids is not None:
+            gpu_id_list = map(lambda x: str(x), gpu_ids)
+            device_requests.append(
+                docker.types.DeviceRequest(device_ids=list(gpu_id_list), capabilities=[['gpu']]))
+        else:
+            device_requests.append(
+                docker.types.DeviceRequest(count=num_gpus, capabilities=[['gpu']]))
+    logging.info(f"device_requests: {device_requests}")
+
+    # Pull the inference image
+    logging.info(f"Start pulling the inference image {inference_image_name}..., may take a few minutes...")
+    ContainerUtils.get_instance().pull_image_with_policy(image_pull_policy, inference_image_name)
+
+    logging.info("Start creating the inference container...")
+    volumns = []
+    binds = {}
+    environment = {}
+
+    # data_cache_dir mounting
+    assert type(data_cache_dir_input) == dict or type(data_cache_dir_input) == str
+    if type(data_cache_dir_input) == str:
+        # In this case, we mount to the same folder, if 
has ~, we replace it with /home/fedml
+        src_data_cache_dir, dst_data_cache_dir = "", ""
+        if data_cache_dir_input != "":
+            if data_cache_dir_input[0] == "~":
+                src_data_cache_dir = os.path.expanduser(data_cache_dir_input)
+                dst_data_cache_dir = data_cache_dir_input.replace("~", "/home/fedml")
             else:
-        device_requests.append(
-            docker.types.DeviceRequest(count=num_gpus, capabilities=[['gpu']]))
-    logging.info(f"device_requests: {device_requests}")
-    logging.info(f"Start pulling the inference image {inference_image_name}..., may take a few minutes...")
-
-    ContainerUtils.get_instance().pull_image_with_policy(image_pull_policy, inference_image_name)
-
-    logging.info("Start creating the inference container...")
-    volumns = []
-    binds = {}
-    environment = {}
-
-    assert type(data_cache_dir_input) == dict or type(data_cache_dir_input) == str
-    if type(data_cache_dir_input) == str:
-        # In this case, we mount to the same folder, if has ~, we replace it with /home/fedml
-        src_data_cache_dir, dst_data_cache_dir = "", ""
-        if data_cache_dir_input != "":
-            if data_cache_dir_input[0] == "~":
-                src_data_cache_dir = os.path.expanduser(data_cache_dir_input)
-                dst_data_cache_dir = data_cache_dir_input.replace("~", "/home/fedml")
+                # check if the data_cache_dir is a relative path
+                if data_cache_dir_input[0] != "/":
+                    raise Exception("data_cache_dir_input has to be an absolute path or start with ~")
                 else:
-                # check if the data_cache_dir is a relative path
-                if data_cache_dir_input[0] != "/":
-                    raise "data_cache_dir_input has to be an absolute path or start with ~"
-                else:
-                    src_data_cache_dir = data_cache_dir_input
-                    dst_data_cache_dir = data_cache_dir_input
-        logging.info(f"src_data_cache_dir: {src_data_cache_dir}, dst_data_cache_dir: {dst_data_cache_dir}")
-
-        if type(src_data_cache_dir) == str and src_data_cache_dir != "":
-            logging.info("Start copying the data cache to the container...")
-            if os.path.exists(src_data_cache_dir):
-                volumns.append(src_data_cache_dir)
-                binds[src_data_cache_dir] = {
-                    "bind": dst_data_cache_dir,
-                    "mode": "rw"
-                }
-                environment["DATA_CACHE_FOLDER"] = dst_data_cache_dir
-    else:
-        for k, v in data_cache_dir_input.items():
-            if os.path.exists(k):
-                volumns.append(v)
-                binds[k] = {
-                    "bind": v,
+                    src_data_cache_dir = data_cache_dir_input
+                    dst_data_cache_dir = data_cache_dir_input
+        logging.info(f"src_data_cache_dir: {src_data_cache_dir}, dst_data_cache_dir: {dst_data_cache_dir}")
+
+        if type(src_data_cache_dir) == str and src_data_cache_dir != "":
+            logging.info("Start copying the data cache to the container...")
+            if os.path.exists(src_data_cache_dir):
+                volumns.append(src_data_cache_dir)
+                binds[src_data_cache_dir] = {
+                    "bind": dst_data_cache_dir,
                     "mode": "rw"
                 }
-            else:
-                logging.warning(f"{k} does not exist, skip mounting it to the container")
-        logging.info(f"Data cache mount: {volumns}, {binds}")
-
-    # Default
-    if not enable_custom_image or (enable_custom_image and relative_entry != ""):
-        logging.info("Start copying the source code to the container...")
-        volumns.append(src_code_dir)
-        binds[src_code_dir] = {
-            "bind": dst_model_serving_dir,
-            "mode": "rw"
-        }
-        environment["MAIN_ENTRY"] = relative_entry
-
-    if not enable_custom_image:
-        # For some image, the default user is root. Unified to fedml. 
- environment["HOME"] = "/home/fedml" - environment["BOOTSTRAP_DIR"] = dst_bootstrap_dir - environment["FEDML_CURRENT_RUN_ID"] = end_point_id - environment["FEDML_CURRENT_EDGE_ID"] = edge_id - environment["FEDML_CURRENT_VERSION"] = fedml.get_env_version() - environment["FEDML_ENV_VERSION"] = fedml.get_env_version() - environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_HOST"] = fedml.get_local_on_premise_platform_host() - environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_PORT"] = fedml.get_local_on_premise_platform_port() - logging.info(f"volume: {volumns}, binds: {binds}, environment: {environment}") - logging.info(f"dst_model_serving_dir: {dst_model_serving_dir}") - logging.info(f"relative_entry: {relative_entry}") - logging.info(f"src_bootstrap_file_path: {src_bootstrap_file_path}") - logging.info(f"dst_bootstrap_dir: {dst_bootstrap_dir}") - logging.info(f"src_code_dir: {src_code_dir}") - logging.info(f"model_serving_dir: {model_serving_dir}") - - if extra_envs is not None: - for key in extra_envs: - environment[key] = extra_envs[key] + environment["DATA_CACHE_FOLDER"] = dst_data_cache_dir + else: + for k, v in data_cache_dir_input.items(): + if os.path.exists(k): + volumns.append(v) + binds[k] = { + "bind": v, + "mode": "rw" + } + else: + logging.warning(f"{k} does not exist, skip mounting it to the container") + logging.info(f"Data cache mount: {volumns}, {binds}") + + # Default mounting + if not enable_custom_image or (enable_custom_image and relative_entry != ""): + logging.info("Start copying the source code to the container...") + volumns.append(src_code_dir) + binds[src_code_dir] = { + "bind": dst_model_serving_dir, + "mode": "rw" + } + environment["MAIN_ENTRY"] = relative_entry + + # Environment variables + if not enable_custom_image: + # For some image, the default user is root. Unified to fedml. 
+ environment["HOME"] = "/home/fedml" + + environment["BOOTSTRAP_DIR"] = dst_bootstrap_dir + environment["FEDML_CURRENT_RUN_ID"] = end_point_id + environment["FEDML_CURRENT_EDGE_ID"] = edge_id + environment["FEDML_CURRENT_VERSION"] = fedml.get_env_version() + environment["FEDML_ENV_VERSION"] = fedml.get_env_version() + environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_HOST"] = fedml.get_local_on_premise_platform_host() + environment["FEDML_ENV_LOCAL_ON_PREMISE_PLATFORM_PORT"] = fedml.get_local_on_premise_platform_port() + + if extra_envs is not None: + for key in extra_envs: + environment[key] = extra_envs[key] + try: + new_container = client.api.create_container( + image=inference_image_name, + name=default_server_container_name, + volumes=volumns, + ports=[port_inside_container], # port open inside the container + environment=environment, + host_config=client.api.create_host_config( + binds=binds, + port_bindings={ + port_inside_container: usr_indicated_worker_port # Could be either None or a port number + }, + device_requests=device_requests, + shm_size=shm_size, + storage_opt=storage_opt, + tmpfs=tmpfs, + cpu_count=cpus, + mem_limit=memory, + ), + detach=True, + command=customized_image_entry_cmd if enable_custom_image else None + ) + client.api.start(container=new_container.get("Id")) + except Exception as e: + logging.error(f"Failed to create the container with exception {e}, traceback : {traceback.format_exc()}") + return "", "", None, None, None + + # Get the port allocation + cnt = 0 + while True: + cnt += 1 try: - new_container = client.api.create_container( - image=inference_image_name, - name=default_server_container_name, - volumes=volumns, - ports=[port_inside_container], # port open inside the container - environment=environment, - host_config=client.api.create_host_config( - binds=binds, - port_bindings={ - port_inside_container: usr_indicated_worker_port # Could be either None or a port number - }, - device_requests=device_requests, - shm_size=shm_size, - storage_opt=storage_opt, - tmpfs=tmpfs, - cpu_count=cpus, - mem_limit=memory, - ), - detach=True, - command=customized_image_entry_cmd if enable_custom_image else None - ) - client.api.start(container=new_container.get("Id")) - except Exception as e: - logging.error(f"Failed to create the container with exception {e}, traceback : {traceback.format_exc()}") + if usr_indicated_worker_port is not None: + inference_http_port = usr_indicated_worker_port + break + else: + # Find the random port + port_info = client.api.port(new_container.get("Id"), port_inside_container) + inference_http_port = port_info[0]["HostPort"] + logging.info("inference_http_port: {}".format(inference_http_port)) + break + except: + if cnt >= 5: + raise Exception("Failed to get the port allocation") + time.sleep(3) - # Get the port allocation - cnt = 0 - while True: - cnt += 1 - try: - if usr_indicated_worker_port is not None: - inference_http_port = usr_indicated_worker_port - break - else: - # Find the random port - port_info = client.api.port(new_container.get("Id"), port_inside_container) - inference_http_port = port_info[0]["HostPort"] - logging.info("inference_http_port: {}".format(inference_http_port)) - break - except: - if cnt >= 5: - raise Exception("Failed to get the port allocation") - time.sleep(3) - - # Logging the info from the container - log_deployment_result(end_point_id, model_id, default_server_container_name, - ClientConstants.CMD_TYPE_RUN_DEFAULT_SERVER, - inference_model_name, inference_engine, inference_http_port, inference_type, 
- retry_interval=10, deploy_attempt_threshold=usr_indicated_retry_cnt, - request_input_example=request_input_example, infer_host=infer_host, - enable_custom_image=enable_custom_image) - - # Check if the inference server is ready - inference_output_url, running_model_version, ret_model_metadata, ret_model_config = \ - get_model_info(inference_model_name, inference_engine, inference_http_port, - infer_host, False, inference_type, request_input_example=request_input_example, - enable_custom_image=enable_custom_image) - - if inference_output_url == "": - return running_model_name, "", None, None, None - - # testing the inference container - test_input = ret_model_metadata["inputs"] - - # try: - # inference_response = run_http_inference_with_curl_request(inference_output_url, test_input, [], - # inference_type="default") - # logging.info(f"Tested the inference backend with {test_input}, the response is {inference_response}") - # except Exception as e: - # logging.info("Tested the inference backend, exceptions occurred: {}".format(traceback.format_exc())) - # inference_output_url = "" - - model_metadata = ret_model_metadata - logging.info(model_metadata) - elif inference_engine == ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON: - logging.info("prepare to run triton server...") - if not use_simulation_test_without_triton: - if not triton_server_is_running and not ClientConstants.is_running_on_k8s(): - triton_server_cmd = "{}docker stop {}; {}docker rm {}; {}docker run --name {} {} -p{}:8000 " \ - "-p{}:8001 -p{}:8002 " \ - "--shm-size {} " \ - "-v {}:/models {} " \ - "bash -c \"pip install transformers && tritonserver --strict-model-config=false " \ - "--model-control-mode=poll --repository-poll-secs={} " \ - "--model-repository=/models\" ".format(sudo_prefix, triton_server_container_name, - sudo_prefix, triton_server_container_name, - sudo_prefix, triton_server_container_name, - gpu_attach_cmd, - inference_http_port, - inference_grpc_port, - inference_metric_port, - inference_memory_size, - model_serving_dir, - inference_server_image, - ClientConstants.FEDML_MODEL_SERVING_REPO_SCAN_INTERVAL) - logging.info("Run triton inference server: {}".format(triton_server_cmd)) - triton_server_process = ClientConstants.exec_console_with_script(triton_server_cmd, - should_capture_stdout=False, - should_capture_stderr=False, - no_sys_out_err=True) - log_deployment_result(end_point_id, model_id, triton_server_container_name, - ClientConstants.CMD_TYPE_RUN_TRITON_SERVER, triton_server_process.pid, - running_model_name, inference_engine, inference_http_port) - - inference_output_url, running_model_version, ret_model_metadata, ret_model_config = \ - get_model_info(running_model_name, inference_engine, inference_http_port, infer_host) - if inference_output_url != "": - # Send the test request to the inference backend and check if the response is normal - input_json, output_json = build_inference_req(end_point_name, inference_model_name, - token, ret_model_metadata) - try: - inference_response = run_http_inference_with_curl_request(inference_output_url, - input_json["inputs"], - input_json["outputs"]) - logging.info("Tested the inference backend, the response is {}".format(inference_response)) - except Exception as e: - logging.info("Tested the inference backend, exceptions occurred: {}".format(traceback.format_exc())) - inference_output_url = "" - - if inference_output_url != "": - logging.info( - "Deploy model successfully, inference url: {}, model metadata: {}, model config: {}".format( - inference_output_url, 
model_metadata, model_config)) - model_metadata = ret_model_metadata - model_config = ret_model_config - else: - inference_output_url = f"http://localhost:{inference_http_port}/v2/models/{running_model_name}/versions/1/infer" - else: - raise Exception("inference engine {} is not supported".format(inference_engine)) + # Logging the info from the container + log_deployment_result(end_point_id, model_id, default_server_container_name, + ClientConstants.CMD_TYPE_RUN_DEFAULT_SERVER, + inference_model_name, inference_engine, inference_http_port, inference_type, + retry_interval=10, deploy_attempt_threshold=usr_indicated_retry_cnt, + request_input_example=request_input_example, infer_host=infer_host, + enable_custom_image=enable_custom_image) + + # Check if the inference server is ready + inference_output_url, running_model_version, ret_model_metadata, ret_model_config = \ + get_model_info(inference_model_name, inference_engine, inference_http_port, + infer_host, False, inference_type, request_input_example=request_input_example, + enable_custom_image=enable_custom_image) + + if inference_output_url == "": + return running_model_name, "", None, None, None - return running_model_name, inference_output_url, model_version, model_metadata, model_config + model_metadata = ret_model_metadata + logging.info(model_metadata) + + return running_model_name, inference_output_url, model_version, model_metadata, ret_model_config def build_inference_req(end_point_name, model_name, token, in_model_metadata): @@ -719,8 +525,14 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type, break if container_obj is not None: - out_logs = container_obj.logs(stdout=True, stderr=False, stream=False, follow=False, since=last_log_time) - err_logs = container_obj.logs(stdout=False, stderr=True, stream=False, follow=False, since=last_log_time) + try: + out_logs = container_obj.logs(stdout=True, stderr=False, stream=False, follow=False, + since=last_log_time) + err_logs = container_obj.logs(stdout=False, stderr=True, stream=False, follow=False, + since=last_log_time) + except Exception as e: + logging.error(f"Failed to get the logs from the container with exception {e}") + pass last_log_time = datetime.datetime.now() @@ -741,9 +553,7 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type, logging.info(f"Logs from docker: {format(out_logs)}") if container_obj.status == "exited": - logging.info("Container {} has exited, automatically" - " remove it ...".format(cmd_container_name)) - client.api.remove_container(container_obj.id, v=True, force=True) + logging.info("Container {} has exited".format(cmd_container_name)) break # should_exit_logs will ping the inference container diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index 6e99851d73..faa16e7b4c 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -26,34 +26,34 @@ pass -class Settings(BaseSettings): - redis_addr: str - redis_port: str - redis_password: str - end_point_name: str - model_name: str - model_version: str - model_infer_url: str - version: str - use_mqtt_inference: bool - use_worker_gateway: bool - ext_info: str - - -settings = Settings() - -# class settings: -# redis_addr = "127.0.0.1" -# redis_port = 6379 -# redis_password = "fedml_default" -# end_point_name = "" -# model_name = "" 
-# model_version = ""
-# model_infer_url = "127.0.0.1"
-# version = "dev"
-# use_mqtt_inference = False
-# use_worker_gateway = False
-# ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"
+# class Settings(BaseSettings):
+#     redis_addr: str
+#     redis_port: str
+#     redis_password: str
+#     end_point_name: str
+#     model_name: str
+#     model_version: str
+#     model_infer_url: str
+#     version: str
+#     use_mqtt_inference: bool
+#     use_worker_gateway: bool
+#     ext_info: str
+#
+#
+# settings = Settings()
+
+class settings:
+    redis_addr = "127.0.0.1"
+    redis_port = 6379
+    redis_password = "fedml_default"
+    end_point_name = ""
+    model_name = ""
+    model_version = ""
+    model_infer_url = "127.0.0.1"
+    version = "dev"
+    use_mqtt_inference = False
+    use_worker_gateway = False
+    ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75"

 api = FastAPI()
@@ -176,7 +176,8 @@ async def _predict(
     idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \
         found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version)
     if idle_device is None or idle_device == "":
-        return {"error": True, "error_code": status.HTTP_404_NOT_FOUND, "message": "can not found the active endpoint."}
+        return {"error": True, "error_code": status.HTTP_404_NOT_FOUND,
+                "message": "cannot find an active inference worker for this endpoint."}

     # Start timing for model metrics
     model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name,
@@ -224,12 +225,17 @@ def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_
     redis_key = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \
         get_end_point_full_key_by_id(end_point_id)
     if redis_key is not None:
+        end_point_name = ""
+        model_name = ""
         if in_end_point_name is not None:
             end_point_name = in_end_point_name
             model_name = redis_key[len(f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG}-{end_point_id}-{in_end_point_name}-"):]
         else:
             # e.g. FEDML_MODEL_DEPLOYMENT_STATUS--1234-dummy_endpoint_name-dummy_model_name
-            end_point_id, end_point_name, model_name = redis_key.split("--")[1].split("-")
+            try:
+                end_point_id, end_point_name, model_name = redis_key.split("--")[1].split("-")
+            except Exception as e:
+                logging.warning(f"Failed to parse redis_key: {redis_key}. 
Will only use end_point_id to retrieve the endpoint info.")

     if enable_check:
         if end_point_name != in_end_point_name or model_name != in_model_name:
@@ -352,6 +358,6 @@ def logging_inference_request(request, response):

 if __name__ == "__main__":
     import uvicorn

-    port = 2204
+    port = 2203
     logging.basicConfig(level=logging.INFO)
     uvicorn.run(api, host="0.0.0.0", port=port, log_level="info")
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py b/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
index 062b591853..a6c6244108 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_msg_object.py
@@ -1,5 +1,5 @@
-
 import json
+import logging


 class FedMLModelMsgObject(object):
@@ -37,7 +37,10 @@ def __init__(self, topic, payload):
         }"""

         # get deployment params
-        request_json = json.loads(payload)
+        if isinstance(payload, dict):
+            request_json = payload
+        else:
+            request_json = json.loads(payload)
         self.msg_topic = topic
         self.request_json = request_json
         self.run_id = request_json["end_point_id"]
@@ -58,13 +61,54 @@ def __init__(self, topic, payload):
         self.inference_engine = self.model_config.get("inference_engine", 0)
         self.inference_end_point_id = self.run_id
-        self. request_json["run_id"] = self.run_id
+        self.request_json["run_id"] = self.run_id
+
+        self.gpu_topology = self.get_devices_avail_gpus()
+        self.gpu_per_replica = self.get_gpu_per_replica()
+
+        self.max_unavailable_rate = self.model_config.get("max_unavailable_rate", 0.1)
+
+    def get_devices_avail_gpus(self):
+        """
+        {
+            "gpu_topology": {"id1": 1, "id2": 1}    # Here each 1 means one gpu card, not one replica
+        }
+        """
+        # [Test1] using self.request_json["parameters"]["gpu_topology"]
+        # logging.info(f"[Replica Controller] [endpoint {self.run_id} ] devices_avail_gpus:"
+        #              f" {self.request_json['parameters']['gpu_topology']}")
+        # res = self.request_json["parameters"]["gpu_topology"]
+
+        # [Test2] Using self.scale_min
+        # res = {}
+        # for id in self.request_json["device_ids"]:
+        #     if str(id) == str(self.device_ids[0]):
+        #         continue
+        #     res[id] = int(self.scale_min)
+        # return res
+
+        # [Prod] Using self.request_json["gpu_topology"]
+        if "gpu_topology" not in self.request_json:
+            logging.warning("gpu_topology not found in request_json, using scale_min instead")
+            res = {}
+            for id in self.request_json["device_ids"]:
+                if str(id) == str(self.device_ids[0]):
+                    continue
+                res[id] = int(self.scale_min)
+            return res
+
+        logging.info(f"[Replica Controller] [endpoint {self.run_id}] "
+                     f"devices_avail_gpus: {self.request_json['gpu_topology']}")
+
+        return self.request_json["gpu_topology"]
+
+    def get_gpu_per_replica(self):
+        """
+        Read gpu_per_replica from the user's config yaml file. Default: 1.
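+        e.g. "parameters": {"gpu_per_replica": 2} makes every replica of this
+        deployment occupy 2 gpu cards (illustrative value).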
+ """ + if "parameters" in self.request_json and "gpu_per_replica" in self.request_json["parameters"]: + return self.request_json["parameters"]["gpu_per_replica"] + return 1 def show(self, prefix=""): - print("{}end point id: {}, model name: {}, model id: {}," - " model version: {}, model url: {}".format(prefix, - self.inference_end_point_id, - self.model_name, - self.id, - self.model_version, - self.model_url)) + logging.info(f"{prefix} [FedMLModelMsgObject] [run_id {self.run_id}] [end_point_name {self.end_point_name}]") diff --git a/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py b/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py new file mode 100644 index 0000000000..9c43130687 --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py @@ -0,0 +1,437 @@ +import logging +import copy +from .device_model_cache import FedMLModelCache +from .device_model_msg_object import FedMLModelMsgObject +from .device_client_constants import ClientConstants + + +class FedMLDeviceReplicaController: + def __init__(self, master_id, request_json: dict): + """ + For each deployment, we have: + master_id: unique id for the master device + e_id: unique id (i.e. endpoint_id) for each deployment + devices_avail_gpus = {device_id1: gpu_num, device_id2: gpu_num, ...} + request_json: json from MLOps for this deployment + total_gpu_num: total number of gpus will be used for this deployment + gpu_per_replica: number of gpus required per replica + min_replica_num: minimum number of replicas required + max_replica_num: maximum number of replicas required + endpoint_name: endpoint name + model_name: model name + target_replica_num: target replica number for each device + target_replica_version: target replica version + curr_replica_num: current replica number for each device + intermediate_replica_num: intermediate replica number for each device + total_replica_version_diff_num: total replica version difference number + max_unavailable_rate: maximum unavailable rate + curr_replica_updating_window: current replica updating window + curr_replica_version: current replica version for each device + intermediate_replica_version: intermediate replica version for each device + """ + self.master_id = master_id + self.request_json = request_json + self.request_msg_obj = FedMLModelMsgObject("replica_controller", request_json) + + self.e_id = self.request_msg_obj.run_id + self.devices_avail_gpus = self.request_msg_obj.gpu_topology + self.total_gpu_num = self.calc_total_gpu_num() + self.gpu_per_replica = self.request_msg_obj.gpu_per_replica + self.min_replica_num = self.request_msg_obj.scale_min + self.max_replica_num = self.request_msg_obj.scale_max + self.endpoint_name = self.request_msg_obj.end_point_name + self.model_name = self.request_msg_obj.model_name + + self.target_replica_num = self.init_id_replica_num() + + self.curr_replica_num = self.get_curr_replica_num_state_frm_db() + self.intermediate_replica_num = copy.deepcopy(self.curr_replica_num) + + # Version control + self.target_replica_version = self.request_msg_obj.model_version + self.max_unavailable_rate = self.request_msg_obj.max_unavailable_rate + self.curr_replica_updating_window = {} + + self.curr_replica_version = self.get_curr_replica_version_frm_db() + self.intermediate_replica_version = copy.deepcopy(self.curr_replica_version) + + self.total_replica_version_diff_num, self.total_replica_version_diff = self.diff_target_curr_replica_version() + + def 
calc_total_gpu_num(self): + total_gpu_num = 0 + for device_id, gpu_num in self.devices_avail_gpus.items(): + total_gpu_num += gpu_num + return total_gpu_num + + def init_id_replica_num(self): + """ + Initialize the target replica number for each device. + id_replica_num[id] = avail_num // self.gpu_per_replica + """ + id_replica_num = {} + for id, avail_num in self.devices_avail_gpus.items(): + if avail_num % self.gpu_per_replica != 0: + raise ValueError("The number of gpus for each device should be divisible by gpu_per_replica") + id_replica_num[str(id)] = avail_num // self.gpu_per_replica + return id_replica_num + + def diff_target_curr_replica_num(self): + logging.info(f"[Replica Controller] [endpoint {self.e_id} ]target_replica_state: {self.target_replica_num}") + logging.info(f"[Replica Controller] [endpoint {self.e_id} ]curr_replica_state: {self.curr_replica_num}") + diff = self.diff_target_curr_replica_num_impl(self.target_replica_num, self.curr_replica_num) + logging.info( + f"[Replica Controller] [endpoint {self.e_id} ]diff_target_curr_replica_num: {diff}") + return diff + + def diff_target_curr_replica_version(self): + logging.info(f"[Replica Controller] [endpoint {self.e_id} ]" + f"target_replica_version: {self.target_replica_version}") + logging.info(f"[Replica Controller] [endpoint {self.e_id} ]" + f"curr_replica_version: {self.curr_replica_version}") + + num_diff, diff = self.diff_target_curr_replica_version_impl( + self.target_replica_version, self.curr_replica_version) + + logging.info( + f"[Replica Controller] [endpoint {self.e_id} ]diff_target_curr_replica_version: {diff}") + return num_diff, diff + + @staticmethod + def diff_target_curr_replica_num_impl(target_replica_state, curr_replica_state): + """ + Return the difference between target and current replica number. + "op" could only be "add" or "remove". + e.g. + curr_replica_state = {id1: 1, id2: 1} + target_replica_state = {id1: 2, id2: 2} + + return {id1: {"op": "add", "curr_num": 1, "target_num": 2}, id2: {"op": "add", "curr_num": 1, "target_num": 2}} + """ + diff_target_curr_replica_num = {} + assert target_replica_state is not None + + if curr_replica_state is None: + curr_replica_state = {} + for id, target_num in target_replica_state.items(): + diff_target_curr_replica_num[id] = {"op": "add", "curr_num": 0, "target_num": target_num} + return diff_target_curr_replica_num + + for id, target_num in target_replica_state.items(): + if id not in curr_replica_state: + # In one scale-out operation, the device may not be deployed yet. + diff_target_curr_replica_num[id] = {"op": "add", "curr_num": 0, "target_num": target_num} + elif target_num > curr_replica_state[id]: + diff_target_curr_replica_num[id] = {"op": "add", "curr_num": curr_replica_state[id], + "target_num": target_num} + elif target_num < curr_replica_state[id]: + diff_target_curr_replica_num[id] = {"op": "remove", "curr_num": curr_replica_state[id], + "target_num": target_num} + else: + pass + + for id, curr_num in curr_replica_state.items(): + if id not in target_replica_state: + diff_target_curr_replica_num[id] = {"op": "remove", "curr_num": curr_num, "target_num": 0} + + return diff_target_curr_replica_num + + @staticmethod + def diff_target_curr_replica_version_impl(target_replica_version: str, curr_replica_version): + """ + Return the number of difference, and difference between target and current replica version. + "op" could only be "update". + e.g. 
+ curr_replica_version = { + "id1": {$replica_no: "v1", $replica_no: "v1"}, + "id2": {$replica_no: "v1", $replica_no: "v1"}, + } + target_replica_version = "v2" # Could be different for each device in the future. + + return { + "id1": { + $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}, + $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"} + }, + "id2": { + $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}, + $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"} + } + } + + Return None if curr_replica_version is None.(i.e. this model has not been deployed yet.) + """ + if curr_replica_version is None: + return 0, None + + diff_target_curr_replica_version = {} + num_diff = 0 + for device_id, device_replicas_version in curr_replica_version.items(): + diff_target_curr_replica_version[device_id] = {} + for replica_no, curr_version in device_replicas_version.items(): + if curr_version != target_replica_version: + num_diff += 1 + diff_target_curr_replica_version[device_id][replica_no] = { + "op": "update", + "new_version": target_replica_version, + "old_version": curr_version + } + if num_diff == 0: + return 0, None + + return num_diff, diff_target_curr_replica_version + + def get_curr_replica_num_state_frm_db(self): + """ + Sync the current replica number state from the database. + Return the current replica number state. + """ + res_frm_db = FedMLModelCache.get_instance().get_deployment_result_list( + self.e_id, self.endpoint_name, self.model_name) + + curr_state = {} + if res_frm_db is None or len(res_frm_db) == 0: + # First time to get the replica number from the database + for id, target_num in self.target_replica_num.items(): + curr_state[str(id)] = 0 + else: + for result_item in res_frm_db: + # Unpack the result_item + result_device_id, _, result_payload = FedMLModelCache.get_instance().get_result_item_info(result_item) + curr_state[str(result_device_id)] = curr_state.get(str(result_device_id), 0) + 1 + + logging.info(f"[Replica Controller] [endpoint {self.e_id} ] curr_replica_state from db: {curr_state}") + return curr_state + + def get_curr_replica_version_frm_db(self): + """ + Sync the current replica version from the database. + Return the current replica version. + { + "id1": {$replica_no: "v1", $replica_no: "v2"}, + "id2": {$replica_no: "v1", $replica_no: "v2"}, + } + Return None if this model has not been deployed yet. + """ + curr_versions = {} + res_frm_db = FedMLModelCache.get_instance().get_deployment_result_list( + self.e_id, self.endpoint_name, self.model_name) + if res_frm_db is None or len(res_frm_db) == 0: + return None + else: + for result_item in res_frm_db: + # Unpack the result_item + result_device_id, replica_no, result_payload = (FedMLModelCache.get_instance(). + get_result_item_info(result_item)) + if str(result_device_id) not in curr_versions: + curr_versions[str(result_device_id)] = {} + curr_versions[str(result_device_id)][str(replica_no)] = result_payload["model_version"] + + return curr_versions + + def generate_diff_to_request_json(self): + """ + Write the diff (curr <> target) to the self.request_json. e.g. 
+        {
+            "replica_num_diff": {
+                id1: {"op": "add", "curr_num": 1, "target_num": 2},
+                id2: {"op": "add", "curr_num": 1, "target_num": 2},
+                id3: {"op": "remove", "curr_num": 1, "target_num": 0}
+            },
+            "replica_version_diff": {
+                "id1": {
+                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
+                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
+                },
+                "id2": {
+                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
+                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
+                }
+            },
+            "gpus_per_replica": 1
+        }
+        """
+        replica_num_diff_key = "replica_num_diff"
+        gpu_per_replica_key = "gpus_per_replica"
+
+        replica_num_diff = self.diff_target_curr_replica_num()
+        self.request_json[replica_num_diff_key] = replica_num_diff
+
+        self.request_json[gpu_per_replica_key] = self.gpu_per_replica
+        return self.request_json
+
+    def callback_update_curr_replica_num_state(self, changed_device_id, replica_no, op_type):
+        """
+        Callback function to update the current replica number.
+        curr_state: {id1: 1, id2: 1}
+        target_replica_state = {id1: 2, id2: 2}
+        intermediate_state = {id1: 2, id2: 1}
+        op_type: "add" (MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED) or "remove" (MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED)
+        """
+        if (str(changed_device_id) in self.curr_replica_updating_window) and \
+                (str(replica_no) in self.curr_replica_updating_window[str(changed_device_id)]):
+            # Should be viewed as updated; the replica number will not be changed.
+            return
+
+        if str(changed_device_id) not in self.intermediate_replica_num:
+            assert op_type == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED
+
+            # The intermediate state is not initialized yet, since it may derive from the database.
+            self.intermediate_replica_num[str(changed_device_id)] = 0
+
+        if op_type == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
+            self.intermediate_replica_num[str(changed_device_id)] += 1
+        elif op_type == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED:
+            self.intermediate_replica_num[str(changed_device_id)] -= 1
+
+    def is_all_replica_num_reconciled(self):
+        """
+        Check if the replica number on every device matches the target replica number.
+        """
+        for id, replica_no in self.intermediate_replica_num.items():
+            if id not in self.target_replica_num:   # All replicas on this device should be deleted
+                if replica_no != 0:
+                    return False
+                else:
+                    continue
+            if replica_no != self.target_replica_num[id]:
+                return False
+
+        for id, target_replica_num in self.target_replica_num.items():
+            if id not in self.intermediate_replica_num or self.intermediate_replica_num[id] != target_replica_num:
+                return False
+
+        logging.info(f"[Replica Controller] [endpoint {self.e_id} ] Replicas are reconciled as expected.")
+        logging.info(f"[Replica Controller] [endpoint {self.e_id} ] "
+                     f"intermediate_replica_num: {self.intermediate_replica_num}")
+        logging.info(f"[Replica Controller] [endpoint {self.e_id} ] "
+                     f"target_replica_num: {self.target_replica_num}")
+        return True
+
+    def get_first_chunk_devices_replica_update(self):
+        """
+        Rolling update.
+        Set the schema of the request json, which will be passed to the subprocess (device_server_runner).
+        The subprocess will send the initial deployment msg to the worker device(s);
+        then, callback_deployment_result will handle the rest of the updating msgs.
+
+        e.g. 
+    def get_first_chunk_devices_replica_update(self):
+        """
+        Rolling update.
+        Write the first-chunk schema into the request json, which will be passed to the
+        subprocess (device_server_runner). The subprocess sends the initial deployment
+        message to the worker device(s); after that, callback_deployment_result handles
+        the remaining update messages.
+
+        e.g.
+        {
+            "replica_version_diff": {
+                "id1": {
+                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
+                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
+                },
+                "id2": {
+                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
+                    $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
+                }
+            },
+        }
+
+        Return None if there is no replica version difference.
+        """
+        if self.total_replica_version_diff_num == 0:
+            return None
+
+        window_size = max(1, int(self.total_replica_version_diff_num * self.max_unavailable_rate))
+
+        first_chunk_devices_update = {}
+
+        for device_id, device_replicas_version in self.total_replica_version_diff.items():
+            for replica_no, diff in device_replicas_version.items():
+                if len(first_chunk_devices_update) >= window_size:
+                    break
+                if device_id not in first_chunk_devices_update:
+                    first_chunk_devices_update[device_id] = {}
+                first_chunk_devices_update[device_id][replica_no] = diff
+
+        return first_chunk_devices_update
+
+    def init_update_updating_window(self, first_chunk_devices_update):
+        """
+        Initialize the current replica updating window.
+        """
+        self.curr_replica_updating_window = copy.deepcopy(first_chunk_devices_update)
+
+    def callback_update_updating_window(self, device_id, replica_no):
+        """
+        Update the current replica updating window.
+        """
+        if str(device_id) not in self.curr_replica_updating_window:
+            return
+
+        if str(replica_no) not in self.curr_replica_updating_window[str(device_id)]:
+            return
+
+        # Remove the replica_no from the updating window
+        del self.curr_replica_updating_window[str(device_id)][str(replica_no)]
+
+        if len(self.curr_replica_updating_window[str(device_id)]) == 0:
+            del self.curr_replica_updating_window[str(device_id)]
+
+        # Change this replica's state in the global map
+        self.intermediate_replica_version[str(device_id)][str(replica_no)] = self.target_replica_version
+
+    def get_next_chunk_devices_replica(self):
+        """
+        Determine the next chunk of replicas to update:
+        - If no update is needed, return None.
+        - If the intermediate state equals the target state, return None.
+        - If the current updating window is not empty, return None.
+        - Otherwise, determine the next window and send the request message to the
+          device's replica handler.
+        """
+        if self.total_replica_version_diff_num == 0:
+            return None
+
+        if self.is_all_replica_version_reconciled():
+            return None
+
+        if len(self.curr_replica_updating_window) > 0:
+            return None
+
+        # Determine the next window
+        window_size = max(1, int(self.total_replica_version_diff_num * self.max_unavailable_rate))
+
+        next_chunk_devices_replicas_update = {}
+
+        for device_id, device_replicas_version in self.intermediate_replica_version.items():
+            for replica_no, version in device_replicas_version.items():
+                if version != self.target_replica_version:
+                    if device_id not in next_chunk_devices_replicas_update:
+                        next_chunk_devices_replicas_update[device_id] = {}
+                    next_chunk_devices_replicas_update[device_id][replica_no] = {
+                        "op": "update",
+                        "new_version": self.target_replica_version,
+                        "old_version": version
+                    }
+                    if len(next_chunk_devices_replicas_update) >= window_size:
+                        break
+
+        return next_chunk_devices_replicas_update
+
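+    # Window sizing sketch (hypothetical numbers): with total_replica_version_diff_num
+    # == 10 and max_unavailable_rate == 0.1, both chunk builders above use
+    # window_size == max(1, int(10 * 0.1)) == 1, i.e. replicas are updated one at a
+    # time so that at most roughly 10% of them are unavailable during a rolling update.
+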
+    def is_all_replica_version_reconciled(self):
+        """
+        Check if the version of every replica has been reconciled to the target version.
+        """
+        if self.total_replica_version_diff_num == 0:
+            return True
+
+        for device_id, device_replicas_version in self.intermediate_replica_version.items():
+            for replica_no, version in device_replicas_version.items():
+                if version != self.target_replica_version:
+                    return False
+        return True
+
+    def init_first_update_device_replica_mapping(self):
+        # If there is no replica version difference, return the request json unchanged.
+        first_chunk_dict = self.get_first_chunk_devices_replica_update()
+        if first_chunk_dict is None:
+            return self.request_json
+
+        # Update the updating window
+        self.init_update_updating_window(first_chunk_dict)
+
+        # Prepare and return the request json
+        replica_version_diff_key = "replica_version_diff"
+        self.request_json[replica_version_diff_key] = first_chunk_dict
+        return self.request_json
diff --git a/python/fedml/computing/scheduler/model_scheduler/device_replica_handler.py b/python/fedml/computing/scheduler/model_scheduler/device_replica_handler.py
new file mode 100644
index 0000000000..d8865ed854
--- /dev/null
+++ b/python/fedml/computing/scheduler/model_scheduler/device_replica_handler.py
@@ -0,0 +1,138 @@
+import logging
+from ..scheduler_core.compute_cache_manager import ComputeCacheManager
+from ..comm_utils.container_utils import ContainerUtils
+from ..comm_utils import security_utils
+from .device_client_constants import ClientConstants
+from .device_model_msg_object import FedMLModelMsgObject
+
+
+class FedMLDeviceReplicaHandler:
+    def __init__(self, worker_id, request_json: dict):
+        """
+        Handler on the worker device that actually executes the reconciliation logic
+        (add, remove, update).
+
+        worker_id: id of this worker device
+        e_id: unique id (i.e. endpoint_id) for each deployment
+        request_json: json from MLOps for this deployment
+        gpu_per_replica: number of gpus required per replica
+        """
+        self.worker_id = worker_id
+        self.request_json = request_json
+        self.request_msg_obj = FedMLModelMsgObject("replica_handler", request_json)
+        self.e_id = self.request_msg_obj.run_id
+        self.gpu_per_replica = self.request_msg_obj.gpu_per_replica
+
+        self.replica_num_diff = self.get_diff_replica_num_frm_request_json()
+        self.replica_version_diff = self.get_diff_replica_version_frm_request_json()
+
+        self.end_point_name = self.request_msg_obj.end_point_name
+        self.inference_model_name = self.request_msg_obj.model_name
+        self.model_version = self.request_msg_obj.model_version
+        self.model_id = self.request_msg_obj.model_id
+
+        self.device_avail_gpus = self.get_device_avail_gpus_frm_db()
+
+    def get_device_avail_gpus_frm_db(self):
+        """
+        Get the available gpus of this worker from the local database.
+        """
+        available_gpu_ids = ComputeCacheManager.get_instance().get_gpu_cache().get_device_available_gpu_ids(
+            self.worker_id)
+        logging.info(f"[Replica Handler] [endpoint {self.e_id}] [worker {self.worker_id}] "
+                     f"All device_avail_gpus: {available_gpu_ids}")
+        return available_gpu_ids
+
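+    # Shape sketch of the diff fields consumed by the two get_diff_*_frm_request_json
+    # readers below (hypothetical worker id "8077" and versions; a real request_json
+    # from MLOps carries many more fields):
+    #
+    #   {
+    #       "replica_num_diff": {"8077": {"op": "add", "curr_num": 1, "target_num": 2}},
+    #       "replica_version_diff": {"8077": {"1": {"op": "update",
+    #                                               "new_version": "v2",
+    #                                               "old_version": "v1"}}},
+    #       "gpus_per_replica": 1,
+    #   }
+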
+    def get_diff_replica_num_frm_request_json(self):
+        """
+        Read the replica number diff passed in by the master's request json.
+        Return the entry for this worker only, e.g.
+        {"op": "add", "curr_num": 1, "target_num": 2}
+        """
+        if "replica_num_diff" in self.request_json and str(self.worker_id) in self.request_json["replica_num_diff"]:
+            return self.request_json["replica_num_diff"][str(self.worker_id)]
+        return None
+
+    def get_diff_replica_version_frm_request_json(self):
+        """
+        Read the replica version diff passed in by the master's request json.
+        Return the entry for this worker only, e.g.
+        {
+            $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
+            $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
+        }
+        """
+        if ("replica_version_diff" in self.request_json and
+                str(self.worker_id) in self.request_json["replica_version_diff"]):
+            return self.request_json["replica_version_diff"][str(self.worker_id)]
+
+        return None
+
+    def reconcile_num_replica(self):
+        """
+        Resolve conflicts between different reconciliation requests. The add & delete
+        requests should be executed in order and atomically (i.e. with rollback).
+
+        Return (prev_rank, op, op_num).
+        """
+        if not self.replica_num_diff:
+            logging.info(f"replica_num_diff is empty, will not reconcile.")
+            return None, None, None
+
+        if self.replica_num_diff["op"] not in ["add", "remove"]:
+            raise ValueError(f"op should be add or remove. Got {self.replica_num_diff['op']}")
+
+        prev_rank = self.replica_num_diff["curr_num"] - 1
+        if self.replica_num_diff["op"] == "add":
+            assert self.replica_num_diff["target_num"] > self.replica_num_diff["curr_num"]
+            op, op_num = (self.replica_num_diff["op"],
+                          self.replica_num_diff["target_num"] - self.replica_num_diff["curr_num"])
+        else:
+            assert self.replica_num_diff["target_num"] < self.replica_num_diff["curr_num"]
+            op, op_num = (self.replica_num_diff["op"],
+                          self.replica_num_diff["curr_num"] - self.replica_num_diff["target_num"])
+        return prev_rank, op, op_num
+
+    def remove_replica(self, rank):
+        """
+        Remove the replica with the given rank on this device by removing its container.
+        """
+        running_model_name = ClientConstants.get_running_model_name(
+            self.end_point_name, self.inference_model_name, self.model_version, self.e_id, self.model_id,
+            self.worker_id)
+        container_prefix = ("{}".format(ClientConstants.FEDML_DEFAULT_SERVER_CONTAINER_NAME_PREFIX) + "__" +
+                            security_utils.get_content_hash(running_model_name))
+        container_name = container_prefix + "__" + str(rank)
+        logging.info(f"[Replica Handler] [Remove Replica] [Device {self.worker_id}] [Endpoint {self.e_id}]"
+                     f" [Replica {rank}] [Container {container_name}]")
+        ContainerUtils.get_instance().remove_container(container_name)
+
+    def reconcile_replica_version(self):
+        """
+        Return the list of replica ranks to be updated, given a per-worker version diff such as
+        {
+            $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"},
+            $replica_no: {"op": "update", "new_version": "v2", "old_version": "v1"}
+        }
+        For every listed replica, update the version: stop and remove the container, update
+        the records in the db, then start the new container and report when it is ready.
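+
+        For example, a hypothetical two-entry diff with replica numbers "1" and "2"
+        maps to zero-based ranks, so this method returns ([0, 1], "update").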
+ """ + replica_rank_to_update = [] + ret_op = "update" + if not self.replica_version_diff: + logging.info(f"replica_version_diff is empty, will not reconcile.") + return None, None + + for replica_no, diff in self.replica_version_diff.items(): + replica_rank_to_update.append(int(replica_no)-1) + + return replica_rank_to_update, ret_op diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py deleted file mode 100755 index 41e2e5cd44..0000000000 --- a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py +++ /dev/null @@ -1,2160 +0,0 @@ -import copy -import json -import logging -import multiprocessing -import platform -import sys - -from multiprocessing import Process -import os -import shutil -import subprocess -import threading - -import time -import traceback -import urllib -import uuid -import zipfile -from os import listdir - -import requests -import torch - -import fedml -from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils - -from ..comm_utils import sys_utils -from .device_server_data_interface import FedMLServerDataInterface -from ..scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol -from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog - -from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager -from ..comm_utils.yaml_utils import load_yaml_config -from .device_client_constants import ClientConstants -from .device_server_constants import ServerConstants - -from ....core.mlops.mlops_metrics import MLOpsMetrics - -from ....core.mlops.mlops_configs import MLOpsConfigs -from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon -from ....core.mlops.mlops_status import MLOpsStatus -from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program -from .device_model_cache import FedMLModelCache -from .device_model_msg_object import FedMLModelMsgObject -#from ....serving.fedml_server import FedMLModelServingServer -from ....core.mlops.mlops_utils import MLOpsUtils -from ..comm_utils.constants import SchedulerConstants -from .device_model_db import FedMLModelDatabase - - -class RunnerError(BaseException): - """ Runner failed. """ - pass - - -class RunnerCompletedError(Exception): - """ Runner completed. 
""" - pass - - -class FedMLServerRunner: - FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-" - - def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0): - self.inference_gateway_process = None - self.local_api_process = None - self.run_process_event = None - self.run_process_event_map = dict() - self.run_process_completed_event = None - self.run_process_completed_event_map = dict() - self.run_as_cloud_agent = False - self.run_as_cloud_server = False - self.run_as_edge_server_and_agent = False - self.run_as_cloud_server_and_agent = False - self.fedml_packages_base_dir = None - self.fedml_packages_unzip_dir = None - self.mqtt_mgr = None - self.running_request_json = dict() - self.run_id = run_id - self.client_mqtt_mgr = None - self.client_mqtt_is_connected = False - self.client_mqtt_lock = None - self.unique_device_id = None - self.edge_id = edge_id - self.server_agent_id = 0 - if request_json is not None: - self.server_agent_id = request_json.get("server_id", 0) - self.process = None - self.args = args - self.request_json = copy.deepcopy(request_json) - self.version = args.version - self.device_id = args.device_id - self.cur_dir = os.path.split(os.path.realpath(__file__))[0] - if args.current_running_dir is not None: - self.cur_dir = args.current_running_dir - - self.agent_config = agent_config - self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data") - self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data") - self.fedml_data_dir = self.fedml_data_base_package_dir - self.fedml_config_dir = os.path.join("/", "fedml", "conf") - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {} - - self.mlops_metrics = None - self.run_status = None - self.infer_host = "127.0.0.1" - self.redis_addr = "local" - self.redis_port = "6379" - self.redis_password = "fedml_default" - - self.slave_deployment_statuses_mapping = dict() - self.slave_deployment_results_mapping = dict() - self.slave_update_result_mapping = dict() - - self.model_runner_mapping = dict() - self.ntp_offset = MLOpsUtils.get_ntp_offset() - - self.subscribed_topics = list() - self.user_name = None - - def build_dynamic_constrain_variables(self, run_id, run_config): - pass - - def unzip_file(self, zip_file, unzip_file_path): - unziped_file_name = "" - if zipfile.is_zipfile(zip_file): - with zipfile.ZipFile(zip_file, "r") as zipf: - zipf.extractall(unzip_file_path) - unziped_file_name = zipf.namelist()[0] - - return unziped_file_name - - def package_download_progress(self, count, blksize, filesize): - self.check_runner_stop_event() - - downloaded = count * blksize - downloaded = filesize if downloaded > filesize else downloaded - progress = (downloaded / filesize * 100) if filesize != 0 else 0 - progress_int = int(progress) - downloaded_kb = format(downloaded / 1024, '.2f') - - # since this hook funtion is stateless, we need a state to avoid printing progress repeatly - if count == 0: - self.prev_download_progress = 0 - if progress_int != self.prev_download_progress and progress_int % 5 == 0: - self.prev_download_progress = progress_int - logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) - - def retrieve_and_unzip_package(self, package_name, package_url): - local_package_path = ServerConstants.get_model_package_dir() - if not os.path.exists(local_package_path): - os.makedirs(local_package_path, exist_ok=True) - local_package_file = "{}.zip".format(os.path.join(local_package_path, package_name)) - if 
os.path.exists(local_package_file): - os.remove(local_package_file) - urllib.request.urlretrieve(package_url, filename=None, reporthook=self.package_download_progress) # do not rename - unzip_package_path = ServerConstants.get_model_dir() - self.fedml_packages_base_dir = unzip_package_path - try: - shutil.rmtree( - os.path.join(unzip_package_path, package_name), ignore_errors=True - ) - except Exception as e: - pass - logging.info("local_package_file {}, unzip_package_path {}".format( - local_package_file, unzip_package_path)) - package_name = self.unzip_file(local_package_file, unzip_package_path) - unzip_package_path = os.path.join(unzip_package_path, package_name) - return unzip_package_path - - def update_local_fedml_config(self, run_id, run_config): - model_config = run_config - model_name = model_config["model_name"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - inference_engine = model_config.get("inference_engine", 0) - inference_end_point_id = run_id - - # Copy config file from the client - unzip_package_path = self.retrieve_and_unzip_package( - model_name, model_storage_url - ) - fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml") - - # Load the above config to memory - package_conf_object = {} - if os.path.exists(fedml_local_config_file): - package_conf_object = load_yaml_config(fedml_local_config_file) - - return unzip_package_path, package_conf_object - - def get_usr_indicated_token(self, request_json) -> str: - usr_indicated_token = "" - if "parameters" in request_json and "authentication_token" in request_json["parameters"]: - usr_indicated_token = request_json["parameters"]["authentication_token"] - return usr_indicated_token - - def build_dynamic_args(self, run_config, package_conf_object, base_dir): - pass - - def run(self, process_event, completed_event): - # print(f"Model master runner process id {os.getpid()}, run id {self.run_id}") - - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - self.run_process_completed_event = completed_event - run_id = self.request_json.get("end_point_id") - - try: - MLOpsUtils.set_ntp_offset(self.ntp_offset) - - self.setup_client_mqtt_mgr() - - self.run_impl() - except RunnerError: - logging.info("Runner stopped.") - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, - is_from_model=True, edge_id=self.edge_id) - except RunnerCompletedError: - logging.info("Runner completed.") - except Exception as e: - logging.error("Runner exits with exceptions.") - logging.error(traceback.format_exc()) - logging.error(e) - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, - is_from_model=True, edge_id=self.edge_id) - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - if self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - sys.exit(1) - finally: - logging.info("Release resources.") - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - if self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - if not self.run_as_cloud_server: - 
self.release_client_mqtt_mgr() - - def parse_model_run_params(self, running_json): - run_id = running_json["end_point_id"] - end_point_name = running_json["end_point_name"] - token = running_json["token"] - user_id = running_json["user_id"] - user_name = running_json["user_name"] - device_ids = running_json["device_ids"] - device_objs = running_json["device_objs"] - - model_config = running_json["model_config"] - model_name = model_config["model_name"] - model_id = model_config["model_id"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - inference_engine = model_config.get("inference_engine", 0) - model_is_from_open = model_config["is_from_open"] - inference_end_point_id = run_id - use_gpu = "gpu" # TODO: Get GPU from device infos - memory_size = "256m" # TODO: Get Memory size for each instance - model_version = model_config["model_version"] - model_config_parameters = running_json.get("parameters", {}) - - inference_port = model_config_parameters.get("server_internal_port", # Internal port is for the gateway - ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) - inference_port_external = model_config_parameters.get("server_external_port", inference_port) - - return run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ - model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ - inference_end_point_id, use_gpu, memory_size, model_version, inference_port - - def inference_run(self): - # run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ - # model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ - # inference_end_point_id, use_gpu, memory_size, model_version, inference_port = self.parse_model_run_params(self.request_json) - # - # inference_server = FedMLModelServingServer(self.args, - # end_point_name, - # model_name, - # model_version, - # inference_request=self.request_json) - # inference_server.run() - pass - - def run_impl(self): - run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ - model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ - inference_end_point_id, use_gpu, memory_size, model_version, inference_port = self.parse_model_run_params(self.request_json) - - logging.info("model deployment request: {}".format(self.request_json)) - - # Initiate an FedMLInferenceServer object which the request will be forwarded to - # server_runner = FedMLServerRunner( - # self.args, run_id=self.run_id, request_json=self.request_json, agent_config=self.agent_config - # ) - # inference_process = Process(target=server_runner.inference_run) - # inference_process.start() - - logging.info("send deployment stages...") - - self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id) - - self.check_runner_stop_event() - - # Send stage: MODEL_DEPLOYMENT_STAGE4 = "ForwardRequest2Slave" - self.send_deployment_stages(self.run_id, model_name, model_id, - "", - ServerConstants.MODEL_DEPLOYMENT_STAGE4["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"], - ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"]) - - self.args.run_id = self.run_id - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - # report server running status - logging.info("report deployment status...") - 
self.check_runner_stop_event() - self.mlops_metrics.report_server_training_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, - is_from_model=True, running_json=json.dumps(self.request_json), edge_id=self.edge_id) - self.send_deployment_status(self.run_id, end_point_name, - model_name, "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING) - - # start unified inference server - self.start_device_inference_gateway( - run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port) - - # start inference monitor server - self.stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version) - self.start_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version) - - # Changed the status to "IDLE" - self.mlops_metrics.broadcast_server_training_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, - is_from_model=True, edge_id=self.edge_id) - - # forward deployment request to slave devices - logging.info("send the model inference request to slave devices...") - self.check_runner_stop_event() - - # handle "op:replace" - first_chunk_devices_update = self.request_json.get("first_chunk_devices_update", list()) - if len(first_chunk_devices_update) > 0: - self.send_first_scroll_update_msg(first_chunk_devices_update) - - # handle "op:add" - should_added_devices = self.send_deployment_start_request_to_edges() - - # handle "op:delete" - self.send_deployment_delete_request_to_edges(payload=json.dumps(self.request_json), model_msg_object=None) - - if len(should_added_devices) == 0 and len(first_chunk_devices_update) == 0: - ''' - If just including delete op, we do not need to wait for the slave devices to finish the delete. - ''' - ip = self.get_ip_address(self.request_json) - master_port = os.getenv("FEDML_MASTER_PORT", None) - if master_port is not None: - inference_port = int(master_port) - model_inference_port = inference_port - if ip.startswith("http://") or ip.startswith("https://"): - model_inference_url = "{}/api/v1/predict".format(ip) - else: - model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port) - - self.send_deployment_status(self.run_id, end_point_name, - model_name, - model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED) - - while True: - self.check_runner_stop_event() - time.sleep(3) - - def check_runner_stop_event(self): - if self.run_process_event is not None and self.run_process_event.is_set(): - logging.info("Received stopping event.") - raise RunnerError("Runner stopped") - - if self.run_process_completed_event is not None and self.run_process_completed_event.is_set(): - logging.info("Received completed event.") - raise RunnerCompletedError("Runner completed") - - def start_device_inference_gateway( - self, run_id, end_point_name, model_id, - model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT): - # start unified inference server - running_model_name = ServerConstants.get_running_model_name(end_point_name, - model_name, model_version, run_id, model_id) - python_program = get_python_program() - master_port = os.getenv("FEDML_MASTER_PORT", None) - if master_port is not None: - inference_port = int(master_port) - if not ServerConstants.is_running_on_k8s(): - logging.info(f"start the model inference gateway, end point {run_id}, " - f"model name {model_name} at port {inference_port}...") - self.check_runner_stop_event() - - use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", 
"False") - use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False - use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False") - use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False - inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api" - inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd) - if inference_gateway_pids is None or len(inference_gateway_pids) <= 0: - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - connect_str = "@FEDML@" - ext_info = sys_utils.random1( - self.agent_config["mqtt_config"]["BROKER_HOST"] + connect_str + - str(self.agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str + - self.agent_config["mqtt_config"]["MQTT_USER"] + connect_str + - self.agent_config["mqtt_config"]["MQTT_PWD"] + connect_str + - str(self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT") - self.inference_gateway_process = ServerConstants.exec_console_with_script( - "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " - "END_POINT_NAME=\"{}\" " - "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " - "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - self.redis_addr, self.redis_port, self.redis_password, - end_point_name, - model_name, model_version, "", self.args.version, - use_mqtt_inference, use_worker_gateway, ext_info, - python_program, inference_gw_cmd, str(inference_port), fedml_base_dir - ), - should_capture_stdout=False, - should_capture_stderr=False - ) - - def start_device_inference_monitor(self, run_id, end_point_name, - model_id, model_name, model_version, check_stopped_event=True): - # start inference monitor server - logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...") - if check_stopped_event: - self.check_runner_stop_event() - run_id_str = str(run_id) - pip_source_dir = os.path.dirname(__file__) - monitor_file = os.path.join(pip_source_dir, "device_model_monitor.py") - python_program = get_python_program() - running_model_name = ServerConstants.get_running_model_name(end_point_name, - model_name, model_version, run_id, model_id) - self.monitor_process = ServerConstants.exec_console_with_shell_script_list( - [ - python_program, - monitor_file, - "-v", - self.args.version, - "-ep", - run_id_str, - "-epn", - str(end_point_name), - "-mi", - str(model_id), - "-mn", - model_name, - "-mv", - model_version, - "-iu", - "infer_url", - "-ra", - self.redis_addr, - "-rp", - self.redis_port, - "-rpw", - self.redis_password - ], - should_capture_stdout=False, - should_capture_stderr=False - ) - - def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version): - # stop inference monitor server - logging.info(f"stop the model inference monitor, end point {run_id}, model name {model_name}...") - sys_utils.cleanup_model_monitor_processes(run_id, end_point_name, - model_id, model_name, model_version) - - def cleanup_run_when_finished(self): - logging.info("Cleanup run successfully when finished.") - - self.mlops_metrics.broadcast_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, - is_from_model=True, edge_id=self.edge_id - ) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - 
time.sleep(1) - - try: - local_package_path = ServerConstants.get_package_download_dir() - for package_file in listdir(local_package_path): - if os.path.basename(package_file).startswith("run_" + str(self.run_id)): - shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True) - except Exception as e: - pass - - def cleanup_run_when_starting_failed(self): - logging.info("Cleanup run successfully when starting failed.") - - self.mlops_metrics.broadcast_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, - is_from_model=True, edge_id=self.edge_id) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - try: - local_package_path = ServerConstants.get_package_download_dir() - for package_file in listdir(local_package_path): - if os.path.basename(package_file).startswith("run_" + str(self.run_id)): - shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True) - except Exception as e: - pass - - def cleanup_run_when_deploy_failed(self): - topic = f"model_ops/model_device/delete_deployment/{self.edge_id}" - self.callback_delete_deployment(topic, payload=json.dumps(self.request_json)) - - def callback_deployment_result_message(self, topic=None, payload=None): - # Save deployment result to local cache - topic_splits = str(topic).split('/') - device_id = topic_splits[-1] - payload_json = json.loads(payload) - end_point_id = payload_json["end_point_id"] - end_point_name = payload_json["end_point_name"] - model_id = payload_json["model_id"] - model_name = payload_json["model_name"] - model_version = payload_json["model_version"] - model_status = payload_json["model_status"] - run_id_str = str(end_point_id) - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - set_deployment_result(end_point_id, end_point_name, - model_name, model_version, - device_id, payload) - if self.slave_deployment_results_mapping.get(run_id_str, None) is None: - self.slave_deployment_results_mapping[run_id_str] = dict() - self.slave_deployment_results_mapping[run_id_str][str(device_id)] = model_status - - logging.info("callback_deployment_result_message: topic {}, payload {}, mapping {}.".format( - topic, payload, self.slave_deployment_results_mapping[run_id_str])) - - request_json = self.running_request_json.get(run_id_str, None) - if request_json is None: - logging.error(f"The endpoint {end_point_id} is not running.") - self.send_deployment_status( - end_point_id, end_point_name, payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - return - - all_device_id_list = request_json["device_ids"] - - device_id_list = [] - - for device in all_device_id_list: - if str(device) == str(self.edge_id): - continue - - if device in request_json["diff_devices"] and \ - (request_json["diff_devices"][device] == ServerConstants.DEVICE_DIFF_ADD_OPERATION or - request_json["diff_devices"][device] == ServerConstants.DEVICE_DIFF_REPLACE_OPERATION): - device_id_list.append(device) - - if request_json["diff_devices"].get(int(device_id), None) == ServerConstants.DEVICE_DIFF_REPLACE_OPERATION: - if model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED: - # TODO: Support rollback - return - else: - # Get record from the first message that Java mlops sent - total_device_objs_list = self.request_json["device_objs"] - device_obj_to_insert = None - - for device_obj in total_device_objs_list: - if device_obj["id"] == int(device_id): - device_obj["status"] = ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED - device_obj_to_insert = device_obj - break - if not device_obj_to_insert: - raise Exception(f"Cannot find device {device_id} in the device list {total_device_objs_list}") - - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - add_end_point_device_info(request_json["end_point_id"], end_point_name, - json.dumps(device_obj_to_insert)) - - self.send_next_scroll_update_msg(int(device_id)) - - if len(self.slave_deployment_results_mapping[run_id_str].keys()) >= len(device_id_list): - ''' - When all the devices have finished the add / update operation - ''' - failed_to_deploy_all_models = False - for device_item in device_id_list: - if device_item == self.edge_id: # Skip the master - continue - status = self.slave_deployment_results_mapping[run_id_str]. \ - get(str(device_item), ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - if status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED: - failed_to_deploy_all_models = True - break - - # Failed to deploy models. - if failed_to_deploy_all_models: - # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress" - self.send_deployment_stages(end_point_id, model_name, model_id, - "", - ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"], - "Failed to deploy the model to all devices.") - FedMLModelDatabase.get_instance().delete_deployment_status( - run_id_str, end_point_name, model_name, model_version=model_version) - FedMLModelDatabase.get_instance().delete_deployment_result( - run_id_str, end_point_name, model_name, model_version=model_version) - - # reset slave_deployment_results_mapping, incase user might use this for redeployment - self.slave_deployment_results_mapping[run_id_str] = dict() - return - - # 1. 
We should generate one unified inference api - # Note that here we use the gateway port instead of the inference port that is used by the slave device - model_config_parameters = request_json["parameters"] - inference_port = model_config_parameters.get("server_internal_port", ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) - inference_port_external = model_config_parameters.get("server_external_port", inference_port) - ip = self.get_ip_address(request_json) - - if ip.startswith("http://") or ip.startswith("https://"): - model_inference_url = "{}/inference/{}".format(ip, end_point_id) - else: - model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, end_point_id) - - # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress" - self.send_deployment_stages(end_point_id, model_name, model_id, - model_inference_url, - ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"], - "inference url: {}".format(model_inference_url)) - - # 2. We should send to MBE(ModelOps Backend) - model_slave_url = payload_json["model_url"] - payload_json["model_url"] = model_inference_url - payload_json["port"] = inference_port_external - token = FedMLModelCache.get_instance(self.redis_addr, self.redis_port).get_end_point_token(end_point_id, end_point_name, model_name) - - model_metadata = payload_json["model_metadata"] - model_inputs = model_metadata["inputs"] - ret_inputs = list() - if "type" in model_metadata and model_metadata["type"] == "default": - payload_json["input_json"] = {"end_point_name": end_point_name, - "model_name": model_name, - "token": str(token), - "inputs": model_inputs, - "outputs": []} - payload_json["output_json"] = model_metadata["outputs"] - else: - # triton model, auto generate inputs - for input_item in model_inputs: - ret_item = input_item - shape = ret_item["shape"] - data_type = ret_item["datatype"] - if ServerConstants.MODEL_DATA_TYPE_MAPPING[data_type] == ServerConstants.MODEL_DATA_TYPE_INT: - for i in range(len(shape)): - if shape[i] == -1: # if input shape is dynamic, we set a default value 1 - shape[i] = 1 - ret_item["data"] = torch.randint(0, 1, shape).tolist() - else: - for i in range(len(shape)): - if shape[i] == -1: # if input shape is dynamic, we set a default value 1 - shape[i] = 1 - ret_item["data"] = torch.zeros(shape).tolist() - ret_inputs.append(ret_item) - - payload_json["input_json"] = {"end_point_name": end_point_name, - "model_name": model_name, - "token": str(token), - "inputs": {"inputs": ret_inputs}, # Nested inputs - "outputs": model_metadata["outputs"]} - payload_json["output_json"] = model_metadata["outputs"] - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_deployment_result(end_point_id, end_point_name, - model_name, model_version, - self.edge_id, json.dumps(payload_json)) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - set_end_point_activation(end_point_id, end_point_name, True) - self.send_deployment_results_with_payload(end_point_id, end_point_name, payload_json) - - payload_json_saved = payload_json - payload_json_saved["model_slave_url"] = model_slave_url - FedMLServerDataInterface.get_instance().save_job_result(end_point_id, self.edge_id, - json.dumps(payload_json_saved)) - - self.slave_deployment_results_mapping[run_id_str] = dict() - - time.sleep(3) - self.set_runner_completed_event(end_point_id) - - def callback_deployment_status_message(self, topic=None, payload=None): - # Save deployment status to local cache - topic_splits = str(topic).split('/') - device_id = topic_splits[-1] - payload_json = json.loads(payload) - end_point_id = payload_json["end_point_id"] - end_point_name = payload_json["end_point_name"] - model_name = payload_json["model_name"] - model_version = payload_json["model_version"] - inference_port = payload_json.get("inference_external_api_port", ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) - run_id_str = str(end_point_id) - - model_status = payload_json["model_status"] - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_deployment_status(end_point_id, end_point_name, - model_name, model_version, - device_id, payload) - if self.slave_deployment_statuses_mapping.get(run_id_str, None) is None: - self.slave_deployment_statuses_mapping[run_id_str] = dict() - self.slave_deployment_statuses_mapping[run_id_str][device_id] = model_status - logging.info("callback_deployment_status_message: topic {}, payload {}, mapping {}.".format( - topic, payload, self.slave_deployment_statuses_mapping[run_id_str])) - - # When all deployments are finished - request_json = self.running_request_json.get(run_id_str, None) - if request_json is None: - logging.error(f"The endpoint {end_point_id} is not running.") - self.send_deployment_status( - self.run_id, end_point_name, payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - return - - device_id_list = [] - for device in request_json["device_ids"]: - if str(device) == str(self.edge_id): - continue - if device in request_json["diff_devices"] and \ - (request_json["diff_devices"][device] == ServerConstants.DEVICE_DIFF_ADD_OPERATION or - request_json["diff_devices"][device] == ServerConstants.DEVICE_DIFF_REPLACE_OPERATION): - device_id_list.append(device) - - if len(self.slave_deployment_statuses_mapping[run_id_str].keys()) >= len(device_id_list): - failed_to_deploy_all_models = False - for device_item in device_id_list: - if device_item == self.edge_id: # Skip the master - continue - status = self.slave_deployment_statuses_mapping[run_id_str]. \ - get(str(device_item), ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - if status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED: - failed_to_deploy_all_models = True - break - - # Failed to deploy the model to all devices - if failed_to_deploy_all_models: - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_activation(end_point_id, end_point_name, False) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - set_end_point_status(end_point_id, end_point_name, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - self.send_deployment_status(end_point_id, end_point_name, - payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - - time.sleep(2) - self.cleanup_run_when_deploy_failed() - - # reset slave_deployment_statuses_mapping, incase user might use this for redeployment - self.slave_deployment_statuses_mapping[run_id_str] = dict() - return - - # Send deployment finished message to ModelOps - ip = self.get_ip_address(request_json) - master_port = os.getenv("FEDML_MASTER_PORT", None) - if master_port is not None: - inference_port = int(master_port) - model_inference_port = inference_port - if ip.startswith("http://") or ip.startswith("https://"): - model_inference_url = "{}/inference/{}".format(ip, end_point_id) - else: - model_inference_url = "http://{}:{}/inference/{}".format(ip, model_inference_port, end_point_id) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_activation(end_point_id, end_point_name, True) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_status(end_point_id, end_point_name, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED) - self.send_deployment_status(end_point_id, end_point_name, - payload_json["model_name"], - model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED) - - # Clean the status in case next deployment - self.slave_deployment_statuses_mapping[run_id_str] = dict() - - def send_deployment_start_request_to_edges(self): - run_id = self.request_json["run_id"] - - edge_id_list = [] - for device_id in self.request_json["device_ids"]: - if device_id in self.request_json["diff_devices"] and \ - (self.request_json["diff_devices"][device_id] == ServerConstants.DEVICE_DIFF_ADD_OPERATION): - edge_id_list.append(device_id) - - logging.info("Edge ids before diff: " + str(self.request_json["device_ids"])) - logging.info("Edge ids diff: " + str(self.request_json["diff_devices"])) - logging.info("Edge ids after diff: " + str(edge_id_list)) - - self.request_json["master_node_ip"] = self.get_ip_address(self.request_json) - should_added_devices = [] - for edge_id in edge_id_list: - if edge_id == self.edge_id: - continue - should_added_devices.append(edge_id) - # send start deployment request to each device - self.send_deployment_start_request_to_edge(edge_id) - return should_added_devices - - def send_deployment_start_request_to_edge(self, edge_id): - topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(edge_id)) - logging.info("start_deployment: send topic " + topic_start_deployment + " to client...") - self.client_mqtt_mgr.send_message_json(topic_start_deployment, json.dumps(self.request_json)) - - def get_ip_address(self, request_json): - # OPTION 1: Use local ip - ip = ServerConstants.get_local_ip() - - # OPTION 2: Auto detect public ip - if "parameters" in request_json and \ - ServerConstants.AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \ - request_json["parameters"][ServerConstants.AUTO_DETECT_PUBLIC_IP]: - ip = ServerConstants.get_public_ip() - logging.info("Auto detect public ip for master: " + ip) - - # OPTION 3: Use user indicated ip - if self.infer_host is not None and self.infer_host != "127.0.0.1" and self.infer_host != "localhost": - ip = self.infer_host - - return ip - - def send_deployment_delete_request_to_edges(self, payload, model_msg_object): - if model_msg_object is None: # Called 
after the diff operation - if "diff_devices" not in self.request_json or self.request_json["diff_devices"] is None: - return - else: - edge_id_list_to_delete = [] - for device_id in self.request_json["diff_devices"]: - if self.request_json["diff_devices"][device_id] == ServerConstants.DEVICE_DIFF_DELETE_OPERATION: - edge_id_list_to_delete.append(device_id) - if len(edge_id_list_to_delete) == 0: - return - - try: - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, - self.redis_password) - - # 1. Get & Delete Deployment Status in Redis / SQLite - devices_status_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_deployment_status_list(self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"]) - delete_devices_status_list = [] - for device_status in devices_status_list: - device_status_dict = json.loads(device_status) - if int(device_status_dict["cache_device_id"]) in edge_id_list_to_delete: - delete_devices_status_list.append(device_status) - - for delete_item in delete_devices_status_list: - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_status( - delete_item, self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"] - ) - - # 2. Get & Delete the endpoint device info in Redis / SQLite - device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_end_point_device_info(self.request_json["run_id"]) - - if device_objs is None: - raise Exception("The device list in local redis is None") - else: - total_device_objs_list = json.loads(device_objs) - for device_obj in total_device_objs_list: - if device_obj["id"] in edge_id_list_to_delete: - total_device_objs_list.remove(device_obj) - - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_device_info( - self.request_json["end_point_id"], self.request_json["end_point_name"], - json.dumps(total_device_objs_list)) - - # 3. Delete the result in deployment result list in Redis / SQLite - device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - get_deployment_result_list(self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"]) - delete_device_result_list = [] - for device_result in device_result_list: - device_result_dict = json.loads(device_result) - if int(device_result_dict["cache_device_id"]) in edge_id_list_to_delete: - delete_device_result_list.append(device_result) - - for delete_item in delete_device_result_list: - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result( - delete_item, self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"] - ) - - except Exception as e: - run_id = self.request_json["run_id"] - error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/error_delete_{run_id}.txt" - if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))): - os.makedirs(os.path.dirname(os.path.expanduser(error_log_path))) - with open(os.path.expanduser(error_log_path), "w") as f: - f.write(str(self.request_json)) - f.write(str(e)) - f.write('\n') - raise e - - else: # Delete the whole endpoint - edge_id_list_to_delete = model_msg_object.device_ids - - # For Debug - if payload is not None: - debug_log_path = f"~/.fedml/fedml-model-server/fedml/logs/tmp_debug_delete_payload.txt" - if not os.path.exists(os.path.dirname(os.path.expanduser(debug_log_path))): - os.makedirs(os.path.dirname(os.path.expanduser(debug_log_path))) - with open(os.path.expanduser(debug_log_path), "w") as f: - f.write(str(payload)) - - logging.info("Device ids to be deleted: " + str(edge_id_list_to_delete)) - for edge_id in edge_id_list_to_delete: - if edge_id == self.edge_id: - continue - # send delete deployment request to each model device - topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(edge_id)) - logging.info("delete_deployment: send topic " + topic_delete_deployment + " to client...") - self.client_mqtt_mgr.send_message_json(topic_delete_deployment, payload) - - def ota_upgrade(self, payload, request_json): - run_id = request_json["end_point_id"] - force_ota = False - ota_version = None - - try: - parameters = request_json.get("parameters", None) - common_args = parameters.get("common_args", None) - force_ota = common_args.get("force_ota", False) - ota_version = common_args.get("ota_version", None) - except Exception as e: - pass - - if force_ota and ota_version is not None: - should_upgrade = True if ota_version != fedml.__version__ else False - upgrade_version = ota_version - else: - try: - fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version) - except Exception as e: - return - - should_upgrade = False if fedml_is_latest_version else True - upgrade_version = remote_ver - - if should_upgrade: - job_obj = FedMLServerDataInterface.get_instance().get_job_by_id(run_id) - if job_obj is None: - FedMLServerDataInterface.get_instance(). 
\ - save_started_job(run_id, self.edge_id, time.time(), - ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, - ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, - payload) - - logging.info(f"Upgrade to version {upgrade_version} ...") - - sys_utils.do_upgrade(self.version, upgrade_version) - - raise Exception("Restarting after upgraded...") - - def callback_start_deployment(self, topic, payload): - """ - topic: model_ops/model_device/start_deployment/model-agent-device-id - payload: - { - "timestamp": 1671440005119, - "end_point_id": 4325, - "token": "FCpWU", - "state": "STARTING", - "user_id": "105", - "user_name": "alex.liang2", - "device_ids": [ - 693 - ], - "device_objs": [ - { - "device_id": "0xT3630FW2YM@MacOS.Edge.Device", - "os_type": "MacOS", - "id": 693, - "ip": "1.1.1.1", - "memory": 1024, - "cpu": "1.7", - "gpu": "Nvidia", - "extra_infos": {} - } - ], - "model_config": { - "model_name": "image-model", - "model_id": 111, - "model_version": "v1", - "is_from_open": 0, - "model_storage_url": "https://fedml.s3.us-west-1.amazonaws.com/1666239314792client-package.zip", - "instance_scale_min": 1, - "instance_scale_max": 3, - "inference_engine": "onnx" - }, - "parameters": { - "hidden_size": 128, - "hidden_act": "gelu", - "initializer_range": 0.02, - "vocab_size": 30522, - "hidden_dropout_prob": 0.1, - "num_attention_heads": 2, - "type_vocab_size": 2, - "max_position_embeddings": 512, - "num_hidden_layers": 2, - "intermediate_size": 512, - "attention_probs_dropout_prob": 0.1 - } - } - """ - try: - MLOpsConfigs.fetch_all_configs() - except Exception as e: - pass - - # get deployment params - request_json = json.loads(payload) - run_id = request_json["end_point_id"] - end_point_name = request_json["end_point_name"] - token = request_json["token"] - user_id = request_json["user_id"] - user_name = request_json["user_name"] - device_ids = request_json["device_ids"] - device_objs = request_json["device_objs"] - - model_config = request_json["model_config"] - model_name = model_config["model_name"] - model_id = model_config["model_id"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - inference_engine = model_config.get("inference_engine", 0) - inference_end_point_id = run_id - - # Start log processor for current run - self.args.run_id = run_id - self.args.edge_id = self.edge_id - MLOpsRuntimeLog.get_instance(self.args).init_logs() - MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source( - ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT) - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id) - - logging.info("callback_start_deployment {}".format(payload)) - - self.ota_upgrade(payload, request_json) - - run_id = inference_end_point_id - self.args.run_id = run_id - self.run_id = run_id - request_json["run_id"] = run_id - self.request_json = request_json - run_id_str = str(run_id) - self.running_request_json[run_id_str] = request_json - - diff_devices, diff_version = self.get_diff_devices(run_id) - self.request_json["diff_devices"] = diff_devices - self.request_json["diff_version"] = diff_version - self.request_json["master_node_ip"] = self.get_ip_address(self.request_json) - - self.init_device_update_map() - - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - - # Target status of the devices - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs)) - - usr_indicated_token = self.get_usr_indicated_token(request_json) - if usr_indicated_token != "": - logging.info(f"Change Token from{token} to {usr_indicated_token}") - token = usr_indicated_token - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_token(run_id, end_point_name, model_name, token) - - self.subscribe_slave_devices_message(request_json) - - # Send stage: MODEL_DEPLOYMENT_STAGE1 = "Received" - time.sleep(2) - self.send_deployment_stages(self.run_id, model_name, model_id, - "", - ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], - "Received request for end point {}".format(run_id)) - time.sleep(1) - - # Send stage: MODEL_DEPLOYMENT_STAGE2 = "Initializing" - self.send_deployment_stages(self.run_id, model_name, model_id, - "", - ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], - ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"]) - - ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id) - time.sleep(1) - - if self.run_as_edge_server_and_agent: - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config - ) - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - server_runner.edge_id = self.edge_id - server_runner.infer_host = self.infer_host - server_runner.redis_addr = self.redis_addr - server_runner.redis_port = self.redis_port - server_runner.redis_password = self.redis_password - self.run_process_event_map[run_id_str] = multiprocessing.Event() - self.run_process_event_map[run_id_str].clear() - server_runner.run_process_event = self.run_process_event_map[run_id_str] - self.run_process_completed_event_map[run_id_str] = multiprocessing.Event() - self.run_process_completed_event_map[run_id_str].clear() - server_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str] - self.model_runner_mapping[run_id_str] = server_runner - server_process = Process(target=server_runner.run, args=( - self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str] - )) - server_process.start() - ServerConstants.save_run_process(run_id, server_process.pid) - - # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner" - self.send_deployment_stages(self.run_id, model_name, model_id, - "", - ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"], - ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"]) - - def get_diff_devices(self, run_id) -> (dict, dict): - ''' - {device_id(int): "op: add" | "op: delete" | "op: replace"} - "op: add" -> need to add - "op: delete" -> need to delete device - "op: replace" -> need to restart the container of the device on same port with new (same) model pkg - - {device_id(int): "old_version"} - ''' - try: - logging.info(f"Get diff devices for run {run_id}") - request_json = self.running_request_json.get(str(run_id)) - - diff_devices = {} - diff_version = {} - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - get_end_point_device_info(run_id) - if device_objs is None: - for new_device_id in request_json["device_ids"]: - diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION - else: - device_objs_dict = json.loads(device_objs) - device_ids_frm_db = [d["id"] for d in device_objs_dict] - - for exist_device_id in device_ids_frm_db: - if exist_device_id not in request_json["device_ids"]: - diff_devices[exist_device_id] = ServerConstants.DEVICE_DIFF_DELETE_OPERATION - - for new_device_id in request_json["device_ids"]: - if new_device_id not in device_ids_frm_db: - diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION - else: - if new_device_id == self.edge_id: - continue - - old_version = self.should_update_device(request_json, new_device_id) - if old_version: - diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_REPLACE_OPERATION - diff_version[new_device_id] = old_version - else: - pass - logging.info(f"Diff devices: {diff_devices}") - except Exception as e: - error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/{run_id}_error.txt" - if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))): - os.makedirs(os.path.dirname(os.path.expanduser(error_log_path))) - with open(os.path.expanduser(error_log_path), "w") as f: - f.write(str(e)) - raise e - return diff_devices, diff_version - - def should_update_device(self, payload, new_device_id): - ''' - Query the device info in local redis, if the device info is different from the payload, - return the old model version - ''' - device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_deployment_result_list(self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"]) - - for device_result in device_result_list: - if device_result is None: - continue - device_result_dict = json.loads(device_result) - - if int(device_result_dict["cache_device_id"]) == new_device_id: - result_body = json.loads(device_result_dict["result"]) - if result_body["model_version"] != payload["model_config"]["model_version"]: - return result_body["model_version"] - else: - return None - return None - - def init_device_update_map(self): - """ - Scroll update. 
- Send first scroll update message to the device(s), then the callback_deployment_result will handle the rest - """ - self.slave_update_result_mapping[self.run_id] = { - "devices_need_update": [], - "total_updated_devices": [], - "curr_update_window": [], - "max_unavailable_rate": 0.1 - } - - for device_id, device_op in self.request_json["diff_devices"].items(): - if device_op == ServerConstants.DEVICE_DIFF_REPLACE_OPERATION: - self.slave_update_result_mapping[self.run_id]["devices_need_update"].append(device_id) - - total_num = len(self.slave_update_result_mapping[self.run_id]["devices_need_update"]) - - if total_num == 0: - return - - max_unavailable_rate = self.request_json["parameters"].get("max_unavailable_rate", 0.1) - - window_size = max(1, int(total_num * max_unavailable_rate)) - - first_chunk_devices_update = \ - self.slave_update_result_mapping[self.run_id]["devices_need_update"][:window_size].copy() - - self.slave_update_result_mapping[self.run_id]["curr_update_window"] = first_chunk_devices_update - - self.request_json["first_chunk_devices_update"] = first_chunk_devices_update.copy() # to Notify sub-process - - def send_first_scroll_update_msg(self, first_chunk_devices_update): - """ - Delete the record of the replaced device and send the deployment msg to the devices - """ - if len(first_chunk_devices_update) == 0: - return - - # Delete the record of the replaced device - self.delete_device_info_on_master(first_chunk_devices_update) - - # Send the deployment msg to the devices, (we reuse the start_deployment msg) - for edge_id in first_chunk_devices_update: - if edge_id == self.edge_id: - continue - # send start deployment request to each device - self.send_deployment_start_request_to_edge(edge_id) - return - - def delete_device_info_on_master(self, edge_id_list_to_delete): - # Remove the record of the replaced device - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - # 1.1 Get & Delete Deployment Status in Redis / SQLite - devices_status_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_deployment_status_list(self.request_json["end_point_id"], self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"]) - delete_devices_status_list = [] - for device_status in devices_status_list: - device_status_dict = json.loads(device_status) - if int(device_status_dict["cache_device_id"]) in edge_id_list_to_delete: - delete_devices_status_list.append(device_status) - - for delete_item in delete_devices_status_list: - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_status( - delete_item, self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"] - ) - - # 1.2 Get & Delete the endpoint device info in Redis / SQLite - device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - get_end_point_device_info(self.request_json["run_id"]) - - if device_objs is None: - return - - total_device_objs_list = json.loads(device_objs) - for device_obj in total_device_objs_list: - if device_obj["id"] in edge_id_list_to_delete: - total_device_objs_list.remove(device_obj) - - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_device_info( - self.request_json["end_point_id"], self.request_json["end_point_name"], - json.dumps(total_device_objs_list)) - - # 1.3 Delete the result in deployment result list in Redis / SQLite - device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_deployment_result_list(self.request_json["end_point_id"], self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"]) - delete_device_result_list = [] - for device_result in device_result_list: - device_result_dict = json.loads(device_result) - if int(device_result_dict["cache_device_id"]) in edge_id_list_to_delete: - delete_device_result_list.append(device_result) - - for delete_item in delete_device_result_list: - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result( - delete_item, self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"] - ) - - logging.info(f"Deleted the record of the replaced device {edge_id_list_to_delete}") - - def send_next_scroll_update_msg(self, device_id: int): - this_run_meta_data = self.slave_update_result_mapping[self.run_id] - - devices_need_update = this_run_meta_data["devices_need_update"] - devices_updated = this_run_meta_data["total_updated_devices"] - curr_update_window = this_run_meta_data["curr_update_window"] - max_unavailable_rate = this_run_meta_data["max_unavailable_rate"] - - if (device_id not in devices_need_update) or (device_id in devices_updated): - # Prevent duplicate message / cross talk - # TODO: Check the cross talk if multiple run update the same device - logging.info(f"Device {device_id} is not in the update window nor need to be updated") - return - - devices_updated.append(device_id) - curr_update_window.remove(device_id) - - logging.info(f"Current update window {curr_update_window} after deleting: Device {device_id}") - - if len(curr_update_window) == 0: - remain_devices = list(set(devices_need_update) - set(devices_updated)) - logging.info(f"Devices need to be updated: {remain_devices}") - if len(remain_devices) == 0: # All devices are updated - return - else: - window_size = max(1, int(len(remain_devices) * max_unavailable_rate)) - edges_in_window = remain_devices[:window_size] - logging.info(f"Devices in next round window: {edges_in_window}") - curr_update_window = edges_in_window.copy() # Slide the window - self.slave_update_result_mapping[self.run_id]["curr_update_window"] = edges_in_window.copy() - - self.delete_device_info_on_master(edges_in_window) - - # Send the deployment msg to the devices, (we reuse the deployment msg) - for edge_id in edges_in_window: - if edge_id == self.edge_id: - continue - self.send_deployment_start_request_to_edge(edge_id) - else: - pass # Wait for the callback of other devices in this window - - def callback_activate_deployment(self, topic, payload): - logging.info("callback_activate_deployment: topic = %s, payload = %s" % (topic, payload)) - - # Parse payload as the model message object. - model_msg_object = FedMLModelMsgObject(topic, payload) - - # Get the previous deployment status. 
- FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_end_point_status(model_msg_object.inference_end_point_id) - if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED: - return - - # Set end point as activated status - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation( - model_msg_object.inference_end_point_id, model_msg_object.end_point_name, True) - - def callback_deactivate_deployment(self, topic, payload): - logging.info("callback_deactivate_deployment: topic = %s, payload = %s" % (topic, payload)) - - # Parse payload as the model message object. - model_msg_object = FedMLModelMsgObject(topic, payload) - - # Get the endpoint status - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_end_point_status(model_msg_object.inference_end_point_id) - if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED: - return - - # Set end point as deactivated status - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation( - model_msg_object.inference_end_point_id, model_msg_object.model_name, False) - - def set_runner_stopped_event(self, run_id): - run_id_str = str(run_id) - server_runner = self.model_runner_mapping.get(run_id_str, None) - if server_runner is not None: - if server_runner.run_process_event is not None: - server_runner.run_process_event.set() - self.model_runner_mapping.pop(run_id_str) - - def set_runner_completed_event(self, run_id): - run_id_str = str(run_id) - server_runner = self.model_runner_mapping.get(run_id_str, None) - if server_runner is not None: - if server_runner.run_process_completed_event is not None: - server_runner.run_process_completed_event.set() - self.model_runner_mapping.pop(run_id_str) - - def callback_delete_deployment(self, topic, payload): - logging.info("callback_delete_deployment: topic = %s, payload = %s" % (topic, payload)) - - # Parse payload as the model message object. - model_msg_object = FedMLModelMsgObject(topic, payload) - - # Set end point as deactivated status - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_activation(model_msg_object.inference_end_point_id, - model_msg_object.end_point_name, False) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version) - - self.send_deployment_delete_request_to_edges(payload, model_msg_object) - - self.set_runner_stopped_event(model_msg_object.run_id) - - self.stop_device_inference_monitor(model_msg_object.run_id, model_msg_object.end_point_name, - model_msg_object.model_id, model_msg_object.model_name, - model_msg_object.model_version) - - FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) - FedMLModelDatabase.get_instance().delete_deployment_status( - model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, - model_version=model_msg_object.model_version) - FedMLModelDatabase.get_instance().delete_deployment_result( - model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, - model_version=model_msg_object.model_version) - FedMLModelDatabase.get_instance().delete_deployment_run_info( - end_point_id=model_msg_object.inference_end_point_id) - - def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload): - self.send_deployment_results(end_point_id, end_point_name, - payload["model_name"], payload["model_url"], - payload["model_version"], payload["port"], - payload["inference_engine"], - payload["model_metadata"], - payload["model_config"], - payload["input_json"], - payload["output_json"]) - - def send_deployment_results(self, end_point_id, end_point_name, - model_name, model_inference_url, - model_version, inference_port, inference_engine, - model_metadata, model_config, input_json, output_json): - deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result" - deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id) - deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name, - "model_name": model_name, "model_url": model_inference_url, - "version": model_version, "port": inference_port, - "inference_engine": inference_engine, - "model_metadata": model_metadata, - "model_config": model_config, - "input_json": input_json, - "output_json": output_json, - "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} - logging.info(f"[Server] deployment_results_payload to mlops: {deployment_results_payload}") - - self.client_mqtt_mgr.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload)) - self.client_mqtt_mgr.send_message_json(deployment_results_topic_prefix, json.dumps(deployment_results_payload)) - - def send_deployment_status(self, end_point_id, end_point_name, model_name, model_inference_url, model_status): - deployment_status_topic_prefix = "model_ops/model_device/return_deployment_status" - deployment_status_topic = "{}/{}".format(deployment_status_topic_prefix, end_point_id) - deployment_status_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name, - "model_name": model_name, - "model_url": model_inference_url, - "model_status": model_status, - "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} - logging.info(f"[Server] deployment_status_payload to mlops: {deployment_status_payload}") - - self.client_mqtt_mgr.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload)) - self.client_mqtt_mgr.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload)) - - def send_deployment_stages(self, end_point_id, model_name, model_id, model_inference_url, 
- model_stages_index, model_stages_title, model_stage_detail): - deployment_stages_topic_prefix = "model_ops/model_device/return_deployment_stages" - deployment_stages_topic = "{}/{}".format(deployment_stages_topic_prefix, end_point_id) - deployment_stages_payload = {"model_name": model_name, - "model_id": model_id, - "model_url": model_inference_url, - "end_point_id": end_point_id, - "model_stage_index": model_stages_index, - "model_stage_title": model_stages_title, - "model_stage_detail": model_stage_detail, - "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} - logging.info("-------- Stages{}:{} --------".format(model_stages_index, deployment_stages_payload)) - - self.client_mqtt_mgr.send_message_json(deployment_stages_topic, json.dumps(deployment_stages_payload)) - self.client_mqtt_mgr.send_message_json(deployment_stages_topic_prefix, json.dumps(deployment_stages_payload)) - - def on_client_mqtt_disconnected(self, mqtt_client_object): - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_lock.acquire() - self.client_mqtt_is_connected = False - self.client_mqtt_lock.release() - - logging.info("on_client_mqtt_disconnected: {}.".format(self.client_mqtt_is_connected)) - - def on_client_mqtt_connected(self, mqtt_client_object): - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - - self.mlops_metrics.set_messenger(self.client_mqtt_mgr) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = self.edge_id - self.mlops_metrics.server_agent_id = self.server_agent_id - - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_lock.acquire() - self.client_mqtt_is_connected = True - self.client_mqtt_lock.release() - - # logging.info("on_client_mqtt_connected: {}.".format(self.client_mqtt_is_connected)) - - def setup_client_mqtt_mgr(self): - if self.client_mqtt_mgr is not None: - return - - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - # logging.info( - # "server agent config: {},{}".format( - # self.agent_config["mqtt_config"]["BROKER_HOST"], self.agent_config["mqtt_config"]["BROKER_PORT"] - # ) - # ) - - self.client_mqtt_mgr = MqttManager( - self.agent_config["mqtt_config"]["BROKER_HOST"], - self.agent_config["mqtt_config"]["BROKER_PORT"], - self.agent_config["mqtt_config"]["MQTT_USER"], - self.agent_config["mqtt_config"]["MQTT_PWD"], - self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"], - "FedML_ModelServerAgent_Metrics_@{}@_{}_{}_{}".format(self.user_name, self.args.current_device_id, - str(os.getpid()), - str(uuid.uuid4())) - ) - self.client_mqtt_mgr.add_connected_listener(self.on_client_mqtt_connected) - self.client_mqtt_mgr.add_disconnected_listener(self.on_client_mqtt_disconnected) - self.client_mqtt_mgr.connect() - self.client_mqtt_mgr.loop_start() - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.client_mqtt_mgr) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = self.edge_id - self.mlops_metrics.server_agent_id = self.server_agent_id - - def release_client_mqtt_mgr(self): - try: - if self.client_mqtt_mgr is not None: - self.client_mqtt_mgr.loop_stop() - self.client_mqtt_mgr.disconnect() - - self.client_mqtt_lock.acquire() - if self.client_mqtt_mgr is not None: - self.client_mqtt_is_connected = False - self.client_mqtt_mgr = None - self.client_mqtt_lock.release() - except Exception: - pass - - def send_deployment_stop_request_to_edges(self, 
edge_id_list, payload): - for edge_id in edge_id_list: - topic_stop_deployment = "model_ops/model_device/stop_deployment/{}".format(str(self.edge_id)) - logging.info("stop_deployment: send topic " + topic_stop_deployment) - self.client_mqtt_mgr.send_message_json(topic_stop_deployment, payload) - - def send_exit_train_with_exception_request_to_edges(self, edge_id_list, payload): - for edge_id in edge_id_list: - topic_exit_train = "flserver_agent/" + str(edge_id) + "/exit_train_with_exception" - logging.info("exit_train_with_exception: send topic " + topic_exit_train) - self.client_mqtt_mgr.send_message_json(topic_exit_train, payload) - - def exit_run_with_exception_entry(self): - try: - self.setup_client_mqtt_mgr() - self.exit_run_with_exception() - except Exception as e: - self.release_client_mqtt_mgr() - sys_utils.cleanup_all_fedml_server_login_processes( - ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False) - sys.exit(1) - finally: - self.release_client_mqtt_mgr() - - def exit_run_with_exception(self): - logging.info("Exit run successfully.") - - ServerConstants.cleanup_learning_process(self.run_id) - ServerConstants.cleanup_run_process(self.run_id) - - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id) - - time.sleep(1) - - def callback_exit_train_with_exception(self, topic, payload): - # logging.info("callback_exit_train_with_exception: topic = %s, payload = %s" % (topic, payload)) - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json.get("runId", None) - if run_id is None: - run_id = request_json.get("run_id", None) - if run_id is None: - run_id = request_json.get("id", None) - - if run_id is None: - return - - edge_ids = request_json.get("edgeids", None) - - self.send_exit_train_with_exception_request_to_edges(edge_ids, payload) - - # Stop server with multiprocessing mode - self.request_json = request_json - server_runner = FedMLServerRunner( - self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id - ) - try: - Process(target=server_runner.exit_run_with_exception_entry).start() - except Exception as e: - pass - - def callback_client_exit_train_with_exception(self, topic, payload): - # logging.info("callback_client_exit_train_with_exception: topic = %s, payload = %s" % (topic, payload)) - - request_json = json.loads(payload) - run_id = request_json.get("run_id", None) - edge_id = request_json.get("edge_id", None) - if run_id is None: - logging.info("callback_client_exit_train_with_exception run id is none") - return - - job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id) - if job is not None and job.running_json is not None and job.running_json != "": - job_json_obj = json.loads(job.running_json) - edge_ids = job_json_obj.get("edgeids", None) - - self.mlops_metrics.broadcast_server_training_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, - is_from_model=True, edge_id=edge_id) - - self.send_exit_train_with_exception_request_to_edges(edge_ids, job.running_json) - - self.exit_run_with_exception() - - def callback_runner_id_status(self, topic, payload): - logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload)) - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json["run_id"] - status = request_json["status"] - edge_id = 
request_json["edge_id"] - run_id_str = str(run_id) - - if ( - status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED - or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED - ): - # Stop server with multiprocessing mode - stop_request_json = self.running_request_json.get(run_id_str, None) - if stop_request_json is None: - stop_request_json = request_json - if self.run_as_edge_server_and_agent: - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config - ) - server_runner.edge_id = self.edge_id - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - server_runner.run_status = status - status_process = Process(target=server_runner.cleanup_client_with_status) - status_process.start() - status_process.join(10) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - - def cleanup_client_with_status(self): - if self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - logging.info("received to finished status.") - self.cleanup_run_when_finished() - elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: - logging.info("received to failed status.") - self.cleanup_run_when_starting_failed() - - def callback_report_current_status(self, topic, payload): - request_json = json.loads(payload) - if self.run_as_edge_server_and_agent: - self.send_agent_active_msg() - elif self.run_as_cloud_agent: - self.send_agent_active_msg() - elif self.run_as_cloud_server: - pass - - @staticmethod - def process_ota_upgrade_msg(): - os.system("pip install -U fedml") - - def callback_server_ota_msg(self, topic, payload): - request_json = json.loads(payload) - cmd = request_json["cmd"] - - if cmd == ServerConstants.FEDML_OTA_CMD_UPGRADE: - try: - self.process_ota_upgrade_msg() - # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start() - raise Exception("After upgraded, restart runner...") - except Exception as e: - pass - elif cmd == ServerConstants.FEDML_OTA_CMD_RESTART: - raise Exception("Restart runner...") - - @staticmethod - def get_device_id(): - device_file_path = os.path.join(ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME) - file_for_device_id = os.path.join(device_file_path, "devices.id") - if not os.path.exists(device_file_path): - os.makedirs(device_file_path) - elif os.path.exists(file_for_device_id): - with open(file_for_device_id, 'r', encoding='utf-8') as f: - device_id_from_file = f.readline() - if device_id_from_file is not None and device_id_from_file != "": - return device_id_from_file - - if platform.system() == "Darwin": - cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \ - "|awk -F':' '{print $2}' " - device_id = os.popen(cmd_get_serial_num).read() - device_id = device_id.replace('\n', '').replace(' ', '') - if device_id is None or device_id == "": - device_id = hex(uuid.getnode()) - else: - device_id = "0x" + device_id - else: - if "nt" in os.name: - - def get_uuid(): - guid = "" - try: - cmd = "wmic csproduct get uuid" - guid = str(subprocess.check_output(cmd)) - pos1 = guid.find("\\n") + 2 - guid = guid[pos1:-15] - except Exception as ex: - pass - return str(guid) - - device_id = str(get_uuid()) - elif "posix" in os.name: - device_id = sys_utils.get_device_id_in_docker() - if device_id is None: - device_id = hex(uuid.getnode()) - else: - device_id = sys_utils.run_subprocess_open( - 
"hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split() - ) - device_id = hex(device_id) - - if device_id is not None and device_id != "": - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - else: - device_id = hex(uuid.uuid4()) - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - - return device_id - - def bind_account_and_device_id(self, url, account_id, device_id, os_name): - role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_ON_PREMISE_MASTER_INDEX] - if self.run_as_edge_server_and_agent: - role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_ON_PREMISE_MASTER_INDEX] - elif self.run_as_cloud_agent: - role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_FEDML_CLOUD_MASTER_INDEX] - elif self.run_as_cloud_server: - role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_INFERENCE_INSTANCE_INDEX] - - ip = requests.get('https://checkip.amazonaws.com').text.strip() - fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \ - cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \ - gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info() - host_name = sys_utils.get_host_name() - json_params = { - "accountid": account_id, - "deviceid": device_id, - "type": os_name, - "state": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, - "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, - "processor": cpu_info, - "core_type": cpu_info, - "network": "", - "role": role, - "os_ver": os_ver, - "memory": total_mem, - "ip": ip, - "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver, - "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver, - "mpi_installed": mpi_installed, "cpu_usage": cpu_usage, - "available_mem": available_mem, "total_mem": total_mem, - "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name} - } - if gpu_count > 0: - if gpu_total_mem is not None: - json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem - else: - json_params["gpu"] = gpu_info if gpu_info is not None else "" - json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else "" - if gpu_available_mem is not None: - json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem - if gpu_total_mem is not None: - json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem - - json_params["extra_infos"]["gpu_count"] = gpu_count - json_params["extra_infos"]["gpu_vendor"] = gpu_vendor - json_params["extra_infos"]["gpu_device_name"] = gpu_device_name - - gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count) - gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0 - gpu_list = sys_utils.get_gpu_list() - json_params["extra_infos"]["gpu_available_count"] = gpu_available_count - json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list - json_params["extra_infos"]["gpu_list"] = gpu_list - else: - json_params["gpu"] = "None" - json_params["extra_infos"]["gpu_available_count"] = 0 - json_params["extra_infos"]["gpu_available_id_list"] = [] - json_params["extra_infos"]["gpu_list"] = [] - - _, cert_path = MLOpsConfigs.get_request_params() - if cert_path is not None: - try: - requests.session().verify = cert_path - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", 
"Connection": "close"} - ) - except requests.exceptions.SSLError as err: - MLOpsConfigs.install_root_ca_file() - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - else: - response = requests.post(url, json=json_params, headers={"Connection": "close"}) - edge_id = -1 - user_name = None - extra_url = None - if response.status_code != 200: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - pass - else: - # print("url = {}, response = {}".format(url, response)) - status_code = response.json().get("code") - if status_code == "SUCCESS": - edge_id = response.json().get("data").get("id") - user_name = response.json().get("data").get("userName", None) - extra_url = response.json().get("data").get("url", None) - if edge_id is None or edge_id <= 0: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - else: - if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR: - raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR) - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - return -1, None, None - return edge_id, user_name, extra_url - - def fetch_configs(self): - return MLOpsConfigs.fetch_all_configs() - - def send_agent_active_msg(self): - active_topic = "flserver_agent/active" - status = MLOpsStatus.get_instance().get_server_agent_status(self.edge_id) - if ( - status is not None - and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE - and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - ): - return - - status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - active_msg = {"ID": self.edge_id, "status": status} - MLOpsStatus.get_instance().set_server_agent_status(self.edge_id, status) - self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg)) - - def subscribe_slave_devices_message(self, request_json): - if request_json is None: - return - run_id = request_json["run_id"] - edge_id_list = request_json["device_ids"] - logging.info("Edge ids: " + str(edge_id_list)) - for edge_id in edge_id_list: - if str(edge_id) == str(self.edge_id): - continue - # subscribe deployment result message for each model device - deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(edge_id) - self.mqtt_mgr.add_message_listener(deployment_results_topic, self.callback_deployment_result_message) - self.mqtt_mgr.subscribe_msg(deployment_results_topic) - - # subscribe deployment status message for each model device - deployment_status_topic = "model_device/model_device/return_deployment_status/{}".format(edge_id) - self.mqtt_mgr.add_message_listener(deployment_status_topic, self.callback_deployment_status_message) - self.mqtt_mgr.subscribe_msg(deployment_status_topic) - - logging.info("subscribe device messages {}, {}".format( - deployment_results_topic, deployment_status_topic)) - - def on_agent_mqtt_connected(self, mqtt_client_object): - # The MQTT message topic format is as follows: // - - # Setup MQTT message listener for starting deployment - server_agent_id = self.edge_id - topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_start_deployment, self.callback_start_deployment) - - # Setup MQTT message listener for activating deployment - 
topic_activate_deployment = "model_ops/model_device/activate_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_activate_deployment, self.callback_activate_deployment) - - # Setup MQTT message listener for deactivating deployment - topic_deactivate_deployment = "model_ops/model_device/deactivate_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_deactivate_deployment, self.callback_deactivate_deployment) - - # Setup MQTT message listener for delete deployment - topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_delete_deployment, self.callback_delete_deployment) - - # Setup MQTT message listener for server status switching - topic_server_status = "fl_server/flserver_agent_" + str(server_agent_id) + "/status" - self.mqtt_mgr.add_message_listener(topic_server_status, self.callback_runner_id_status) - - # Setup MQTT message listener to report current device status. - topic_report_status = "mlops/report_device_status" - self.mqtt_mgr.add_message_listener(topic_report_status, self.callback_report_current_status) - - # Setup MQTT message listener to OTA messages from the MLOps. - topic_ota_msg = "mlops/flserver_agent_" + str(server_agent_id) + "/ota" - self.mqtt_mgr.add_message_listener(topic_ota_msg, self.callback_server_ota_msg) - - # Subscribe topics for starting train, stopping train and fetching client status. - mqtt_client_object.subscribe(topic_start_deployment, qos=2) - mqtt_client_object.subscribe(topic_activate_deployment, qos=2) - mqtt_client_object.subscribe(topic_deactivate_deployment, qos=2) - mqtt_client_object.subscribe(topic_delete_deployment, qos=2) - mqtt_client_object.subscribe(topic_server_status, qos=2) - mqtt_client_object.subscribe(topic_report_status, qos=2) - mqtt_client_object.subscribe(topic_ota_msg, qos=2) - - self.subscribed_topics.clear() - self.subscribed_topics.append(topic_start_deployment) - self.subscribed_topics.append(topic_activate_deployment) - self.subscribed_topics.append(topic_deactivate_deployment) - self.subscribed_topics.append(topic_delete_deployment) - self.subscribed_topics.append(topic_server_status) - self.subscribed_topics.append(topic_report_status) - self.subscribed_topics.append(topic_ota_msg) - - self.endpoint_sync_protocol = FedMLEndpointSyncProtocol(agent_config=self.agent_config, mqtt_mgr=self.mqtt_mgr) - self.endpoint_sync_protocol.setup_listener_for_sync_device_info(self.edge_id) - - # Broadcast the first active message. 
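One more piece of context before the active message below: agent liveness also depends on the MQTT last-will registered in setup_agent_mqtt_connection later in this file. The sketch below shows that pattern with paho-mqtt (1.x style API) under assumed placeholder values: broker host/port, keepalive, the edge id, and the literal "OFFLINE" standing in for ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE.

    # Stripped-down last-will sketch: if the agent dies without a clean
    # disconnect, the broker publishes the OFFLINE status on its behalf.
    # All connection values here are placeholders, not the agent's real config.
    import json
    import paho.mqtt.client as mqtt

    edge_id = 693
    client = mqtt.Client(client_id="FedML_ModelServerAgent_Daemon_example")
    client.will_set("flserver_agent/last_will_msg",
                    payload=json.dumps({"ID": edge_id, "status": "OFFLINE"}),
                    qos=2, retain=False)
    client.connect("broker.example.com", 1883, keepalive=180)
    client.subscribe("model_ops/model_device/start_deployment/{}".format(edge_id), qos=2)
    client.loop_forever()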
- self.send_agent_active_msg() - - # Echo results - # print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!") - # print( - # "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is " - # + str(self.unique_device_id) - # + "\n" - # ) - - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - def on_agent_mqtt_disconnected(self, mqtt_client_object): - MLOpsStatus.get_instance().set_server_agent_status( - self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE - ) - - def recover_inference_and_monitor(self): - try: - history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs() - for job in history_jobs.job_list: - if job.running_json is None: - continue - - if job.deployment_result == "": - continue - - run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ - model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ - inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \ - self.parse_model_run_params(json.loads(job.running_json)) - - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - is_activated = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_end_point_activation(run_id) - if not is_activated: - continue - - self.start_device_inference_gateway(run_id, end_point_name, model_id, model_name, model_version, - inference_port=inference_port) - - self.stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version) - self.start_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version) - except Exception as e: - logging.info("recover inference and monitor: {}".format(traceback.format_exc())) - - def recover_start_deployment_msg_after_upgrading(self): - try: - current_job = FedMLServerDataInterface.get_instance().get_current_job() - if current_job is not None and \ - current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING: - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - is_activated = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - get_end_point_activation(current_job.job_id) - if not is_activated: - return - logging.info("start deployment after upgrading.") - topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id)) - self.callback_start_deployment(topic_start_deployment, current_job.running_json) - except Exception as e: - logging.info("recover starting deployment message after upgrading: {}".format(traceback.format_exc())) - - def setup_agent_mqtt_connection(self, service_config): - # Setup MQTT connection - self.mqtt_mgr = MqttManager( - service_config["mqtt_config"]["BROKER_HOST"], - service_config["mqtt_config"]["BROKER_PORT"], - service_config["mqtt_config"]["MQTT_USER"], - service_config["mqtt_config"]["MQTT_PWD"], - service_config["mqtt_config"]["MQTT_KEEPALIVE"], - "FedML_ModelServerAgent_Daemon_@" + self.user_name + "@_" + self.args.current_device_id + str(uuid.uuid4()), - "flserver_agent/last_will_msg", - json.dumps({"ID": self.edge_id, "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE}) - ) - self.agent_config = service_config - - # Init local database - FedMLServerDataInterface.get_instance().create_job_table() - try: - FedMLModelDatabase.get_instance().set_database_base_dir(ServerConstants.get_database_dir()) - FedMLModelDatabase.get_instance().create_table() - except Exception as e: - pass - - server_api_cmd = "fedml.computing.scheduler.model_scheduler.device_server_api:api" - server_api_pids = RunProcessUtils.get_pid_from_cmd_line(server_api_cmd) - if server_api_pids is None or len(server_api_pids) <= 0: - # Start local API services - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - python_program = get_python_program() - self.local_api_process = ServerConstants.exec_console_with_script( - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - python_program, server_api_cmd, ServerConstants.LOCAL_SERVER_API_PORT, - fedml_base_dir - ), - should_capture_stdout=False, - should_capture_stderr=False - ) - # if self.local_api_process is not None and self.local_api_process.pid is not None: - # print(f"Model master local API process id {self.local_api_process.pid}") - - self.recover_inference_and_monitor() - - # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor() - - # Setup MQTT connected listener - self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected) - self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected) - self.mqtt_mgr.connect() - - self.setup_client_mqtt_mgr() - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, - is_from_model=True, edge_id=self.edge_id) - MLOpsStatus.get_instance().set_server_agent_status( - self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - ) - - self.recover_start_deployment_msg_after_upgrading() - - def stop_agent(self): - if self.run_process_event is not None: - self.run_process_event.set() - - if self.mqtt_mgr is not None: - try: - for topic in self.subscribed_topics: - self.mqtt_mgr.unsubscribe_msg(topic) - except Exception as e: - pass - - self.mqtt_mgr.loop_stop() - self.mqtt_mgr.disconnect() - - self.release_client_mqtt_mgr() - - def start_agent_mqtt_loop(self, should_exit_sys=True): - # Start MQTT message loop - try: - self.mqtt_mgr.loop_forever() - except Exception as e: - if str(e) == "Restarting after upgraded...": - logging.info("Restarting after upgraded...") - else: - 
print("Server tracing: {}".format(traceback.format_exc())) - finally: - self.stop_agent() - - if should_exit_sys: - time.sleep(5) - sys_utils.cleanup_all_fedml_server_login_processes( - ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False) - sys.exit(1) diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py new file mode 100755 index 0000000000..3fe45401ac --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py @@ -0,0 +1,204 @@ + +import json +import logging +import os +import time +from .device_model_cache import FedMLModelCache +from .device_server_constants import ServerConstants +from ..scheduler_core.general_constants import GeneralConstants + + +class FedMLDeployJobRunnerMsgSender(object): + def __init__(self): + self.infer_host = "127.0.0.1" + self.redis_addr = "local" + self.redis_port = "6379" + self.redis_password = "fedml_default" + self.message_center = None + self.request_json = None + self.edge_id = None + + def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload): + self.send_deployment_results(end_point_id, end_point_name, + payload["model_name"], payload["model_url"], + payload["model_version"], payload["port"], + payload["inference_engine"], + payload["model_metadata"], + payload["model_config"], + payload["input_json"], + payload["output_json"]) + + def send_deployment_results(self, end_point_id, end_point_name, + model_name, model_inference_url, + model_version, inference_port, inference_engine, + model_metadata, model_config, input_json, output_json): + deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result" + deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id) + deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name, + "model_name": model_name, "model_url": model_inference_url, + "version": model_version, "port": inference_port, + "inference_engine": inference_engine, + "model_metadata": model_metadata, + "model_config": model_config, + "input_json": input_json, + "output_json": output_json, + "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} + logging.info(f"[Master] deployment_results_payload is sent to mlops: {deployment_results_payload}") + + self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload)) + self.message_center.send_message_json(deployment_results_topic_prefix, json.dumps(deployment_results_payload)) + + @staticmethod + def send_deployment_status( + end_point_id, end_point_name, model_name, model_inference_url, model_status, message_center=None): + if message_center is None: + return + deployment_status_topic_prefix = "model_ops/model_device/return_deployment_status" + deployment_status_topic = "{}/{}".format(deployment_status_topic_prefix, end_point_id) + deployment_status_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name, + "model_name": model_name, + "model_url": model_inference_url, + "model_status": model_status, + "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} + logging.info(f"[Master] deployment_status_payload is sent to mlops: {deployment_status_payload}") + + message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload)) + message_center.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload)) + + 
@staticmethod + def send_deployment_stages(end_point_id, model_name, model_id, model_inference_url, + model_stages_index, model_stages_title, model_stage_detail, + message_center=None): + if message_center is None: + return + deployment_stages_topic_prefix = "model_ops/model_device/return_deployment_stages" + deployment_stages_topic = "{}/{}".format(deployment_stages_topic_prefix, end_point_id) + deployment_stages_payload = {"model_name": model_name, + "model_id": model_id, + "model_url": model_inference_url, + "end_point_id": end_point_id, + "model_stage_index": model_stages_index, + "model_stage_title": model_stages_title, + "model_stage_detail": model_stage_detail, + "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} + + message_center.send_message_json(deployment_stages_topic, json.dumps(deployment_stages_payload)) + message_center.send_message_json(deployment_stages_topic_prefix, json.dumps(deployment_stages_payload)) + + logging.info(f"-------- Stages has been sent to mlops with stage {model_stages_index} and " + f"payload {deployment_stages_payload}") + + def send_deployment_start_request_to_edges(self): + # Iterate through replica_num_diff, both add and replace should be sent to the edge devices + if "replica_num_diff" not in self.request_json or self.request_json["replica_num_diff"] is None: + return [] + + edge_id_list = [] + for device_id in self.request_json["replica_num_diff"].keys(): + edge_id_list.append(device_id) + + self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json) + should_added_devices = [] + for edge_id in edge_id_list: + if edge_id == self.edge_id: + continue + should_added_devices.append(edge_id) + # send start deployment request to each device + self.send_deployment_start_request_to_edge(edge_id) + return should_added_devices + + def send_deployment_start_request_to_edge(self, edge_id): + topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(edge_id)) + logging.info("start_deployment: send topic " + topic_start_deployment + " to client...") + self.message_center.send_message_json(topic_start_deployment, json.dumps(self.request_json)) + + def send_deployment_delete_request_to_edges(self, payload, model_msg_object): + if model_msg_object is None: # Called after the diff operation + if "diff_devices" not in self.request_json or self.request_json["diff_devices"] is None: + return + else: + edge_id_list_to_delete = [] + for device_id in self.request_json["diff_devices"]: + if self.request_json["diff_devices"][device_id] == ServerConstants.DEVICE_DIFF_DELETE_OPERATION: + edge_id_list_to_delete.append(device_id) + if len(edge_id_list_to_delete) == 0: + return + + try: + FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, + self.redis_password) + + # 1. Get & Delete the endpoint device info in Redis / SQLite + device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ + get_end_point_device_info(self.request_json["run_id"]) + + if device_objs is None: + raise Exception("The device list in local redis is None") + else: + total_device_objs_list = json.loads(device_objs) + for device_obj in total_device_objs_list: + if device_obj["id"] in edge_id_list_to_delete: + total_device_objs_list.remove(device_obj) + + FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_device_info( + self.request_json["end_point_id"], self.request_json["end_point_name"], + json.dumps(total_device_objs_list)) + + # 2 Delete the result in deployment result list in Redis / SQLite + device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ + get_deployment_result_list(self.request_json["end_point_id"], + self.request_json["end_point_name"], + self.request_json["model_config"]["model_name"]) + delete_device_result_list = [] + for device_result in device_result_list: + device_result_dict = json.loads(device_result) + if int(device_result_dict["cache_device_id"]) in edge_id_list_to_delete: + delete_device_result_list.append(device_result) + + for delete_item in delete_device_result_list: + FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result( + delete_item, self.request_json["end_point_id"], + self.request_json["end_point_name"], + self.request_json["model_config"]["model_name"] + ) + + except Exception as e: + run_id = self.request_json["run_id"] + error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/error_delete_{run_id}.txt" + if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))): + os.makedirs(os.path.dirname(os.path.expanduser(error_log_path))) + with open(os.path.expanduser(error_log_path), "w") as f: + f.write(str(self.request_json)) + f.write(str(e)) + f.write('\n') + raise e + + else: # Delete the whole endpoint + edge_id_list_to_delete = model_msg_object.device_ids + + # For Debug + if payload is not None: + debug_log_path = f"~/.fedml/fedml-model-server/fedml/logs/tmp_debug_delete_payload.txt" + if not os.path.exists(os.path.dirname(os.path.expanduser(debug_log_path))): + os.makedirs(os.path.dirname(os.path.expanduser(debug_log_path))) + with open(os.path.expanduser(debug_log_path), "w") as f: + f.write(str(payload)) + + # Remove the model master node id from the list using index 0 + edge_id_list_to_delete = edge_id_list_to_delete[1:] + + logging.info("Device ids to be deleted: " + str(edge_id_list_to_delete)) + + for edge_id in edge_id_list_to_delete: + if edge_id == self.edge_id: + continue + # send delete deployment request to each model device + topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(edge_id)) + logging.info("delete_deployment: send topic " + topic_delete_deployment + " to client...") + self.message_center.send_message_json(topic_delete_deployment, payload) + + def send_deployment_stop_request_to_edges(self, edge_id_list, payload): + for edge_id in edge_id_list: + topic_stop_deployment = "model_ops/model_device/stop_deployment/{}".format(str(self.edge_id)) + logging.info("stop_deployment: send topic " + topic_stop_deployment) + self.message_center.send_message_json(topic_stop_deployment, payload) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_agent.py b/python/fedml/computing/scheduler/model_scheduler/master_agent.py new file mode 100755 index 0000000000..2f30ae8b8e --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/master_agent.py @@ -0,0 +1,27 @@ + +from 
.device_server_constants import ServerConstants +from .device_server_data_interface import FedMLServerDataInterface +from .master_protocol_manager import FedMLDeployMasterProtocolManager +from ..master.base_master_agent import FedMLBaseMasterAgent + + +class FedMLDeployMasterAgent(FedMLBaseMasterAgent): + + def __init__(self): + FedMLBaseMasterAgent.__init__(self) + + # Override + def _get_log_file_dir(self): + return ServerConstants.get_log_file_dir() + + # Override + def _save_agent_info(self, unique_device_id, edge_id): + ServerConstants.save_runner_infos(unique_device_id, edge_id) + + # Override + def _init_database(self): + FedMLServerDataInterface.get_instance().create_job_table() + + # Override + def _generate_protocol_manager_instance(self, args, agent_config=None): + return FedMLDeployMasterProtocolManager(args, agent_config=agent_config) \ No newline at end of file diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py new file mode 100755 index 0000000000..f3d68c1f6a --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -0,0 +1,578 @@ +import copy +import json +import logging +import os +import time +import queue +import traceback +from abc import ABC +from multiprocessing import Queue + +import fedml +from fedml.core.mlops import MLOpsRuntimeLog +from .device_client_constants import ClientConstants +from .device_model_cache import FedMLModelCache +from .device_server_constants import ServerConstants +from .device_server_data_interface import FedMLServerDataInterface +from ..comm_utils import sys_utils +from ..comm_utils.run_process_utils import RunProcessUtils +from ..comm_utils.sys_utils import get_python_program +from ..scheduler_core.general_constants import GeneralConstants +from ..master.base_master_job_runner import FedMLBaseMasterJobRunner +from .device_replica_controller import FedMLDeviceReplicaController +from .job_runner_msg_sender import FedMLDeployJobRunnerMsgSender + + +class FedMLDeployMasterJobRunner(FedMLBaseMasterJobRunner, FedMLDeployJobRunnerMsgSender, ABC): + + default_redis_addr = "local" + default_redis_port = "6379" + default_redis_password = "fedml_default" + + def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0, + cuda_visible_gpu_ids_str=None): + FedMLDeployJobRunnerMsgSender.__init__(self) + FedMLBaseMasterJobRunner.__init__( + self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id, + cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ServerConstants.get_data_dir(), + agent_package_download_dir=ServerConstants.get_package_download_dir(), + agent_package_unzip_dir=GeneralConstants.get_package_unzip_dir(ServerConstants.get_package_download_dir()), + agent_log_file_dir=ServerConstants.get_log_file_dir() + ) + + self.infer_host = "127.0.0.1" + self.redis_addr = "local" + self.redis_port = "6379" + self.redis_password = "fedml_default" + self.inference_gateway_process = None + self.monitor_process = None + self.replica_controller = None + self.deployed_replica_payload = None + self.slave_deployment_results_map = dict() + self.deployment_result_queue = Queue() + + # Override + def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None,): + return FedMLDeployMasterJobRunner( + args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id + ) + + # 
Override + def _generate_extend_queue_list(self): + return [self.deployment_result_queue] + + # Override + def run_impl( + self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue, + run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue, + run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None, + status_center_queue=None + ): + # Parse the model parameters. + run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ + model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ + inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \ + FedMLDeployMasterJobRunner.parse_model_run_params(self.request_json) + + # Print request parameters. + logging.info("model deployment request: {}".format(self.request_json)) + logging.info("send deployment stages...") + + # Generate the replica controller object. + self.replica_controller = FedMLDeviceReplicaController(self.edge_id, self.request_json) + + # Start the process to report system performance(cpu,memory,etc.) to MLOps + self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id) + + # Check if we should stop the runner + self.check_runner_stop_event() + + # Send stage: MODEL_DEPLOYMENT_STAGE4 = "ForwardRequest2Slave" + self.send_deployment_stages( + self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE4["index"], + ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"], + message_center=self.message_center) + + # Init the runtime logs + self.args.run_id = self.run_id + MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + + # Report server running status + logging.info("report deployment status...") + self.check_runner_stop_event() + self.status_reporter.report_server_id_status( + run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, + is_from_model=True, running_json=json.dumps(self.request_json), + server_agent_id=self.edge_id, server_id=self.edge_id, edge_id=self.edge_id) + self.send_deployment_status( + self.run_id, end_point_name, model_name, "", + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING, + message_center=self.message_center) + + # start unified inference server + self.start_device_inference_gateway( + run_id, end_point_name, model_id, model_name, model_version, + agent_config=self.agent_config, inference_port=inference_port) + + # start inference monitor server + self.stop_device_inference_monitor( + run_id, end_point_name, model_id, model_name, model_version) + self.start_device_inference_monitor( + run_id, end_point_name, model_id, model_name, model_version, + redis_addr=self.redis_addr, redis_port=self.redis_port, redis_password=self.redis_password + ) + + # Changed the status to "IDLE" + self.status_reporter.report_server_id_status( + run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, + is_from_model=True, server_agent_id=self.edge_id, server_id=self.edge_id, edge_id=self.edge_id,) + + # Check if we should stop the runner + logging.info("send the model inference request to slave devices...") + self.check_runner_stop_event() + + # Forward deployment request to slave devices + # Handle "op:add" && "op:remove" + devices_sent_add_or_remove_msg = self.send_deployment_start_request_to_edges() + + # Handle "op:update" + devices_sent_update_remove_msg = self.send_first_scroll_update_msg() + + if 
len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0: + # No device is added or removed, and no device is updated or removed + ip = GeneralConstants.get_ip_address(self.request_json) + master_port = os.getenv("FEDML_MASTER_PORT", None) + if master_port is not None: + inference_port = int(master_port) + model_inference_port = inference_port + if ip.startswith("http://") or ip.startswith("https://"): + model_inference_url = "{}/api/v1/predict".format(ip) + else: + model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port) + + self.send_deployment_status( + run_id, end_point_name, model_name, model_inference_url, + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, + message_center=self.message_center + ) + + self.trigger_completed_event() + return + + self.deployment_result_queue = run_extend_queue_list[0] + while True: + self.check_runner_stop_event() + + try: + deployment_result = self.deployment_result_queue.get(block=False, timeout=0.2) + result_topic = deployment_result.get("topic", None) + result_payload = deployment_result.get("payload", None) + self.process_deployment_result_message(topic=result_topic, payload=result_payload) + except queue.Empty as e: # If queue is empty, then continue + pass + + time.sleep(0.5) + + def save_deployment_result(self, topic=None, payload=None): + self.deployment_result_queue.put({"topic": topic, "payload": payload}) + + def process_deployment_result_message(self, topic=None, payload=None): + # Parse the parameters + topic_splits = str(topic).split('/') + device_id = topic_splits[-1] + payload_json = json.loads(payload) + end_point_id = payload_json["end_point_id"] + end_point_name = payload_json["end_point_name"] + model_id = payload_json["model_id"] + model_name = payload_json["model_name"] + model_version = payload_json["model_version"] + model_status = payload_json["model_status"] + replica_no = payload_json.get("replica_no", None) # Idx start from 1 + run_id_str = str(end_point_id) + + # Set redis + sqlite deployment result + FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + + # Save deployment result to local cache + if model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED: + FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ + delete_deployment_result_with_device_id_and_replica_no( + end_point_id, end_point_name, model_name, device_id, replica_no) + elif model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED: + # add or update + FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ + set_deployment_result(end_point_id, end_point_name, + model_name, model_version, + device_id, payload, replica_no) + + # Note: To display the result in the UI, we need to save successful deployment result to the database + self.save_deployed_replica_payload(payload_json) + else: + if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED: + logging.error(f"Unsupported model status {model_status}.") + self.send_deployment_status( + end_point_id, end_point_name, payload_json["model_name"], "", + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, + message_center=self.message_center + ) + + # Notify the replica number controller + self.callback_update_curr_replica_num_state(device_id, replica_no, model_status) + + # Notify the replica version controller, which might trigger the next rolling update + self.send_next_scroll_update_msg(device_id, replica_no) + + # Update the global deployment result mapping + self.slave_deployment_results_map[str(device_id)] = model_status + + # Check if the endpoint is running + request_json = self.request_json + if request_json is None: + logging.error(f"The endpoint {end_point_id} is not running.") + self.send_deployment_status( + end_point_id, end_point_name, payload_json["model_name"], "", + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, + message_center=self.message_center + ) + return + + # Wait for all replica's result, not device-level + if self.is_all_replica_num_reconciled() and self.is_all_replica_version_reconciled(): + ''' + When all the devices have finished the add / delete / update operation + ''' + # 1. We should generate one unified inference api + # Note that here we use the gateway port instead of the inference port that is used by the slave device + model_config_parameters = request_json["parameters"] + inference_port = model_config_parameters.get("server_internal_port", + ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) + inference_port_external = model_config_parameters.get("server_external_port", inference_port) + ip = GeneralConstants.get_ip_address(request_json) + + if ip.startswith("http://") or ip.startswith("https://"): + model_inference_url = "{}/inference/{}".format(ip, end_point_id) + else: + model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, end_point_id) + + # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress" + self.send_deployment_stages( + end_point_id, model_name, model_id, model_inference_url, + ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"], ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"], + "inference url: {}".format(model_inference_url), message_center=self.message_center) + + # Prepare the result to MLOps + deployed_replica_payload = self.get_deployed_replica_payload() + if deployed_replica_payload is not None: + payload_json = deployed_replica_payload + model_slave_url = payload_json["model_url"] + payload_json["model_url"] = model_inference_url + payload_json["port"] = inference_port_external + token = FedMLModelCache.get_instance(self.redis_addr, self.redis_port).get_end_point_token( + end_point_id, end_point_name, model_name) + + model_metadata = payload_json["model_metadata"] + model_inputs = model_metadata["inputs"] + ret_inputs = list() + if "type" in model_metadata and model_metadata["type"] == "default": + payload_json["input_json"] = { + "end_point_name": end_point_name, "model_name": model_name, "token": str(token), + "inputs": model_inputs, "outputs": []} + payload_json["output_json"] = model_metadata["outputs"] + else: + raise 
Exception(f"Unsupported model metadata type {model_metadata['type']}") + + self.send_deployment_results_with_payload( + end_point_id, end_point_name, payload_json) + + payload_json_saved = payload_json + payload_json_saved["model_slave_url"] = model_slave_url + FedMLServerDataInterface.get_instance().save_job_result(end_point_id, self.edge_id, + json.dumps(payload_json_saved)) + else: + # Arrive here because only contains remove ops, so we do not need to update the model metadata + pass + + FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ + set_end_point_activation(end_point_id, end_point_name, True) + + self.send_deployment_status( + end_point_id, end_point_name, payload_json["model_name"], + model_inference_url, ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, + message_center=self.message_center + ) + + self.trigger_completed_event() + + @staticmethod + def start_device_inference_gateway( + run_id, end_point_name, model_id, + model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, + agent_config=None, redis_addr=None, redis_port=None, redis_password=None + ): + # start unified inference server + running_model_name = ServerConstants.get_running_model_name(end_point_name, + model_name, model_version, run_id, model_id) + python_program = get_python_program() + master_port = os.getenv("FEDML_MASTER_PORT", None) + if master_port is not None: + inference_port = int(master_port) + if not ServerConstants.is_running_on_k8s(): + logging.info(f"start the model inference gateway, end point {run_id}, " + f"model name {model_name} at port {inference_port}...") + use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False") + use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False + use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False") + use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False + inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api" + inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd) + if inference_gateway_pids is None or len(inference_gateway_pids) <= 0: + cur_dir = os.path.dirname(__file__) + fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) + connect_str = "@FEDML@" + ext_info = sys_utils.random1( + agent_config["mqtt_config"]["BROKER_HOST"] + connect_str + + str(agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str + + agent_config["mqtt_config"]["MQTT_USER"] + connect_str + + agent_config["mqtt_config"]["MQTT_PWD"] + connect_str + + str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT") + inference_gateway_process = ServerConstants.exec_console_with_script( + "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " + "END_POINT_NAME=\"{}\" " + "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " + "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " + "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " + "--log-level critical".format( + redis_addr, redis_port, redis_password, end_point_name, + model_name, model_version, "", fedml.get_env_version(), use_mqtt_inference, + use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), + fedml_base_dir), + should_capture_stdout=False, should_capture_stderr=False) + + return inference_gateway_process + + return None + + @staticmethod + def start_device_inference_monitor( + run_id, end_point_name, model_id, 
model_name, model_version, check_stopped_event=True,
+            redis_addr=None, redis_port=None, redis_password=None
+    ):
+        # start inference monitor server
+        logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...")
+        run_id_str = str(run_id)
+        pip_source_dir = os.path.dirname(__file__)
+        monitor_file = os.path.join(pip_source_dir, "device_model_monitor.py")
+        python_program = get_python_program()
+        running_model_name = ServerConstants.get_running_model_name(
+            end_point_name, model_name, model_version, run_id, model_id)
+        monitor_process = ServerConstants.exec_console_with_shell_script_list(
+            [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str,
+             "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name,
+             "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr,
+             "-rp", redis_port, "-rpw", redis_password],
+            should_capture_stdout=False, should_capture_stderr=False
+        )
+        return monitor_process
+
+    @staticmethod
+    def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version):
+        # stop inference monitor server
+        logging.info(f"stop the model inference monitor, end point {run_id}, model name {model_name}...")
+        sys_utils.cleanup_model_monitor_processes(
+            run_id, end_point_name, model_id, model_name, model_version)
+
+    @staticmethod
+    def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_password=None):
+        # noinspection PyBroadException
+        try:
+            history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs()
+            for job in history_jobs.job_list:
+                if job.running_json is None:
+                    continue
+
+                if job.deployment_result == "":
+                    continue
+
+                run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \
+                    model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \
+                    inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \
+                    FedMLDeployMasterJobRunner.parse_model_run_params(json.loads(job.running_json))
+
+                FedMLModelCache.get_instance().set_redis_params(redis_addr, redis_port, redis_password)
+                is_activated = FedMLModelCache.get_instance(redis_addr, redis_port). \
+                    get_end_point_activation(run_id)
+                if not is_activated:
+                    continue
+
+                FedMLDeployMasterJobRunner.start_device_inference_gateway(
+                    run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port,
+                    redis_addr=FedMLDeployMasterJobRunner.default_redis_addr,
+                    redis_port=FedMLDeployMasterJobRunner.default_redis_port,
+                    redis_password=FedMLDeployMasterJobRunner.default_redis_password)
+
+                FedMLDeployMasterJobRunner.stop_device_inference_monitor(
+                    run_id, end_point_name, model_id, model_name, model_version)
+                FedMLDeployMasterJobRunner.start_device_inference_monitor(
+                    run_id, end_point_name, model_id, model_name, model_version,
+                    redis_addr=FedMLDeployMasterJobRunner.default_redis_addr,
+                    redis_port=FedMLDeployMasterJobRunner.default_redis_port,
+                    redis_password=FedMLDeployMasterJobRunner.default_redis_password
+                )
+        except Exception as e:
+            logging.error("recover inference and monitor: {}".format(traceback.format_exc()))
+
+    def send_first_scroll_update_msg(self):
+        """
+        Replica-level rolling update.
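+        Illustrative shape of request_json["replica_version_diff"], inferred from how
+        it is consumed below (outer key: device id, inner keys: replica numbers to
+        roll): {"1001": {"1": {...}, "2": {...}}}.
+        This method drives the first chunk of the rolling window; subsequent chunks
+        are sent by send_next_scroll_update_msg() as replica results arrive.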
+ Delete the record of the replaced device and send the deployment msg to the devices + """ + if "replica_version_diff" not in self.request_json or self.request_json["replica_version_diff"] is None: + return [] + + first_chunk_dict = self.request_json["replica_version_diff"] + + # Delete the record of the replaced device + self.delete_device_replica_info_on_master(first_chunk_dict) + + # Send the deployment msg to the devices, (we reuse the start_deployment msg) + for edge_id in first_chunk_dict.keys(): + if edge_id == self.edge_id: + continue + # send start deployment request to each device + self.send_deployment_start_request_to_edge(edge_id) + return list(first_chunk_dict.keys()) + + def send_next_scroll_update_msg(self, device_id, replica_no): + if replica_no is None: + return + + replica_controller = self.replica_controller + + if replica_controller.total_replica_version_diff_num == 0: + return + + replica_controller.callback_update_updating_window(device_id, replica_no) + + # Decide whether to send the next scroll update + next_chunk_dict = replica_controller.get_next_chunk_devices_replica() + + replica_controller.curr_replica_updating_window = copy.deepcopy(next_chunk_dict) + + if next_chunk_dict: + self.request_json["replica_version_diff"] = next_chunk_dict + self.delete_device_replica_info_on_master(next_chunk_dict) + + # Send the deployment msg to the devices, (we reuse the start_deployment msg) + for edge_id in next_chunk_dict.keys(): + if edge_id == self.edge_id: + continue + # send start deployment request to each device + self.send_deployment_start_request_to_edge(edge_id) + return + + def delete_device_replica_info_on_master(self, edge_id_replica_no_dict): + FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + # Remove the record of the replaced device + # [Deprecated] deployment status & device info + # Delete the result in deployment result list in Redis / SQLite + device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ + get_deployment_result_list(self.request_json["end_point_id"], self.request_json["end_point_name"], + self.request_json["model_config"]["model_name"]) + delete_device_result_list = [] + for device_result in device_result_list: + device_result_dict = json.loads(device_result) + if (str(device_result_dict["cache_device_id"]) in edge_id_replica_no_dict.keys() and + str(device_result_dict["cache_replica_no"]) in + edge_id_replica_no_dict[str(device_result_dict["cache_device_id"])]): + delete_device_result_list.append(device_result) + + for delete_item in delete_device_result_list: + FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result( + delete_item, self.request_json["end_point_id"], + self.request_json["end_point_name"], + self.request_json["model_config"]["model_name"] + ) + + logging.info(f"Deleted the record of the replaced device {delete_device_result_list}") + + def save_deployed_replica_payload(self, payload_json): + self.deployed_replica_payload = copy.deepcopy(payload_json) + + def get_deployed_replica_payload(self): + return self.deployed_replica_payload + + def callback_update_curr_replica_num_state(self, changed_device_id, replica_no, op_type): + if self.replica_controller is not None: + self.replica_controller.callback_update_curr_replica_num_state(changed_device_id, replica_no, op_type) + + def is_all_replica_num_reconciled(self): + if self.replica_controller is not None: + return self.replica_controller.is_all_replica_num_reconciled() + + return False + + def is_all_replica_version_reconciled(self): + if self.replica_controller is not None: + return self.replica_controller.is_all_replica_version_reconciled() + + return False + + @staticmethod + def generate_request_json_with_replica_diff(run_id, edge_id, request_json): + # Replica Controller is per deployment! 
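+        # Two-pass sketch of what this helper does (the dict contents below are
+        # illustrative; the real values are produced by FedMLDeviceReplicaController):
+        #   1. generate_diff_to_request_json() attaches the replica-number diff,
+        #      i.e. how many replicas each worker should add or remove;
+        #   2. init_first_update_device_replica_mapping() attaches
+        #      "replica_version_diff", e.g. {"1001": {"1": {...}}} (device id ->
+        #      replica numbers to roll), later consumed by send_first_scroll_update_msg().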
+ replica_controller = FedMLDeviceReplicaController(edge_id, request_json) + logging.info(f"Start Diff Replica controller for run {run_id} on edge {edge_id}") + + # Prepare num diff + run_id_str = str(run_id) + new_request_with_num_diff = replica_controller.generate_diff_to_request_json() + request_json = new_request_with_num_diff + + # Prepare version diff + new_request_with_version_diff = replica_controller.init_first_update_device_replica_mapping() + request_json = new_request_with_version_diff + + return request_json + + @staticmethod + def parse_model_run_params(running_json): + run_id = running_json["end_point_id"] + end_point_name = running_json["end_point_name"] + token = running_json["token"] + user_id = running_json["user_id"] + user_name = running_json["user_name"] + device_ids = running_json["device_ids"] + device_objs = running_json["device_objs"] + + model_config = running_json["model_config"] + model_name = model_config["model_name"] + model_id = model_config["model_id"] + model_storage_url = model_config["model_storage_url"] + scale_min = model_config.get("instance_scale_min", 0) + scale_max = model_config.get("instance_scale_max", 0) + inference_engine = model_config.get("inference_engine", 0) + model_is_from_open = model_config["is_from_open"] + inference_end_point_id = run_id + use_gpu = "gpu" # TODO: Get GPU from device infos + memory_size = "256m" # TODO: Get Memory size for each instance + model_version = model_config["model_version"] + model_config_parameters = running_json.get("parameters", {}) + + inference_port = model_config_parameters.get("server_internal_port", # Internal port is for the gateway + ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) + inference_port_external = model_config_parameters.get("server_external_port", inference_port) + + return run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ + model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ + inference_end_point_id, use_gpu, memory_size, model_version, inference_port + + # Override + def get_download_package_info(self, packages_config=None): + model_name = packages_config["model_name"] + model_storage_url = packages_config["model_storage_url"] + return model_name, model_storage_url + + # Override + def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir): + pass + + # Override + def build_dynamic_constrain_variables(self, run_id, run_config): + pass diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py new file mode 100755 index 0000000000..40896b9ee8 --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py @@ -0,0 +1,62 @@ + +import json +from fedml.core.common.singleton import Singleton +from ..master.base_master_job_runner_manager import FedMLBaseMasterJobRunnerManager +from .master_job_runner import FedMLDeployMasterJobRunner +from ..scheduler_core.general_constants import GeneralConstants + + +class FedMLDeployJobRunnerManager(FedMLBaseMasterJobRunnerManager, Singleton): + def __init__(self): + FedMLBaseMasterJobRunnerManager.__init__(self) + + @staticmethod + def get_instance(): + return FedMLDeployJobRunnerManager() + + # Override + def _generate_job_runner_instance( + self, args, run_id=None, request_json=None, agent_config=None, edge_id=None + ): + job_runner = FedMLDeployMasterJobRunner( + args, run_id=run_id, 
request_json=request_json, agent_config=agent_config, edge_id=edge_id) + job_runner.infer_host = GeneralConstants.get_ip_address(request_json) + return job_runner + + def save_deployment_result(self, topic, payload): + payload_json = json.loads(payload) + endpoint_id = payload_json["end_point_id"] + run_id_str = str(endpoint_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].save_deployment_result(topic=topic, payload=payload) + + def send_deployment_stages( + self, end_point_id, model_name, model_id, model_inference_url, + model_stages_index, model_stages_title, model_stage_detail, message_center=None + ): + run_id_str = str(end_point_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].send_deployment_stages( + end_point_id, model_name, model_id, model_inference_url, + model_stages_index, model_stages_title, model_stage_detail, + message_center=message_center + ) + + def send_deployment_delete_request_to_edges(self, end_point_id, payload, model_msg_object): + run_id_str = str(end_point_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].send_deployment_delete_request_to_edges(payload, model_msg_object) + + def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version): + run_id_str = str(run_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].stop_device_inference_monitor( + run_id, end_point_name, model_id, model_name, model_version) + + @staticmethod + def recover_inference_and_monitor(): + FedMLDeployMasterJobRunner.recover_inference_and_monitor() + + @staticmethod + def generate_request_json_with_replica_diff(run_id, edge_id, request_json): + return FedMLDeployMasterJobRunner.generate_request_json_with_replica_diff(run_id, edge_id, request_json) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py new file mode 100755 index 0000000000..e8be50f77f --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -0,0 +1,365 @@ + +import json +import logging +import os +from fedml.core.mlops import MLOpsConfigs, MLOpsRuntimeLog, MLOpsRuntimeLogDaemon +from .device_model_cache import FedMLModelCache +from .device_model_db import FedMLModelDatabase +from .device_model_msg_object import FedMLModelMsgObject +from .device_server_constants import ServerConstants +from .device_server_data_interface import FedMLServerDataInterface +from ..master.base_master_protocol_manager import FedMLBaseMasterProtocolManager +from .master_job_runner_manager import FedMLDeployJobRunnerManager +from ..scheduler_core.general_constants import GeneralConstants +from ..scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol + + +class FedMLDeployMasterProtocolManager(FedMLBaseMasterProtocolManager): + def __init__(self, args, agent_config=None): + FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config) + + self.topic_start_deployment = None + self.topic_activate_endpoint = None + self.topic_deactivate_deployment = None + self.topic_delete_deployment = None + + self.infer_host = "127.0.0.1" + self.redis_addr = "local" + self.redis_port = "6379" + self.redis_password = "fedml_default" + self.endpoint_sync_protocol = None + + # Override + def _generate_protocol_manager_instance(self, args, agent_config=None): + return 
FedMLDeployMasterProtocolManager(args, agent_config=agent_config)
+
+    # Override
+    def generate_topics(self):
+        super().generate_topics()
+
+        # The topic for starting deployment
+        self.topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id))
+
+        # The topic for activating the endpoint
+        self.topic_activate_endpoint = "model_ops/model_device/activate_deployment/{}".format(str(self.edge_id))
+
+        # The topic for deactivating the endpoint
+        self.topic_deactivate_deployment = "model_ops/model_device/deactivate_deployment/{}".format(str(self.edge_id))
+
+        # The topic for deleting the endpoint
+        self.topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id))
+
+        # Subscribe topics for endpoints
+        self.add_subscribe_topic(self.topic_start_deployment)
+        self.add_subscribe_topic(self.topic_activate_endpoint)
+        self.add_subscribe_topic(self.topic_deactivate_deployment)
+        self.add_subscribe_topic(self.topic_delete_deployment)
+
+    # Override
+    def add_protocol_handler(self):
+        super().add_protocol_handler()
+
+        # Add the message listeners for endpoint related topics
+        self.add_message_listener(self.topic_start_deployment, self.callback_start_deployment)
+        self.add_message_listener(self.topic_activate_endpoint, self.callback_activate_deployment)
+        self.add_message_listener(self.topic_deactivate_deployment, self.callback_deactivate_deployment)
+        self.add_message_listener(self.topic_delete_deployment, self.callback_delete_deployment)
+
+    # Override
+    def _get_job_runner_manager(self):
+        return FedMLDeployJobRunnerManager.get_instance()
+
+    # Override
+    def _init_extra_items(self):
+        # Init local database
+        FedMLServerDataInterface.get_instance().create_job_table()
+        try:
+            FedMLModelDatabase.get_instance().set_database_base_dir(ServerConstants.get_database_dir())
+            FedMLModelDatabase.get_instance().create_table()
+        except Exception as e:
+            pass
+
+        FedMLDeployJobRunnerManager.recover_inference_and_monitor()
+
+    # Override
+    def _process_connection_ready(self):
+        self.endpoint_sync_protocol = FedMLEndpointSyncProtocol(
+            agent_config=self.agent_config, mqtt_mgr=self.message_center)
+        self.endpoint_sync_protocol.setup_listener_for_sync_device_info(self.edge_id)
+
+        MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+
+    # Override
+    def _process_connection_lost(self):
+        pass
+
+    # Override
+    def print_connected_info(self):
+        pass
+
+    def callback_deployment_result_message(self, topic=None, payload=None):
+        logging.info(f"Received deployment result on topic {topic}")
+        FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload)
+
+    def callback_delete_deployment(self, topic, payload):
+        # Parse payload as the model message object.
+        logging.info("[Master] callback_delete_deployment")
+        model_msg_object = FedMLModelMsgObject(topic, payload)
+
+        # Set end point as deactivated status
+        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+            set_end_point_activation(model_msg_object.inference_end_point_id,
+                                     model_msg_object.end_point_name, False)
+        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+            delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name,
+                             model_msg_object.model_name, model_msg_object.model_version)
+
+        FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(
+            model_msg_object.inference_end_point_id, payload, model_msg_object)
+
+        FedMLDeployJobRunnerManager.get_instance().stop_job_runner(model_msg_object.run_id)
+
+        FedMLDeployJobRunnerManager.get_instance().stop_device_inference_monitor(
+            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_id,
+            model_msg_object.model_name, model_msg_object.model_version)
+
+        FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id)
+        FedMLModelDatabase.get_instance().delete_deployment_result(
+            model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name,
+            model_version=model_msg_object.model_version)
+        FedMLModelDatabase.get_instance().delete_deployment_run_info(
+            end_point_id=model_msg_object.inference_end_point_id)
+
+    def callback_start_deployment(self, topic, payload):
+        # noinspection PyBroadException
+        try:
+            MLOpsConfigs.fetch_all_configs()
+        except Exception as e:
+            pass
+
+        # Parse the deployment parameters
+        request_json = json.loads(payload)
+        run_id = request_json["end_point_id"]
+        end_point_name = request_json["end_point_name"]
+        token = request_json["token"]
+        user_id = request_json["user_id"]
+        user_name = request_json["user_name"]
+        device_ids = request_json["device_ids"]
+        device_objs = request_json["device_objs"]
+        model_config = request_json["model_config"]
+        model_name = model_config["model_name"]
+        model_id = model_config["model_id"]
+        model_storage_url = model_config["model_storage_url"]
+        scale_min = model_config.get("instance_scale_min", 0)
+        scale_max = model_config.get("instance_scale_max", 0)
+        inference_engine = model_config.get("inference_engine", 0)
+        inference_end_point_id = run_id
+
+        # Start log processor for current run
+        self.args.run_id = run_id
+        self.args.edge_id = self.edge_id
+        MLOpsRuntimeLog.get_instance(self.args).init_logs()
+        MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source(
+            ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT)
+        MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id)
+
+        # Generate the new deployment parameters
+        logging.info("callback_start_deployment {}".format(payload))
+        run_id = inference_end_point_id
+        run_id_str = str(run_id)
+        request_json["run_id"] = run_id
+        self.request_json = request_json
+        self.running_request_json[run_id_str] = request_json
+        diff_devices, diff_version = self.get_diff_devices(run_id)
+        self.request_json["diff_devices"] = diff_devices
+        self.request_json["diff_version"] = diff_version
+        self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json)
+
+        # Save the endpoint device info
+        self.init_device_update_map()
+        FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+            set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs))
+
+        # Save the endpoint token
+        usr_indicated_token = FedMLDeployMasterProtocolManager.get_usr_indicated_token(request_json)
+        if usr_indicated_token != "":
+            logging.info(f"Change token from {token} to {usr_indicated_token}")
+            token = usr_indicated_token
+        FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ + set_end_point_token(run_id, end_point_name, model_name, token) + + # Subscribe deployment result messages from slave devices + self.subscribe_deployment_messages_from_slave_devices(request_json) + + # Send stage: MODEL_DEPLOYMENT_STAGE1 = "Received" + FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( + self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"], + ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for end point {}".format(run_id), + message_center=self.message_center) + + # Send stage: MODEL_DEPLOYMENT_STAGE2 = "Initializing" + FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( + self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"], + ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], + message_center=self.message_center) + + # Save the runner info + ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id) + + # Start the job runner to deploy models + self.running_request_json[run_id_str] = FedMLDeployJobRunnerManager.generate_request_json_with_replica_diff( + run_id, self.edge_id, request_json + ) + self._get_job_runner_manager().start_job_runner( + run_id, request_json, args=self.args, edge_id=self.edge_id, + sender_message_queue=self.message_center.get_sender_message_queue(), + listener_message_queue=self.get_listener_message_queue(), + status_center_queue=self.get_status_queue() + ) + process = self._get_job_runner_manager().get_runner_process(run_id) + if process is not None: + ServerConstants.save_run_process(run_id, process.pid) + + # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner" + FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( + self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"], + ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"], + message_center=self.message_center) + + def callback_activate_deployment(self, topic, payload): + logging.info("callback_activate_deployment: topic = %s, payload = %s" % (topic, payload)) + + # Parse payload as the model message object. + model_msg_object = FedMLModelMsgObject(topic, payload) + + # Get the previous deployment status. + FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ + get_end_point_status(model_msg_object.inference_end_point_id) + if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED: + return + + # Set end point as activated status + FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation( + model_msg_object.inference_end_point_id, model_msg_object.end_point_name, True) + + def callback_deactivate_deployment(self, topic, payload): + logging.info("callback_deactivate_deployment: topic = %s, payload = %s" % (topic, payload)) + + # Parse payload as the model message object. + model_msg_object = FedMLModelMsgObject(topic, payload) + + # Get the endpoint status + FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\
+            get_end_point_status(model_msg_object.inference_end_point_id)
+        if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED:
+            return
+
+        # Set end point as deactivated status
+        FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation(
+            model_msg_object.inference_end_point_id, model_msg_object.end_point_name, False)
+
+    def get_diff_devices(self, run_id) -> (dict, dict):
+        """
+        Compute {device_id(int): "op: add" | "op: delete" | "op: replace"}:
+            "op: add"     -> the device needs to be added
+            "op: delete"  -> the device needs to be deleted
+            "op: replace" -> restart the container of the device on the same port with the new (same) model pkg
+        and {device_id(int): "old_version"}.
+        For example, a device already deployed with model version v1 while the request
+        carries v2 yields {device_id: "op: replace"} together with {device_id: "v1"}.
+        """
+        try:
+            logging.info(f"Get diff devices for run {run_id}")
+            request_json = self.running_request_json.get(str(run_id))
+
+            diff_devices = {}
+            diff_version = {}
+            FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+            device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                get_end_point_device_info(run_id)
+            if device_objs is None:
+                for new_device_id in request_json["device_ids"]:
+                    diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION
+            else:
+                device_objs_dict = json.loads(device_objs)
+                device_ids_frm_db = [d["id"] for d in device_objs_dict]
+
+                for exist_device_id in device_ids_frm_db:
+                    if exist_device_id not in request_json["device_ids"]:
+                        diff_devices[exist_device_id] = ServerConstants.DEVICE_DIFF_DELETE_OPERATION
+
+                for new_device_id in request_json["device_ids"]:
+                    if new_device_id not in device_ids_frm_db:
+                        diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION
+                    else:
+                        if new_device_id == self.edge_id:
+                            continue
+
+                        old_version = self.should_update_device(request_json, new_device_id)
+                        if old_version:
+                            diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_REPLACE_OPERATION
+                            diff_version[new_device_id] = old_version
+                        else:
+                            pass
+            logging.info(f"Diff devices: {diff_devices}")
+        except Exception as e:
+            error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/{run_id}_error.txt"
+            if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))):
+                os.makedirs(os.path.dirname(os.path.expanduser(error_log_path)))
+            with open(os.path.expanduser(error_log_path), "w") as f:
+                f.write(str(e))
+            raise e
+        return diff_devices, diff_version
+
+    def should_update_device(self, payload, new_device_id):
+        """
+        Query the deployment result in the local Redis cache; if the deployed model
+        version differs from the one in the payload, return the old model version.
+        """
+        device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ + get_deployment_result_list(self.request_json["end_point_id"], + self.request_json["end_point_name"], + self.request_json["model_config"]["model_name"]) + + for device_result in device_result_list: + if device_result is None: + continue + device_result_dict = json.loads(device_result) + + if int(device_result_dict["cache_device_id"]) == new_device_id: + result_body = json.loads(device_result_dict["result"]) + if result_body["model_version"] != payload["model_config"]["model_version"]: + return result_body["model_version"] + else: + return None + return None + + @staticmethod + def get_usr_indicated_token(request_json) -> str: + usr_indicated_token = "" + if "parameters" in request_json and "authentication_token" in request_json["parameters"]: + usr_indicated_token = request_json["parameters"]["authentication_token"] + return usr_indicated_token + + def init_device_update_map(self): + # [Deprecated] Use the replica controller to manage the device update + pass + + def subscribe_deployment_messages_from_slave_devices(self, request_json): + if request_json is None: + return + run_id = request_json["run_id"] + edge_id_list = request_json["device_ids"] + logging.info("Edge ids: " + str(edge_id_list)) + for edge_id in edge_id_list: + if str(edge_id) == str(self.edge_id): + continue + # subscribe deployment result message for each model device + deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(edge_id) + self.add_message_listener(deployment_results_topic, self.callback_deployment_result_message) + self.subscribe_msg(deployment_results_topic) + + logging.info("subscribe device messages {}".format(deployment_results_topic)) diff --git a/python/fedml/computing/scheduler/model_scheduler/model_device_client.py b/python/fedml/computing/scheduler/model_scheduler/model_device_client.py index f397c5421f..05f43afc5f 100755 --- a/python/fedml/computing/scheduler/model_scheduler/model_device_client.py +++ b/python/fedml/computing/scheduler/model_scheduler/model_device_client.py @@ -1,16 +1,12 @@ -import json + +import copy import logging import multiprocessing -import os import time import traceback from multiprocessing import Process - -import click -from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants - -from fedml.computing.scheduler.model_scheduler import device_client_runner -from fedml.computing.scheduler.model_scheduler import device_client_constants +from ..scheduler_core.account_manager import FedMLAccountManager +from .worker_agent import FedMLDeployWorkerAgent class FedMLModelDeviceClientRunner: @@ -18,8 +14,7 @@ def __init__(self, args, current_device_id, os_name, is_from_docker, service_con self.agent_process = None self.agent_runner = None self.agent_process_event = None - self.real_client_runner = None - self.args = args + self.args = copy.deepcopy(args) self.service_config = service_config self.unique_device_id = None self.current_device_id = current_device_id @@ -31,8 +26,6 @@ def __init__(self, args, current_device_id, os_name, is_from_docker, service_con self.redis_port = "6379" self.redis_password = "fedml_default" - self.agent_runner = None - def get_edge_id(self): return self.edge_id @@ -45,33 +38,34 @@ def start(self): self.agent_runner.redis_password = self.redis_password if self.agent_process_event is None: self.agent_process_event = multiprocessing.Event() - self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event,)) - self.edge_id = self.bind_device(init_params=False) + 
self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event, self.args,)) + self.edge_id = self.bind_device() self.agent_process.start() - def run_entry(self, process_event): + def run_entry(self, process_event, in_args): # print(f"Model worker process id {os.getpid()}") self.agent_process_event = process_event + worker_agent = FedMLDeployWorkerAgent() + while not self.agent_process_event.is_set(): try: try: - if self.real_client_runner is not None: - self.real_client_runner.stop_agent() + worker_agent.logout() except Exception as e: pass - self.bind_device() - - self.start_agent() + worker_agent.login( + in_args.account_id, api_key=in_args.api_key, device_id=in_args.device_id, + os_name=in_args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM + ) except Exception as e: logging.info("Restart model device client: {}".format(traceback.format_exc())) pass finally: try: - if self.real_client_runner is not None: - self.real_client_runner.stop_agent() + worker_agent.logout() except Exception as e: pass time.sleep(15) @@ -87,100 +81,18 @@ def check_runner_stop_event(self): raise Exception("Runner stopped") def stop(self): - if self.real_client_runner is not None: - self.real_client_runner.stop_agent() + FedMLDeployWorkerAgent.logout() if self.agent_process_event is not None: self.agent_process_event.set() - def get_binding_unique_device_id(self, current_device_id, os_name, is_from_docker=False): - role_str = "OnPremise" - - # Judge whether running from fedml docker hub - is_from_fedml_docker_hub = False - dock_loc_file = device_client_constants.ClientConstants.get_docker_location_file() - if os.path.exists(dock_loc_file): - is_from_fedml_docker_hub = True - - # Build unique device id - is_from_k8s = device_client_constants.ClientConstants.is_running_on_k8s() - if is_from_k8s: - unique_device_id = current_device_id + "@" + os_name + ".MDA.K8S." + role_str + ".Device" - elif is_from_docker: - unique_device_id = current_device_id + "@" + os_name + ".MDA.Docker." + role_str + ".Device" + def bind_device(self): + # Login account + login_result = FedMLAccountManager.get_instance().login( + self.args.account_id, api_key=self.args.api_key, device_id=self.args.device_id, + os_name=self.args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM + ) + if login_result is not None: + return login_result.edge_id else: - unique_device_id = current_device_id + "@" + os_name + ".MDA." + role_str + ".Device" - if is_from_fedml_docker_hub: - unique_device_id = current_device_id + "@" + os_name + ".MDA.DockerHub." + role_str + ".Device" - - return unique_device_id - - def init_logs_param(self, edge_id): - # Init runtime logs - self.args.log_file_dir = device_client_constants.ClientConstants.get_log_file_dir() - self.args.run_id = 0 - self.args.role = "client" - client_ids = list() - client_ids.append(edge_id) - self.args.client_id_list = json.dumps(client_ids) - setattr(self.args, "using_mlops", True) - - def bind_device(self, init_params=True): - self.unique_device_id = self.get_binding_unique_device_id(self.current_device_id, self.os_name, - self.is_from_docker) - - # Create client runner for communication with the FedML server. - if self.real_client_runner is None: - self.real_client_runner = device_client_runner.FedMLClientRunner(self.args) - - # Bind account id to the ModelOps platform. 
- register_try_count = 0 - edge_id = -1 - user_name = None - extra_url = None - while register_try_count < 5: - try: - edge_id, user_name, extra_url = self.real_client_runner.bind_account_and_device_id( - self.service_config["ml_ops_config"]["EDGE_BINDING_URL"], self.args.account_id, - self.unique_device_id, self.os_name - ) - if edge_id > 0: - self.real_client_runner.edge_id = edge_id - break - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - register_try_count += 1 - time.sleep(3) - continue - - if edge_id <= 0: - click.echo("") - click.echo("Oops, you failed to login the FedML ModelOps platform.") - click.echo("Please check whether your network is normal!") - return - self.edge_id = edge_id - - # Init runtime logs - if init_params: - setattr(self.args, "client_id", edge_id) - self.init_logs_param(edge_id) - self.real_client_runner.args = self.args - self.real_client_runner.user_name = user_name - - return edge_id - - def start_agent(self): - self.real_client_runner.unique_device_id = self.unique_device_id - device_client_constants.ClientConstants.save_runner_infos(self.current_device_id + "." + self.os_name, - self.edge_id, run_id=0) - - # Setup MQTT connection for communication with the FedML server. - self.real_client_runner.infer_host = self.infer_host - self.real_client_runner.redis_addr = self.redis_addr - self.real_client_runner.redis_port = self.redis_port - self.real_client_runner.redis_password = self.redis_password - self.real_client_runner.setup_agent_mqtt_connection(self.service_config) - - # Start mqtt looper - self.real_client_runner.start_agent_mqtt_loop(should_exit_sys=False) + return None diff --git a/python/fedml/computing/scheduler/model_scheduler/model_device_server.py b/python/fedml/computing/scheduler/model_scheduler/model_device_server.py index 01228125aa..b2ecd144b1 100755 --- a/python/fedml/computing/scheduler/model_scheduler/model_device_server.py +++ b/python/fedml/computing/scheduler/model_scheduler/model_device_server.py @@ -1,16 +1,12 @@ -import json + +import copy import logging import multiprocessing -import os import time import traceback from multiprocessing import Process - -import click -from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants - -from fedml.computing.scheduler.model_scheduler import device_server_runner -from fedml.computing.scheduler.model_scheduler import device_server_constants +from ..scheduler_core.account_manager import FedMLAccountManager +from .master_agent import FedMLDeployMasterAgent class FedMLModelDeviceServerRunner: @@ -18,8 +14,7 @@ def __init__(self, args, current_device_id, os_name, is_from_docker, service_con self.agent_process = None self.agent_runner = None self.agent_process_event = None - self.real_server_runner = None - self.args = args + self.args = copy.deepcopy(args) self.service_config = service_config self.unique_device_id = None self.current_device_id = current_device_id @@ -30,7 +25,6 @@ def __init__(self, args, current_device_id, os_name, is_from_docker, service_con self.redis_addr = "local" self.redis_port = "6379" self.redis_password = "fedml_default" - self.agent_runner = None def get_edge_id(self): return self.edge_id @@ -44,33 +38,33 @@ def start(self): self.agent_runner.redis_password = self.redis_password if self.agent_process_event is None: self.agent_process_event = multiprocessing.Event() - self.agent_process = 
Process(target=self.agent_runner.run_entry, args=(self.agent_process_event,)) - self.edge_id = self.bind_device(init_params=False) + self.agent_process = Process(target=self.agent_runner.run_entry, args=(self.agent_process_event, self.args)) + self.edge_id = self.bind_device() self.agent_process.start() - def run_entry(self, process_event): + def run_entry(self, process_event, in_args): # print(f"Model master process id {os.getpid()}") self.agent_process_event = process_event + master_agent = FedMLDeployMasterAgent() while not self.agent_process_event.is_set(): try: try: - if self.real_server_runner is not None: - self.real_server_runner.stop_agent() + master_agent.logout() except Exception as e: pass - self.bind_device() - - self.start_agent() + master_agent.login( + in_args.account_id, api_key=in_args.api_key, device_id=in_args.device_id, + os_name=in_args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM + ) except Exception as e: logging.info("Restart model device server: {}".format(traceback.format_exc())) pass finally: try: - if self.real_server_runner is not None: - self.real_server_runner.stop_agent() + master_agent.logout() except Exception as e: pass time.sleep(15) @@ -86,104 +80,18 @@ def check_runner_stop_event(self): raise Exception("Runner stopped") def stop(self): - if self.real_server_runner is not None: - self.real_server_runner.stop_agent() + FedMLDeployMasterAgent.logout() if self.agent_process_event is not None: self.agent_process_event.set() - def get_binding_unique_device_id(self, current_device_id, os_name, is_from_docker=False): - role_str = "OnPremise" - - # Judge whether running from fedml docker hub - is_from_fedml_docker_hub = False - dock_loc_file = device_server_constants.ServerConstants.get_docker_location_file() - if os.path.exists(dock_loc_file): - is_from_fedml_docker_hub = True - - # Build unique device id - is_from_k8s = device_server_constants.ServerConstants.is_running_on_k8s() - if is_from_k8s: - unique_device_id = current_device_id + "@" + os_name + ".MDA.K8S." + role_str + ".Master.Device" - elif is_from_docker: - unique_device_id = current_device_id + "@" + os_name + ".MDA.Docker." + role_str + ".Master.Device" + def bind_device(self): + # Login account + login_result = FedMLAccountManager.get_instance().login( + self.args.account_id, api_key=self.args.api_key, device_id=self.args.device_id, + os_name=self.args.os_name, role=FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM + ) + if login_result is not None: + return login_result.edge_id else: - unique_device_id = current_device_id + "@" + os_name + ".MDA." + role_str + ".Master.Device" - - if is_from_fedml_docker_hub: - unique_device_id = current_device_id + "@" + os_name + ".MDA.DockerHub." + role_str + ".Master.Device" - - return unique_device_id - - def init_logs_param(self, edge_id): - self.args.log_file_dir = device_server_constants.ServerConstants.get_log_file_dir() - self.args.run_id = 0 - self.args.role = "server" - self.args.edge_id = edge_id - setattr(self.args, "using_mlops", True) - setattr(self.args, "server_agent_id", edge_id) - - def bind_device(self, init_params=True): - self.unique_device_id = self.get_binding_unique_device_id(self.current_device_id, self.os_name, - self.is_from_docker) - - # Create client runner for communication with the FedML server. - if self.real_server_runner is None: - self.real_server_runner = device_server_runner.FedMLServerRunner(self.args) - - # Bind account id to the ModelOps platform. 
- register_try_count = 0 - edge_id = -1 - user_name = None - extra_url = None - while register_try_count < 5: - try: - edge_id, user_name, extra_url = self.real_server_runner.bind_account_and_device_id( - self.service_config["ml_ops_config"]["EDGE_BINDING_URL"], self.args.account_id, - self.unique_device_id, self.os_name - ) - if edge_id > 0: - self.real_server_runner.edge_id = edge_id - break - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - register_try_count += 1 - time.sleep(3) - continue - - if edge_id <= 0: - click.echo("") - click.echo("Oops, you failed to login the FedML ModelOps platform.") - click.echo("Please check whether your network is normal!") - return - self.edge_id = edge_id - - # Init runtime logs - if init_params: - setattr(self.args, "client_id", edge_id) - self.real_server_runner.infer_host = self.infer_host - self.real_server_runner.redis_addr = self.redis_addr - self.real_server_runner.redis_port = self.redis_port - self.real_server_runner.redis_password = self.redis_password - self.init_logs_param(edge_id) - self.real_server_runner.args = self.args - self.real_server_runner.run_as_edge_server_and_agent = True - self.real_server_runner.user_name = user_name - - return edge_id - - def start_agent(self): - # Log arguments and binding results. - # logging.info("login: unique_device_id = %s" % str(unique_device_id)) - # logging.info("login: edge_id = %s" % str(edge_id)) - self.real_server_runner.unique_device_id = self.unique_device_id - device_server_constants.ServerConstants.save_runner_infos(self.current_device_id + "." + self.os_name, - self.edge_id, run_id=0) - - # Setup MQTT connection for communication with the FedML server. 
- self.real_server_runner.infer_host = self.infer_host - self.real_server_runner.setup_agent_mqtt_connection(self.service_config) - - # Start mqtt looper - self.real_server_runner.start_agent_mqtt_loop(should_exit_sys=False) + return None diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_agent.py b/python/fedml/computing/scheduler/model_scheduler/worker_agent.py new file mode 100755 index 0000000000..bdbe5fc143 --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/worker_agent.py @@ -0,0 +1,27 @@ + +from .device_client_constants import ClientConstants +from .device_client_data_interface import FedMLClientDataInterface +from .worker_protocol_manager import FedMLDeployWorkerProtocolManager +from ..slave.base_slave_agent import FedMLBaseSlaveAgent + + +class FedMLDeployWorkerAgent(FedMLBaseSlaveAgent): + + def __init__(self): + FedMLBaseSlaveAgent.__init__(self) + + # Override + def _get_log_file_dir(self): + return ClientConstants.get_log_file_dir() + + # Override + def _save_agent_info(self, unique_device_id, edge_id): + ClientConstants.save_runner_infos(unique_device_id, edge_id) + + # Override + def _init_database(self): + FedMLClientDataInterface.get_instance().create_job_table() + + # Override + def _generate_protocol_manager_instance(self, args, agent_config=None): + return FedMLDeployWorkerProtocolManager(args, agent_config=agent_config) diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py new file mode 100755 index 0000000000..5d6f1a4d8e --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -0,0 +1,489 @@ + +import json +import logging +import os +import shutil +import time +import traceback +import urllib +from abc import ABC +from urllib.parse import urljoin, urlparse +import yaml +from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils +from fedml.core.mlops import MLOpsRuntimeLog +from .device_client_constants import ClientConstants +from .device_model_cache import FedMLModelCache +from ..scheduler_core.general_constants import GeneralConstants +from ..slave.base_slave_job_runner import FedMLBaseSlaveJobRunner +from .device_model_deployment import start_deployment +from .device_model_db import FedMLModelDatabase +from .device_replica_handler import FedMLDeviceReplicaHandler + + +class FedMLDeployWorkerJobRunner(FedMLBaseSlaveJobRunner, ABC): + + def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0, + cuda_visible_gpu_ids_str=None): + FedMLBaseSlaveJobRunner.__init__( + self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id, + cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ClientConstants.get_data_dir(), + agent_package_download_dir=ClientConstants.get_package_download_dir(), + agent_package_unzip_dir=GeneralConstants.get_package_unzip_dir(ClientConstants.get_package_download_dir()), + agent_log_file_dir=ClientConstants.get_log_file_dir() + ) + + self.infer_host = "127.0.0.1" + self.redis_addr = "local" + self.redis_port = "6379" + self.redis_password = "fedml_default" + self.model_is_from_open = False + self.replica_handler = None + + # Override + def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None): + return FedMLDeployWorkerJobRunner( + args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id + ) + + # 
Override + def _generate_extend_queue_list(self): + return None + + def retrieve_binary_model_file(self, package_name, package_url): + local_package_path = ClientConstants.get_model_package_dir() + if not os.path.exists(local_package_path): + os.makedirs(local_package_path, exist_ok=True) + unzip_package_path = ClientConstants.get_model_dir() + local_package_file = "{}".format(os.path.join(local_package_path, package_name)) + if os.path.exists(local_package_file): + os.remove(local_package_file) + package_url_without_query_path = urljoin(package_url, urlparse(package_url).path) + urllib.request.urlretrieve(package_url_without_query_path, local_package_file, + reporthook=self.package_download_progress) + + unzip_package_path = os.path.join(unzip_package_path, package_name) + if not os.path.exists(unzip_package_path): + os.makedirs(unzip_package_path, exist_ok=True) + dst_model_file = os.path.join(unzip_package_path, package_name) + if os.path.exists(local_package_file): + shutil.copy(local_package_file, dst_model_file) + + return unzip_package_path, dst_model_file + + @staticmethod + def get_model_bin_file(unzip_package_full_path): + unzip_package_path = os.path.dirname(unzip_package_full_path) + model_bin_file = os.path.join(unzip_package_path, "fedml_model.bin") + return model_bin_file + + def update_local_fedml_config(self, run_id, model_config, model_config_parameters=None): + model_name = model_config["model_name"] + model_storage_url = model_config["model_storage_url"] + scale_min = model_config.get("instance_scale_min", 0) + scale_max = model_config.get("instance_scale_max", 0) + inference_engine = model_config.get("inference_engine", 0) + inference_end_point_id = run_id + + # Retrieve model package or model binary file. + if self.model_is_from_open: + unzip_package_path, model_bin_file = self.retrieve_binary_model_file(model_name, model_storage_url) + else: + unzip_package_path = self.retrieve_and_unzip_package(model_name, model_storage_url) + model_bin_file = FedMLDeployWorkerJobRunner.get_model_bin_file(unzip_package_path) + + # Load the config to memory + package_conf_object = {} + fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml") + + # Inject the config from UI to pkg yaml + package_conf_object = model_config_parameters + + # Save the config to local + with open(fedml_local_config_file, "w") as f: + yaml.dump(package_conf_object, f) + + logging.info("The package_conf_object is {}".format(package_conf_object)) + + return unzip_package_path, model_bin_file, package_conf_object + + def download_model_package(self, package_name, package_url): + # Copy config file from the client + unzip_package_path = self.retrieve_and_unzip_package( + package_name, package_url + ) + + return unzip_package_path + + # Override + def run_impl(self, run_extend_queue_list, sender_message_center, + listener_message_queue, status_center_queue): + run_id = self.request_json["end_point_id"] + end_point_name = self.request_json["end_point_name"] + token = self.request_json["token"] + user_id = self.request_json["user_id"] + user_name = self.request_json["user_name"] + device_ids = self.request_json["device_ids"] + device_objs = self.request_json["device_objs"] + master_ip = self.request_json["master_node_ip"] + + model_config = self.request_json["model_config"] + model_name = model_config["model_name"] + model_id = model_config["model_id"] + model_version = model_config["model_version"] + model_storage_url = model_config["model_storage_url"] + scale_min = 
model_config.get("instance_scale_min", 0) + scale_max = model_config.get("instance_scale_max", 0) + model_config_parameters = self.request_json["parameters"] + + self.replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json) + + inference_port = model_config_parameters.get("worker_internal_port", + ClientConstants.MODEL_INFERENCE_DEFAULT_PORT) + inference_port_external = model_config_parameters.get("worker_external_port", inference_port) + + if "using_triton" in model_config_parameters and model_config_parameters["using_triton"]: + inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON + else: + inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT + + logging.info("[Critical] The inference_engine is: {}".format(inference_engine)) + + self.model_is_from_open = True if model_config.get("is_from_open", 0) == 1 else False + if self.model_is_from_open: + model_net_url = model_config["model_net_url"] + inference_end_point_id = run_id + use_gpu = "gpu" # TODO: Get GPU from device infos + memory_size = "4096m" # TODO: Get Memory size for each instance + + self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id) + + self.check_runner_stop_event() + + logging.info("model deployment request: {}".format(self.request_json)) + + MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING, + is_from_model=True, running_json=json.dumps(self.request_json), run_id=run_id) + + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, + is_from_model=True, run_id=run_id) + + self.check_runner_stop_event() + + # update local config with real time parameters from server and dynamically replace variables value + logging.info("download and unzip model to local...") + unzip_package_path, model_bin_file, fedml_config_object = \ + self.update_local_fedml_config(run_id, model_config, model_config_parameters) + if unzip_package_path is None or fedml_config_object is None: + logging.info("failed to update local fedml config.") + self.check_runner_stop_event() + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + is_from_model=True, run_id=run_id) + return False + + logging.info("check downloaded packages...") + if not os.path.exists(unzip_package_path): + logging.info("failed to unzip file.") + self.check_runner_stop_event() + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + is_from_model=True, run_id=run_id) + return False + + # download model net and load into the torch model + model_from_open = None + self.model_is_from_open = None + + logging.info("start the model deployment...") + self.check_runner_stop_event() + running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ + "", "", model_version, {}, {} + + # Reconcile the replica number (op: add, remove) + prev_rank, op, op_num = self.replica_handler.reconcile_num_replica() + + # Reconcile the replica version (op: update) + replica_rank_to_update = [] + if not op: + replica_rank_to_update, op = self.replica_handler.reconcile_replica_version() + + if not op: + logging.info("No need to reconcile.") + return True + + if op == "add": + worker_ip = GeneralConstants.get_ip_address(self.request_json) + for rank in range(prev_rank+1, 
prev_rank+1+op_num): + # TODO: Support Rollback if this for loop failed + try: + running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ + start_deployment( + inference_end_point_id, end_point_name, model_id, model_version, + unzip_package_path, model_bin_file, model_name, inference_engine, + ClientConstants.INFERENCE_HTTP_PORT, + ClientConstants.INFERENCE_GRPC_PORT, + ClientConstants.INFERENCE_METRIC_PORT, + use_gpu, memory_size, + ClientConstants.INFERENCE_CONVERTOR_IMAGE, + ClientConstants.INFERENCE_SERVER_IMAGE, + worker_ip, + self.model_is_from_open, model_config_parameters, + model_from_open, + token, + master_ip, self.edge_id, master_device_id=device_ids[0], replica_rank=rank, + gpu_per_replica=int(self.replica_handler.gpu_per_replica) + ) + except Exception as e: + inference_output_url = "" + logging.error(f"Exception at deployment: {traceback.format_exc()}") + + if inference_output_url == "": + logging.error("failed to deploy the model...") + + result_payload = self.send_deployment_results( + end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, + model_id, model_name, inference_output_url, inference_model_version, inference_port, + inference_engine, model_metadata, model_config) + + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + is_from_model=True, run_id=self.run_id) + return False + else: + logging.info("finished deployment, continue to send results to master...") + result_payload = self.send_deployment_results( + end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, + model_id, model_name, inference_output_url, model_version, inference_port_external, + inference_engine, model_metadata, model_config, replica_no=rank + 1) + + if inference_port_external != inference_port: # Save internal port to local db + logging.info("inference_port_external {} != inference_port {}".format( + inference_port_external, inference_port)) + result_payload = self.construct_deployment_results( + end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, + model_id, model_name, inference_output_url, model_version, inference_port, + inference_engine, model_metadata, model_config, replica_no=rank + 1) + + FedMLModelDatabase.get_instance().set_deployment_result( + run_id, end_point_name, model_name, model_version, self.edge_id, + json.dumps(result_payload), replica_no=rank + 1) + + logging.info(f"Deploy replica {rank+1} / {prev_rank+1+op_num} successfully.") + time.sleep(5) + + time.sleep(1) + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, + is_from_model=True, run_id=self.run_id) + return True + elif op == "remove": + for rank_to_delete in range(prev_rank, prev_rank-op_num, -1): + self.replica_handler.remove_replica(rank_to_delete) + + FedMLModelCache.get_instance().set_redis_params() + replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids( + run_id, end_point_name, model_name, self.edge_id, rank_to_delete+1) + + replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str) + + JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids) + + FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank( + run_id, end_point_name, model_name, self.edge_id, rank_to_delete) + + # Report the deletion msg to master + result_payload = 
self.send_deployment_results( + end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED, + model_id, model_name, inference_output_url, model_version, inference_port_external, + inference_engine, model_metadata, model_config, replica_no=rank_to_delete + 1) + + time.sleep(1) + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, + is_from_model=True, run_id=self.run_id) + + # TODO: If delete all replica, then delete the job and related resources + if rank_to_delete == 0: + pass + return True + elif op == "update": + # Update is combine of delete and add + worker_ip = GeneralConstants.get_ip_address(self.request_json) + for rank in replica_rank_to_update: + # Delete the container + self.replica_handler.remove_replica(rank) + + FedMLModelCache.get_instance().set_redis_params() + replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids( + run_id, end_point_name, model_name, self.edge_id, rank + 1) + + replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str) + + JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids) + + # Delete the deployment result from local db + FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank( + run_id, end_point_name, model_name, self.edge_id, rank) + + time.sleep(1) + + # Add the container + # TODO: Reduce the duplicated code + try: + running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ + start_deployment( + inference_end_point_id, end_point_name, model_id, model_version, + unzip_package_path, model_bin_file, model_name, inference_engine, + ClientConstants.INFERENCE_HTTP_PORT, + ClientConstants.INFERENCE_GRPC_PORT, + ClientConstants.INFERENCE_METRIC_PORT, + use_gpu, memory_size, + ClientConstants.INFERENCE_CONVERTOR_IMAGE, + ClientConstants.INFERENCE_SERVER_IMAGE, + worker_ip, + self.model_is_from_open, model_config_parameters, + model_from_open, + token, + master_ip, self.edge_id, master_device_id=device_ids[0], replica_rank=rank, + gpu_per_replica=int(self.replica_handler.gpu_per_replica) + ) + except Exception as e: + inference_output_url = "" + logging.error(f"Exception at deployment: {traceback.format_exc()}") + + if inference_output_url == "": + logging.error("failed to deploy the model...") + + result_payload = self.send_deployment_results( + end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, + model_id, model_name, inference_output_url, inference_model_version, inference_port, + inference_engine, model_metadata, model_config) + + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + is_from_model=True, run_id=self.run_id) + + return False + else: + logging.info("finished deployment, continue to send results to master...") + result_payload = self.send_deployment_results( + end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, + model_id, model_name, inference_output_url, model_version, inference_port_external, + inference_engine, model_metadata, model_config, replica_no=rank + 1) + + if inference_port_external != inference_port: # Save internal port to local db + logging.info("inference_port_external {} != inference_port {}".format( + inference_port_external, inference_port)) + result_payload = self.construct_deployment_results( + end_point_name, self.edge_id, 
ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, + model_id, model_name, inference_output_url, model_version, inference_port, + inference_engine, model_metadata, model_config, replica_no=rank + 1) + + FedMLModelDatabase.get_instance().set_deployment_result( + run_id, end_point_name, model_name, model_version, self.edge_id, + json.dumps(result_payload), replica_no=rank + 1) + + logging.info(f"Update replica with no {rank + 1} successfully. Op num {op_num}") + time.sleep(5) + time.sleep(1) + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, + is_from_model=True, run_id=self.run_id) + return True + + else: + # The delete op will be handled by callback_delete_deployment + logging.error(f"Unsupported op {op} with op num {op_num}") + return False + + def construct_deployment_results(self, end_point_name, device_id, model_status, + model_id, model_name, model_inference_url, + model_version, inference_port, inference_engine, + model_metadata, model_config, replica_no=1): + deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name, + "model_id": model_id, "model_name": model_name, + "model_url": model_inference_url, "model_version": model_version, + "port": inference_port, + "inference_engine": inference_engine, + "model_metadata": model_metadata, + "model_config": model_config, + "model_status": model_status, + "inference_port": inference_port, + "replica_no": replica_no, + } + return deployment_results_payload + + def construct_deployment_status(self, end_point_name, device_id, + model_id, model_name, model_version, + model_inference_url, model_status, + inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT, + replica_no=1, # start from 1 + ): + deployment_status_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name, + "device_id": device_id, + "model_id": model_id, "model_name": model_name, + "model_version": model_version, + "model_url": model_inference_url, "model_status": model_status, + "inference_port": inference_port, + "replica_no": replica_no, + } + return deployment_status_payload + + def send_deployment_results(self, end_point_name, device_id, model_status, + model_id, model_name, model_inference_url, + model_version, inference_port, inference_engine, + model_metadata, model_config, replica_no=1): + deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(device_id) + deployment_results_payload = self.construct_deployment_results( + end_point_name, device_id, model_status, + model_id, model_name, model_inference_url, + model_version, inference_port, inference_engine, + model_metadata, model_config, replica_no=replica_no) + + logging.info("[client] send_deployment_results: topic {}, payload {}.".format(deployment_results_topic, + deployment_results_payload)) + self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload)) + return deployment_results_payload + + def send_deployment_status(self, end_point_name, device_id, + model_id, model_name, model_version, + model_inference_url, model_status, + inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT, + replica_no=1, # start from 1 + ): + deployment_status_topic = "model_device/model_device/return_deployment_status/{}".format(device_id) + deployment_status_payload = self.construct_deployment_status( + end_point_name, device_id, + model_id, model_name, model_version, + model_inference_url, model_status, + 
inference_port=inference_port, + replica_no=replica_no) + + logging.info("[client] send_deployment_status: topic {}, payload {}.".format(deployment_status_topic, + deployment_status_payload)) + self.message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload)) + return deployment_status_payload + + def reset_devices_status(self, edge_id, status): + self.status_reporter.run_id = self.run_id + self.status_reporter.edge_id = edge_id + self.status_reporter.report_client_id_status( + edge_id, status, is_from_model=True, run_id=self.run_id) + + # Override + def get_download_package_info(self, packages_config=None): + model_name = packages_config["model_name"] + model_storage_url = packages_config["model_storage_url"] + return model_name, model_storage_url + + # Override + def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir): + pass + + # Override + def build_dynamic_constrain_variables(self, run_id, run_config): + pass diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner_manager.py new file mode 100755 index 0000000000..4fe35d5a8a --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner_manager.py @@ -0,0 +1,23 @@ + +from fedml.core.common.singleton import Singleton +from .worker_job_runner import FedMLDeployWorkerJobRunner +from ..scheduler_core.general_constants import GeneralConstants +from ..slave.base_slave_job_runner_manager import FedMLBaseSlaveJobRunnerManager + + +class FedMLDeployJobRunnerManager(FedMLBaseSlaveJobRunnerManager, Singleton): + def __init__(self): + FedMLBaseSlaveJobRunnerManager.__init__(self) + + @staticmethod + def get_instance(): + return FedMLDeployJobRunnerManager() + + # Override + def _generate_job_runner_instance( + self, args, run_id=None, request_json=None, agent_config=None, edge_id=None + ): + job_runner = FedMLDeployWorkerJobRunner( + args, run_id=run_id, request_json=request_json, agent_config=agent_config, edge_id=edge_id) + job_runner.infer_host = GeneralConstants.get_ip_address(request_json) + return job_runner diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py new file mode 100755 index 0000000000..43bb3c4582 --- /dev/null +++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py @@ -0,0 +1,195 @@ + +import json +import logging +import os +import traceback + +from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils +from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils +from fedml.computing.scheduler.comm_utils.sys_utils import get_python_program +from fedml.core.mlops import MLOpsConfigs, MLOpsRuntimeLog, MLOpsRuntimeLogDaemon +from .device_model_db import FedMLModelDatabase +from .device_model_msg_object import FedMLModelMsgObject +from .device_client_constants import ClientConstants +from .device_client_data_interface import FedMLClientDataInterface +from ..slave.base_slave_protocol_manager import FedMLBaseSlaveProtocolManager +from .worker_job_runner_manager import FedMLDeployJobRunnerManager +from .device_mqtt_inference_protocol import FedMLMqttInference + + +class FedMLDeployWorkerProtocolManager(FedMLBaseSlaveProtocolManager): + def __init__(self, args, agent_config=None): + FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config) + + 
self.topic_start_deployment = None + self.topic_delete_deployment = None + + self.infer_host = "127.0.0.1" + self.redis_addr = "local" + self.redis_port = "6379" + self.redis_password = "fedml_default" + self.endpoint_sync_protocol = None + self.local_api_process = None + self.mqtt_inference_obj = None + + # Override + def _generate_protocol_manager_instance(self, args, agent_config=None): + return FedMLDeployWorkerProtocolManager(args, agent_config=agent_config) + + # Override + def generate_topics(self): + super().generate_topics() + + # The topic for start deployment + self.topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id)) + + # The topic for deleting endpoint + self.topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id)) + + # Subscribe topics for endpoints + self.add_subscribe_topic(self.topic_start_deployment) + self.add_subscribe_topic(self.topic_delete_deployment) + + # Override + def add_protocol_handler(self): + super().add_protocol_handler() + + # Add the message listeners for endpoint related topics + self.add_message_listener(self.topic_start_deployment, self.callback_start_deployment) + self.add_message_listener(self.topic_delete_deployment, self.callback_delete_deployment) + + # Override + def _get_job_runner_manager(self): + return FedMLDeployJobRunnerManager.get_instance() + + # Override + def _init_extra_items(self): + # Init local database + FedMLClientDataInterface.get_instance().create_job_table() + try: + FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir()) + FedMLModelDatabase.get_instance().create_table() + except Exception as e: + pass + + client_api_cmd = "fedml.computing.scheduler.model_scheduler.device_client_api:api" + client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd) + if client_api_pids is None or len(client_api_pids) <= 0: + # Start local API services + cur_dir = os.path.dirname(__file__) + fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) + python_program = get_python_program() + self.local_api_process = ClientConstants.exec_console_with_script( + "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " + "--log-level critical".format( + python_program, client_api_cmd, + ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir + ), + should_capture_stdout=False, + should_capture_stderr=False + ) + + # Override + def _process_connection_ready(self): + MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + + if self.mqtt_inference_obj is None: + self.mqtt_inference_obj = FedMLMqttInference( + agent_config=self.agent_config, mqtt_mgr=self.communication_mgr) + self.mqtt_inference_obj.setup_listener_for_endpoint_inference_request(self.edge_id) + + # Override + def _process_connection_lost(self): + try: + if self.mqtt_inference_obj is not None: + self.mqtt_inference_obj.remove_listener_for_endpoint_inference_request(self.edge_id) + except Exception as e: + pass + + # Override + def print_connected_info(self): + pass + + def callback_start_deployment(self, topic, payload): + """ + topic: model_ops/model_device/start_deployment/model-agent-device-id + payload: {"model_name": "image-model", "model_storage_url":"s3-url", + "instance_scale_min":1, "instance_scale_max":3, "inference_engine":"onnx (or tensorrt)"} + """ + # Parse deployment parameters + request_json = json.loads(payload) + run_id = request_json["end_point_id"] + token = 
request_json["token"] + user_id = request_json["user_id"] + user_name = request_json["user_name"] + device_ids = request_json["device_ids"] + device_objs = request_json["device_objs"] + model_config = request_json["model_config"] + model_name = model_config["model_name"] + model_storage_url = model_config["model_storage_url"] + scale_min = model_config.get("instance_scale_min", 0) + scale_max = model_config.get("instance_scale_max", 0) + inference_engine = model_config.get("inference_engine", 0) + inference_end_point_id = run_id + + try: + MLOpsConfigs.fetch_all_configs() + except Exception as e: + pass + + # Start log processor for current run + run_id = inference_end_point_id + self.args.run_id = run_id + self.args.edge_id = self.edge_id + MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source( + ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT) + MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id) + + # Start the job runner + request_json["run_id"] = run_id + run_id_str = str(run_id) + self.request_json = request_json + self.running_request_json[run_id_str] = request_json + self._get_job_runner_manager().start_job_runner( + run_id, request_json, args=self.args, edge_id=self.edge_id, + sender_message_queue=self.message_center.get_sender_message_queue(), + listener_message_queue=self.get_listener_message_queue(), + status_center_queue=self.get_status_queue() + ) + process = self._get_job_runner_manager().get_runner_process(run_id) + if process is not None: + ClientConstants.save_run_process(run_id, process.pid) + + def callback_delete_deployment(self, topic, payload): + logging.info("[Worker] callback_delete_deployment") + + # Parse payload as the model message object. 
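+        # For reference, a minimal delete payload might look like the following
+        # (illustrative only; the exact schema is whatever FedMLModelMsgObject parses):
+        #   {"end_point_id": 9999, "end_point_name": "my-endpoint",
+        #    "model_config": {"model_id": 111, "model_name": "image-model", "model_version": "v1"}}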
+ model_msg_object = FedMLModelMsgObject(topic, payload) + + # Delete all replicas on this device + try: + ClientConstants.remove_deployment( + model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version, + model_msg_object.run_id, model_msg_object.model_id, edge_id=self.edge_id) + except Exception as e: + logging.info(f"Exception when removing deployment {traceback.format_exc()}") + pass + + self._get_job_runner_manager().stop_job_runner(model_msg_object.run_id) + + logging.info(f"[endpoint/device][{model_msg_object.run_id}/{self.edge_id}] " + f"Release gpu resource when the worker deployment deleted.") + JobRunnerUtils.get_instance().release_gpu_ids(model_msg_object.run_id, self.edge_id) + + if self.running_request_json.get(str(model_msg_object.run_id)) is not None: + try: + self.running_request_json.pop(str(model_msg_object.run_id)) + except Exception as e: + logging.error(f"Error when removing running_request_json: {traceback.format_exc()}") + pass + + FedMLClientDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) + FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id( + model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, + self.edge_id) diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py new file mode 100755 index 0000000000..61ffd20988 --- /dev/null +++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py @@ -0,0 +1,460 @@ +import logging +import os +import platform +import subprocess +import time +import traceback +import uuid + +import requests + +import fedml +from fedml.computing.scheduler.comm_utils import sys_utils, security_utils +from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants +from fedml.computing.scheduler.comm_utils.sys_utils import get_sys_runner_info +from fedml.computing.scheduler.scheduler_core.general_constants import GeneralConstants +from fedml.core.common.singleton import Singleton +from fedml.core.mlops import MLOpsConfigs + + +class FedMLAccountManager(Singleton): + LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos' + STATUS_IDLE = "IDLE" + ROLE_EDGE_SERVER = "edge_server" + ROLE_CLOUD_AGENT = "cloud_agent" + ROLE_CLOUD_SERVER = "cloud_server" + ROLE_EDGE_DEVICE = "client" + ROLE_GPU_PROVIDER = "gpu_supplier" + ROLE_DEPLOY_MASTER_ON_PREM = "md.on_premise_device.master" + ROLE_DEPLOY_WORKER_ON_PREM = "md.on_premise_device" + + DEVICE_ID_SUFFIX_EDGE_SERVER = ".Edge.Server" + DEVICE_ID_SUFFIX_CLOUD_AGENT = ".Public.Cloud" + DEVICE_ID_SUFFIX_CLOUD_SERVER = ".Public.Server" + DEVICE_ID_SUFFIX_EDGE_DEVICE = ".Edge.Device" + DEVICE_ID_SUFFIX_GPU_PROVIDER = ".Edge.GPU.Supplier" + DEVICE_ID_SUFFIX_DEPLOY = "MDA" + DEVICE_ID_SUFFIX_DEPLOY_MASTER_ON_PREM = ".OnPremise.Master.Device" + DEVICE_ID_SUFFIX_DEPLOY_WORKER_ON_PREM = ".OnPremise.Device" + + DEVICE_ID_DOCKER_TAG = ".Docker" + DEVICE_ID_DOCKER_HUB_TAG = ".DockerHub" + + def __init__(self): + if not hasattr(self, "agent_args"): + self.agent_args = None + + @staticmethod + def get_instance(): + return FedMLAccountManager() + + def login(self, user_id, api_key="", device_id=None, os_name=None, role=None): + # Build the agent args + self.build_agent_args( + user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role + ) + + # Fetch configs from the MLOps config server. 
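+        # The loop below retries up to five times with a three-second pause between
+        # attempts; one successful attempt fetches the MQTT, S3, MLOps and Docker
+        # configs in a single round trip.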
+        service_config = dict()
+        log_server_url = None
+        config_try_count = 0
+        edge_id = 0
+        while config_try_count < 5:
+            # noinspection PyBroadException
+            try:
+                mqtt_config, s3_config, mlops_config, docker_config = FedMLAccountManager.fetch_configs()
+                service_config["mqtt_config"] = mqtt_config
+                service_config["s3_config"] = s3_config
+                service_config["ml_ops_config"] = mlops_config
+                service_config["docker_config"] = docker_config
+                log_server_url = mlops_config.get("LOG_SERVER_URL", None)
+                break
+            except Exception as e:
+                print("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc()))
+                print(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
+                config_try_count += 1
+                time.sleep(3)
+                continue
+
+        # Failed to fetch the configs after retrying many times.
+        if config_try_count >= 5:
+            print("")
+            print("[5] Oops, you failed to log in to the FedML MLOps platform.")
+            print("Please check whether your network connection is working!")
+            return None
+
+        # Bind the account id to the FedML® Nexus AI Platform
+        register_try_count = 0
+        edge_id = -1
+        user_name = None
+        extra_url = None
+        general_edge_id = None
+        while register_try_count < 5:
+            # noinspection PyBroadException
+            try:
+                edge_id, user_name, extra_url, general_edge_id = FedMLAccountManager.bind_account_and_device_id(
+                    service_config["ml_ops_config"]["EDGE_BINDING_URL"], self.agent_args.account_id,
+                    self.agent_args.unique_device_id, self.agent_args.os_name,
+                    api_key=api_key, role=role
+                )
+                if edge_id > 0:
+                    break
+            except SystemExit as e:
+                print("Your account does not exist. Please make sure your account ID is correct.")
+                os.system("fedml logout -s")
+                return
+            except Exception as e:
+                print("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc()))
+                print(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING)
+                register_try_count += 1
+                time.sleep(3)
+                continue
+
+        # Failed to bind your account after retrying many times.
+        if edge_id <= 0:
+            print("")
+            print("[6] Oops, you failed to log in to the FedML MLOps platform.")
+            print("Please check whether your network connection is working!")
+            return None
+
+        # Fill the bound result into the agent args.
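+        # (The log server URL, edge/server id, user name, extra URL and the fetched
+        # agent config are all recorded on self.agent_args so later components read
+        # them from a single place.)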
+        self.fill_agent_args(
+            log_server_url=log_server_url, server_id=edge_id,
+            edge_id=edge_id, general_edge_id=general_edge_id,
+            user_name=user_name, extra_url=extra_url,
+            agent_config=service_config)
+
+        return self.agent_args
+
+    def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, role=None):
+        # Generate the device id suffix based on the role
+        device_id_suffix = None
+        is_master = False
+        is_deploy = False
+        if role == FedMLAccountManager.ROLE_EDGE_SERVER:
+            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_EDGE_SERVER
+            is_master = True
+        elif role == FedMLAccountManager.ROLE_CLOUD_AGENT:
+            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_CLOUD_AGENT
+            is_master = True
+        elif role == FedMLAccountManager.ROLE_CLOUD_SERVER:
+            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_CLOUD_SERVER
+            is_master = True
+        elif role == FedMLAccountManager.ROLE_EDGE_DEVICE:
+            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_EDGE_DEVICE
+        elif role == FedMLAccountManager.ROLE_GPU_PROVIDER:
+            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_GPU_PROVIDER
+        elif role == FedMLAccountManager.ROLE_DEPLOY_MASTER_ON_PREM:
+            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_DEPLOY_MASTER_ON_PREM
+            is_master = True
+            is_deploy = True
+        elif role == FedMLAccountManager.ROLE_DEPLOY_WORKER_ON_PREM:
+            device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_DEPLOY_WORKER_ON_PREM
+            is_deploy = True
+
+        # Build the agent args
+        version = fedml.get_env_version()
+        if self.agent_args is None:
+            self.agent_args = AgentArgs()
+        self.agent_args.role = role
+        self.agent_args.account_id = user_id
+        self.agent_args.api_key = api_key
+        self.agent_args.current_running_dir = GeneralConstants.get_deploy_fedml_home_dir(is_master=is_master) \
+            if is_deploy else GeneralConstants.get_launch_fedml_home_dir(is_master=is_master)
+        sys_name = platform.system()
+        if sys_name == "Darwin":
+            sys_name = "MacOS"
+        self.agent_args.os_name = sys_name if os_name is None or os_name == "" else os_name
+        self.agent_args.version = version
+        self.agent_args.log_file_dir = GeneralConstants.get_deploy_log_file_dir(is_master=is_master) \
+            if is_deploy else GeneralConstants.get_launch_log_file_dir(is_master=is_master)
+        is_from_docker = False
+        if device_id is not None and device_id != "0":
+            self.agent_args.current_device_id = device_id
+        else:
+            data_dir = GeneralConstants.get_deploy_data_dir(is_master=is_master) \
+                if is_deploy else GeneralConstants.get_launch_data_dir(is_master=is_master)
+            is_gpu_provider = True if role == FedMLAccountManager.ROLE_GPU_PROVIDER else False
+            self.agent_args.current_device_id = FedMLAccountManager.get_device_id(
+                data_dir=data_dir, use_machine_id=is_gpu_provider)
+        self.agent_args.device_id = self.agent_args.current_device_id
+        self.agent_args.config_version = version
+        self.agent_args.cloud_region = ""
+
+        # Check if it is running in the fedml docker hub
+        is_from_fedml_docker_hub = False
+        dock_loc_file = GeneralConstants.get_deploy_docker_location_file(is_master=is_master) \
+            if is_deploy else GeneralConstants.get_launch_docker_location_file(is_master=is_master)
+        if os.path.exists(dock_loc_file):
+            is_from_fedml_docker_hub = True
+
+        # Build the unique device id
+        docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_TAG if is_from_docker else ""
+        docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_HUB_TAG if is_from_fedml_docker_hub else docker_tag
+        unique_device_id = f"{self.agent_args.current_device_id}@{self.agent_args.os_name}" \
+                           f"{docker_tag}{device_id_suffix}"
+
+        # Set the unique device id
+        self.agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub
+        self.agent_args.unique_device_id = unique_device_id
+
+    def fill_agent_args(
+            self, log_server_url=None, server_id=None, edge_id=None,
+            user_name=None, extra_url=None, general_edge_id=None, agent_config=None):
+        self.agent_args.log_server_url = log_server_url
+        self.agent_args.server_id = server_id
+        self.agent_args.edge_id = edge_id
+        self.agent_args.user_name = user_name
+        self.agent_args.extra_url = extra_url
+        self.agent_args.general_edge_id = general_edge_id
+        self.agent_args.agent_config = agent_config
+
+    @staticmethod
+    def write_login_failed_file(is_client=True):
+        login_exit_file = os.path.join(
+            GeneralConstants.get_launch_log_file_dir(is_master=not is_client), "exited.log")
+        with open(login_exit_file, "w") as f:
+            f.write(f"{os.getpid()}.")
+
+    @staticmethod
+    def get_device_id(data_dir, use_machine_id=False):
+        device_file_path = os.path.join(data_dir, FedMLAccountManager.LOCAL_RUNNER_INFO_DIR_NAME)
+        file_for_device_id = os.path.join(device_file_path, "devices.id")
+        if not os.path.exists(device_file_path):
+            os.makedirs(device_file_path, exist_ok=True)
+        elif os.path.exists(file_for_device_id):
+            with open(file_for_device_id, 'r', encoding='utf-8') as f:
+                device_id_from_file = f.readline()
+                if device_id_from_file is not None and device_id_from_file != "":
+                    return device_id_from_file
+
+        if platform.system() == "Darwin":
+            cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \
+                                 "|awk -F':' '{print $2}' "
+            device_id = os.popen(cmd_get_serial_num).read()
+            device_id = device_id.replace('\n', '').replace(' ', '')
+            if device_id is None or device_id == "":
+                if not use_machine_id:
+                    device_id = hex(uuid.getnode())
+                else:
+                    device_id = FedMLAccountManager.get_gpu_machine_id()
+            else:
+                device_id = "0x" + device_id
+        else:
+            if "nt" in os.name:
+
+                def get_uuid():
+                    guid = ""
+                    try:
+                        cmd = "wmic csproduct get uuid"
+                        guid = str(subprocess.check_output(cmd))
+                        pos1 = guid.find("\\n") + 2
+                        guid = guid[pos1:-15]
+                    except Exception as ex:
+                        logging.error(f"Failed to get uuid with Exception {ex}. Traceback: {traceback.format_exc()}")
+                        pass
+                    return str(guid)
+
+                device_id = str(get_uuid())
+                logging.info(device_id)
+            elif "posix" in os.name:
+                device_id = sys_utils.get_device_id_in_docker()
+                if device_id is None:
+                    if not use_machine_id:
+                        device_id = hex(uuid.getnode())
+                    else:
+                        device_id = FedMLAccountManager.get_gpu_machine_id()
+            else:
+                device_id = sys_utils.run_subprocess_open(
+                    "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split()
+                )
+                # Normalize the subprocess output into a plain string id.
+                device_id = str(device_id).strip()
+
+        if device_id is not None and device_id != "":
+            with open(file_for_device_id, 'w', encoding='utf-8') as f:
+                f.write(device_id)
+        else:
+            device_id = str(uuid.uuid4())
+            with open(file_for_device_id, 'w', encoding='utf-8') as f:
+                f.write(device_id)
+
+        return device_id
+
+    @staticmethod
+    def get_gpu_machine_id():
+        gpu_list = sys_utils.get_gpu_list()
+        gpu_uuids = ""
+        if len(gpu_list) > 0:
+            for gpu in gpu_list:
+                gpu_uuids += gpu.get("uuid", "")
+        else:
+            gpu_uuids = str(uuid.uuid4())
+        device_id_combination = \
+            f"{FedMLAccountManager.get_machine_id()}-{hex(uuid.getnode())}-{gpu_uuids}"
+        device_id = security_utils.get_content_hash(device_id_combination)
+        return device_id
+
+    @staticmethod
+    def get_machine_id():
+        try:
+            import machineid
+            return machineid.id().replace('\n', '').replace('\r\n', '').strip()
+        except Exception as e:
+            logging.error(f"Failed to get machine id with Exception {e}. Traceback: {traceback.format_exc()}")
+            return hex(uuid.getnode())
+
+    @staticmethod
+    def bind_account_and_device_id(
+            url, account_id, device_id, os_name, api_key="",
+            role=ROLE_EDGE_SERVER):
+        ip = requests.get('https://checkip.amazonaws.com').text.strip()
+        fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
+            cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
+            gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
+        host_name = sys_utils.get_host_name()
+        json_params = {
+            "accountid": account_id,
+            "deviceid": device_id,
+            "type": os_name,
+            "state": FedMLAccountManager.STATUS_IDLE,
+            "status": FedMLAccountManager.STATUS_IDLE,
+            "processor": cpu_info,
+            "core_type": cpu_info,
+            "network": "",
+            "role": role,
+            "os_ver": os_ver,
+            "memory": total_mem,
+            "ip": ip,
+            "api_key": api_key,
+            "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
+                            "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
+                            "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
+                            "available_mem": available_mem, "total_mem": total_mem,
+                            "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
+        }
+        if gpu_count > 0:
+            if gpu_total_mem is not None:
+                # Parenthesized so the memory suffix is appended whether or not gpu_info is present.
+                json_params["gpu"] = (gpu_info if gpu_info is not None else "") + ", Total GPU Memory: " + gpu_total_mem
+            else:
+                json_params["gpu"] = gpu_info if gpu_info is not None else ""
+            json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
+            if gpu_available_mem is not None:
+                json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
+            if gpu_total_mem is not None:
+                json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
+
+            json_params["extra_infos"]["gpu_count"] = gpu_count
+            json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
+            json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
+
+            gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
+            gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
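+            # Report both the full inventory (gpu_list below) and the currently
+            # schedulable ids, so the backend can tell installed GPUs from available ones.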
+ gpu_list = sys_utils.get_gpu_list() + json_params["extra_infos"]["gpu_available_count"] = gpu_available_count + json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list + json_params["extra_infos"]["gpu_list"] = gpu_list + else: + json_params["gpu"] = "None" + json_params["extra_infos"]["gpu_available_count"] = 0 + json_params["extra_infos"]["gpu_available_id_list"] = [] + json_params["extra_infos"]["gpu_list"] = [] + + _, cert_path = MLOpsConfigs.get_request_params() + if cert_path is not None: + try: + requests.session().verify = cert_path + response = requests.post( + url, json=json_params, verify=True, + headers={"content-type": "application/json", "Connection": "close"} + ) + except requests.exceptions.SSLError as err: + logging.error( + f"Failed to bind account and device id with error: {err}, traceback: {traceback.format_exc()}") + MLOpsConfigs.install_root_ca_file() + response = requests.post( + url, json=json_params, verify=True, + headers={"content-type": "application/json", "Connection": "close"} + ) + else: + response = requests.post(url, json=json_params, headers={"Connection": "close"}) + edge_id, user_name, extra_url, general_edge_id = -1, None, None, None + if response.status_code != 200: + print(f"Binding to MLOps with response.status_code = {response.status_code}, " + f"response.content: {response.content}") + pass + else: + # print("url = {}, response = {}".format(url, response)) + status_code = response.json().get("code") + if status_code == "SUCCESS": + edge_id = response.json().get("data").get("id") + user_name = response.json().get("data").get("userName", None) + extra_url = response.json().get("data").get("url", None) + general_edge_id = response.json().get("data").get("general_edge_id", None) + if edge_id is None or edge_id <= 0: + print(f"Binding to MLOps with response.status_code = {response.status_code}, " + f"response.content: {response.content}") + else: + if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR: + raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR) + print(f"Binding to MLOps with response.status_code = {response.status_code}, " + f"response.content: {response.content}") + return -1, None, None, None + return edge_id, user_name, extra_url, general_edge_id + + @staticmethod + def fetch_configs(): + return MLOpsConfigs.fetch_all_configs() + + @staticmethod + def _role_is_slave_agent(role): + return True if role == FedMLAccountManager.ROLE_EDGE_DEVICE or \ + role == FedMLAccountManager.ROLE_GPU_PROVIDER else False + + +class AgentArgs: + def __init__(self, role=None, account_id=None, api_key=None, server_id=None, current_running_dir=None, + os_name=None, version=None, log_file_dir=None, log_server_url=None, device_id=None, + current_device_id=None, config_version=None, cloud_region=None, is_from_docker=False, + edge_id=None, agent_config=None, user_name=None, extra_url=None, unique_device_id=None): + self.role = role + self.account_id = account_id + self.api_key = api_key + self.current_running_dir = current_running_dir + self.server_id = server_id + self.os_name = os_name + self.version = version + self.log_file_dir = log_file_dir + self.log_server_url = log_server_url + self.device_id = device_id + self.current_device_id = current_device_id + self.config_version = config_version + self.cloud_region = cloud_region + self.is_from_docker = is_from_docker + self.edge_id = edge_id + self.client_id = edge_id + self.agent_config = agent_config + self.user_name = user_name + self.extra_url = extra_url + 
self.unique_device_id = unique_device_id + self.client_id_list = None + self.using_mlops = True + self.server_agent_id = None + self.general_edge_id = None + + def is_cloud_server(self): + return self.role == FedMLAccountManager.ROLE_CLOUD_SERVER + + def is_cloud_agent(self): + return self.role == FedMLAccountManager.ROLE_CLOUD_AGENT + + def is_edge_server(self): + return self.role == FedMLAccountManager.ROLE_EDGE_SERVER + + def is_edge_device(self): + return self.role == FedMLAccountManager.ROLE_EDGE_DEVICE + + def is_gpu_provider(self): + return self.role == FedMLAccountManager.ROLE_GPU_PROVIDER + + def is_slave_agent(self): + return self.is_edge_device() or self.is_gpu_provider() diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_cache_manager.py b/python/fedml/computing/scheduler/scheduler_core/compute_cache_manager.py index f918c785e2..6247cebe4f 100755 --- a/python/fedml/computing/scheduler/scheduler_core/compute_cache_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/compute_cache_manager.py @@ -1,10 +1,11 @@ -import threading +import threading import redis from .compute_gpu_cache import ComputeGpuCache from .compute_logs_cache import ComputeLogsCache from .business_models import LogsUploadModel, MetricsModel from ..comm_utils.constants import SchedulerConstants +from .compute_status_cache import ComputeStatusCache class ComputeCacheManager(object): @@ -23,6 +24,7 @@ def init(self): self.redis_connection = None self.gpu_cache = ComputeGpuCache(self.redis_connection) self.logs_cache = ComputeLogsCache(self.redis_connection) + self.status_cache = ComputeStatusCache(self.redis_connection) self.local_lock = threading.Lock() def setup_redis_connection(self, redis_addr, redis_port, redis_password="fedml_default"): @@ -48,6 +50,7 @@ def setup_redis_connection(self, redis_addr, redis_port, redis_password="fedml_d self.redis_connection.set("FEDML_TEST_KEYS", "TEST") self.gpu_cache.redis_connection = self.redis_connection self.logs_cache.redis_connection = self.redis_connection + self.status_cache.redis_connection = self.redis_connection is_connected = True except Exception as e: is_connected = False @@ -69,6 +72,7 @@ def setup_public_redis_connection(self): self.redis_connection.set("FEDML_TEST_KEYS", "TEST") self.gpu_cache.redis_connection = self.redis_connection self.logs_cache.redis_connection = self.redis_connection + self.status_cache.redis_connection = self.redis_connection is_connected = True except Exception as e: pass @@ -134,6 +138,9 @@ def get_artifact_logs(self): def get_artifacts(self): pass + def get_status_cache(self): + return self.status_cache + diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py b/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py new file mode 100755 index 0000000000..a1929abbef --- /dev/null +++ b/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py @@ -0,0 +1,76 @@ +import logging +import traceback +from .compute_status_db import ComputeStatusDatabase +from ..master.server_constants import ServerConstants + + +class ComputeStatusCache(object): + FEDML_JOB_STATUS_TAG = "FEDML_JOB_STATUS_TAG-" + FEDML_DEVICE_STATUS_IN_JOB_TAG = "FEDML_DEVICE_STATUS_IN_JOB_TAG-" + + def __init__(self, redis_connection): + self.redis_connection = redis_connection + ComputeStatusDatabase.get_instance().set_database_base_dir(ServerConstants.get_database_dir()) + ComputeStatusDatabase.get_instance().create_table() + + def save_job_status(self, run_id, status): + try: + 
self.redis_connection.set(self._get_job_status_key(run_id), status)
+        except Exception as e:
+            logging.error(f"Error setting job status: {e}, Traceback: {traceback.format_exc()}")
+            pass
+
+        ComputeStatusDatabase.get_instance().set_job_status(run_id, status)
+
+    def get_job_status(self, run_id):
+        status = None
+        try:
+            if self.redis_connection.exists(self._get_job_status_key(run_id)):
+                status = self.redis_connection.get(self._get_job_status_key(run_id))
+        except Exception as e:
+            logging.error(f"Error getting job status: {e}, Traceback: {traceback.format_exc()}")
+            pass
+
+        if status is None:
+            status = ComputeStatusDatabase.get_instance().get_job_status(run_id)
+            try:
+                if status is not None:
+                    self.redis_connection.set(self._get_job_status_key(run_id), status)
+            except Exception as e:
+                pass
+
+        return status
+
+    def save_device_status_in_job(self, run_id, device_id, status):
+        try:
+            self.redis_connection.set(self._get_device_status_in_job_key(run_id, device_id), status)
+        except Exception as e:
+            logging.error(f"Error setting device status in job: {e}, Traceback: {traceback.format_exc()}")
+            pass
+
+        ComputeStatusDatabase.get_instance().set_device_status_in_job(run_id, device_id, status)
+
+    def get_device_status_in_job(self, run_id, device_id):
+        status = None
+        try:
+            if self.redis_connection.exists(self._get_device_status_in_job_key(run_id, device_id)):
+                status = self.redis_connection.get(self._get_device_status_in_job_key(run_id, device_id))
+        except Exception as e:
+            logging.error(f"Error getting device status in job: {e}, Traceback: {traceback.format_exc()}")
+            pass
+
+        if status is None:
+            status = ComputeStatusDatabase.get_instance().get_device_status_in_job(run_id, device_id)
+            try:
+                if status is not None:
+                    self.redis_connection.set(self._get_device_status_in_job_key(run_id, device_id), status)
+            except Exception as e:
+                pass
+
+        return status
+
+    def _get_job_status_key(self, run_id):
+        return f"{ComputeStatusCache.FEDML_JOB_STATUS_TAG}{run_id}"
+
+    def _get_device_status_in_job_key(self, run_id, device_id):
+        return f"{ComputeStatusCache.FEDML_DEVICE_STATUS_IN_JOB_TAG}{run_id}-{device_id}"
diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_status_db.py b/python/fedml/computing/scheduler/scheduler_core/compute_status_db.py
new file mode 100755
index 0000000000..14219eeb6a
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/compute_status_db.py
@@ -0,0 +1,123 @@
+import json
+import os
+import time
+
+from sqlalchemy import Column, String, TEXT, Integer, Float, create_engine, and_
+from sqlalchemy.orm import sessionmaker
+from sqlalchemy.ext.declarative import declarative_base
+from fedml.core.common.singleton import Singleton
+from .base_db import FedMLBaseDb
+from .compute_utils import ComputeUtils
+from ..master.server_constants import ServerConstants
+
+Base = declarative_base()
+
+
+class ComputeStatusDatabase(Singleton, FedMLBaseDb):
+    COMPUTE_STATUS_DB = "compute-status.db"
+
+    def __init__(self):
+        super().__init__()
+
+    @staticmethod
+    def get_instance():
+        return ComputeStatusDatabase()
+
+    def get_job_status(self, run_id):
+        self.open_job_db()
+        job = self.db_connection.query(FedMLJobStatus). \
+            filter(FedMLJobStatus.job_id == f'{run_id}').first()
+        if job is None:
+            return None
+
+        return job.job_status
+
+    def get_device_status_in_job(self, run_id, device_id):
+        self.open_job_db()
+        device = self.db_connection.query(FedMLDeviceStatusInJob). \
+            filter(and_(FedMLDeviceStatusInJob.device_id == f'{device_id}',
+                        FedMLDeviceStatusInJob.job_id == f'{run_id}')).first()
+        if device is None:
+            return None
+
+        return device.device_status
+
+    def set_job_status(self, run_id, job_status):
+        self.open_job_db()
+        job = self.db_connection.query(FedMLJobStatus). \
+            filter(FedMLJobStatus.job_id == f'{run_id}').first()
+        if job is None:
+            job = FedMLJobStatus(job_id=run_id, job_status=job_status)
+            self.db_connection.add(job)
+            self.db_connection.commit()
+            return
+
+        if run_id is not None:
+            job.job_id = run_id
+        if job_status is not None:
+            job.job_status = job_status
+
+        self.db_connection.commit()
+
+    def set_device_status_in_job(self, run_id, device_id, status):
+        self.open_job_db()
+        device = self.db_connection.query(FedMLDeviceStatusInJob). \
+            filter(and_(FedMLDeviceStatusInJob.device_id == f'{device_id}',
+                        FedMLDeviceStatusInJob.job_id == f'{run_id}')).first()
+        if device is None:
+            job = FedMLDeviceStatusInJob(job_id=run_id, device_id=device_id, device_status=status)
+            self.db_connection.add(job)
+            self.db_connection.commit()
+            return
+
+        if run_id is not None:
+            device.job_id = run_id
+        if device_id is not None:
+            device.device_id = device_id
+        if status is not None:
+            device.device_status = status
+
+        self.db_connection.commit()
+
+    def set_database_base_dir(self, database_base_dir):
+        self.db_base_dir = database_base_dir
+        self.init_db_path()
+
+    def init_db_path(self):
+        if self.db_base_dir is None:
+            if not os.path.exists(ServerConstants.get_database_dir()):
+                os.makedirs(ServerConstants.get_database_dir(), exist_ok=True)
+            self.db_base_dir = ServerConstants.get_database_dir()
+
+        self.db_path = os.path.join(self.db_base_dir, ComputeStatusDatabase.COMPUTE_STATUS_DB)
+
+    def create_table(self):
+        self.open_job_db()
+        try:
+            Base.metadata.create_all(self.db_engine, checkfirst=True)
+        except Exception as e:
+            pass
+
+    def drop_table(self):
+        self.open_job_db()
+        try:
+            Base.metadata.drop_all(self.db_engine, checkfirst=True)
+        except Exception as e:
+            pass
+
+
+class FedMLJobStatus(Base):
+    __tablename__ = 'job_status'
+
+    id = Column(Integer, primary_key=True)
+    job_id = Column(TEXT)
+    job_status = Column(TEXT)
+    timestamp = Column(Integer)
+
+
+class FedMLDeviceStatusInJob(Base):
+    __tablename__ = 'device_status_in_job'
+
+    id = Column(Integer, primary_key=True)
+    job_id = Column(TEXT)
+    device_id = Column(TEXT)
+    device_status = Column(TEXT)
+    timestamp = Column(Integer)
diff --git a/python/fedml/computing/scheduler/scheduler_core/endpoint_sync_protocol.py b/python/fedml/computing/scheduler/scheduler_core/endpoint_sync_protocol.py
index 545ba75650..91e0815645 100755
--- a/python/fedml/computing/scheduler/scheduler_core/endpoint_sync_protocol.py
+++ b/python/fedml/computing/scheduler/scheduler_core/endpoint_sync_protocol.py
@@ -4,7 +4,8 @@
 from ..model_scheduler.device_model_cache import FedMLModelCache
 from ..model_scheduler.device_model_db import FedMLModelDatabase
 from ..model_scheduler.device_server_data_interface import FedMLServerDataInterface
-from .endpoint_monitor_protocol import EndpointDeviceDeploymentResultModel, EndpointDeviceDeploymentStatusModel, EndpointDeviceDeploymentInfoModel
+from .endpoint_monitor_protocol import EndpointDeviceDeploymentResultModel, \
+    EndpointDeviceDeploymentStatusModel, EndpointDeviceDeploymentInfoModel
 from ..model_scheduler.device_server_constants import ServerConstants
 from urllib.parse import urlparse
 import logging
@@ -82,8 +83,8 @@ def callback_sync_device_result(self, topic, payload):
         topic_splits = 
str(topic).split('/') device_id = topic_splits[-1] deployment_result = EndpointDeviceDeploymentResultModel(payload) - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_deployment_result( + FedMLModelCache.get_instance().set_redis_params() + FedMLModelCache.get_instance().set_deployment_result( deployment_result.endpoint_id, deployment_result.endpoint_name, deployment_result.model_name, deployment_result.model_version, device_id, payload) @@ -97,8 +98,8 @@ def callback_sync_device_status(self, topic, payload): topic_splits = str(topic).split('/') device_id = topic_splits[-1] deployment_status = EndpointDeviceDeploymentStatusModel(payload) - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_deployment_status( + FedMLModelCache.get_instance().set_redis_params() + FedMLModelCache.get_instance().set_deployment_status( deployment_status.endpoint_id, deployment_status.endpoint_name, deployment_status.model_name, deployment_status.model_version, device_id, deployment_status.model_status) @@ -130,7 +131,7 @@ def callback_sync_device_info(self, topic, payload): break if status_item_found is not None: - #print(f"status_item_found {status_item_found}, status_payload_found {status_payload_found}") + # print(f"status_item_found {status_item_found}, status_payload_found {status_payload_found}") # Delete Status FedMLModelCache.get_instance().delete_deployment_status( status_item_found, deployment_info.endpoint_id, deployment_info.endpoint_name, @@ -143,7 +144,8 @@ def callback_sync_device_info(self, topic, payload): # Update Status model_url_parsed = urlparse(status_payload_found.get("model_url", "")) - status_payload_found["model_url"] = f"http://{model_url_parsed.hostname}:{deployment_info.inference_port}{model_url_parsed.path}" + status_payload_found["model_url"] = f"http://{model_url_parsed.hostname}:{deployment_info.inference_port}" \ + f"{model_url_parsed.path}" status_payload_found["inference_port"] = deployment_info.inference_port FedMLModelCache.get_instance().set_deployment_status( deployment_info.endpoint_id, deployment_info.endpoint_name, deployment_info.model_name, @@ -163,7 +165,7 @@ def callback_sync_device_info(self, topic, payload): break if result_item_found is not None: - #print(f"result_item_found {result_item_found}, result_payload_found {result_payload_found}") + # print(f"result_item_found {result_item_found}, result_payload_found {result_payload_found}") FedMLModelCache.get_instance().delete_deployment_result( result_item_found, deployment_info.endpoint_id, deployment_info.endpoint_name, deployment_info.model_name) @@ -174,7 +176,8 @@ def callback_sync_device_info(self, topic, payload): result_payload_found["model_status"] = ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED model_url_parsed = urlparse(result_payload_found.get("model_url", "")) - result_payload_found["model_url"] = f"http://{model_url_parsed.hostname}:{deployment_info.inference_port}{model_url_parsed.path}" + result_payload_found["model_url"] = f"http://{model_url_parsed.hostname}:{deployment_info.inference_port}" \ + f"{model_url_parsed.path}" result_payload_found["inference_port"] = deployment_info.inference_port FedMLModelCache.get_instance().set_deployment_result( deployment_info.endpoint_id, deployment_info.endpoint_name, deployment_info.model_name, @@ -183,12 
+186,12 @@ def callback_sync_device_info(self, topic, payload): def set_local_deployment_status_result( self, endpoint_id, endpoint_name, model_name, model_version, device_id, inference_port, status_payload, result_payload): - ''' + """ The result and status are saved in the local sqlite table. They both belong to the table deployment_result_info; deployment_result column is used to save the result; deployment_status column is used to save the status. - ''' + """ if status_payload is not None: model_url_parsed = urlparse(status_payload.get("model_url", "")) status_payload["model_url"] = f"http://{model_url_parsed.hostname}:{inference_port}{model_url_parsed.path}" diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py new file mode 100755 index 0000000000..e642cacf1b --- /dev/null +++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py @@ -0,0 +1,193 @@ +import logging +import os + +from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants +from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils +from fedml.computing.scheduler.slave.client_constants import ClientConstants +from fedml.computing.scheduler.master.server_constants import ServerConstants +from fedml.computing.scheduler.model_scheduler import device_client_constants +from fedml.computing.scheduler.model_scheduler import device_server_constants + + +class GeneralConstants: + MSG_TOPIC_REQUEST_JOB_STATUS_PREFIX = f"anywhere/master_agent/request_job_status/" + MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB = f"slave_job/slave_agent/report_device_status_in_job" + MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES = "job_runner/master_protocol_manager/send_training_request_to_edges" + + CLIENT_SHELL_BASH = SchedulerConstants.CLIENT_SHELL_BASH + CLIENT_SHELL_PS = SchedulerConstants.CLIENT_SHELL_PS + PLATFORM_WINDOWS = "Windows" + + MSG_MLOPS_CLIENT_STATUS_OFFLINE = "OFFLINE" + MSG_MLOPS_CLIENT_STATUS_PROVISIONING = "PROVISIONING" + MSG_MLOPS_CLIENT_STATUS_IDLE = "IDLE" + MSG_MLOPS_CLIENT_STATUS_UPGRADING = "UPGRADING" + MSG_MLOPS_CLIENT_STATUS_QUEUED = "QUEUED" + MSG_MLOPS_CLIENT_STATUS_INITIALIZING = "INITIALIZING" + MSG_MLOPS_CLIENT_STATUS_TRAINING = "TRAINING" + MSG_MLOPS_CLIENT_STATUS_RUNNING = "RUNNING" + MSG_MLOPS_CLIENT_STATUS_STOPPING = "STOPPING" + MSG_MLOPS_CLIENT_STATUS_KILLED = "KILLED" + MSG_MLOPS_CLIENT_STATUS_FAILED = "FAILED" + MSG_MLOPS_CLIENT_STATUS_EXCEPTION = "EXCEPTION" + MSG_MLOPS_CLIENT_STATUS_FINISHED = "FINISHED" + + MSG_MLOPS_SERVER_STATUS_OFFLINE = "OFFLINE" + MSG_MLOPS_SERVER_STATUS_PROVISIONING = "PROVISIONING" + MSG_MLOPS_SERVER_STATUS_IDLE = "IDLE" + MSG_MLOPS_SERVER_STATUS_UPGRADING = "UPGRADING" + MSG_MLOPS_SERVER_STATUS_STARTING = "STARTING" + MSG_MLOPS_SERVER_STATUS_RUNNING = "RUNNING" + MSG_MLOPS_SERVER_STATUS_STOPPING = "STOPPING" + MSG_MLOPS_SERVER_STATUS_KILLED = "KILLED" + MSG_MLOPS_SERVER_STATUS_FAILED = "FAILED" + MSG_MLOPS_SERVER_STATUS_FINISHED = "FINISHED" + MSG_MLOPS_SERVER_STATUS_EXCEPTION = "EXCEPTION" + + MASTER_LOGIN_PROGRAM = "server_login.py" + SLAVE_LOGIN_PROGRAM = "client_login.py" + + CONFIG_KEY_AUTO_DETECT_PUBLIC_IP = "auto_detect_public_ip" + FEDML_OTA_CMD_UPGRADE = "upgrade" + FEDML_OTA_CMD_RESTART = "restart" + + @staticmethod + def get_package_unzip_dir(package_download_dir): + package_unzip_dir = package_download_dir + if not os.path.exists(package_unzip_dir): + os.makedirs(package_unzip_dir, exist_ok=True) + return package_unzip_dir 
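+    # Usage sketch (hypothetical caller; both helpers are defined in this class):
+    #   unzip_dir = GeneralConstants.get_package_unzip_dir(ClientConstants.get_package_download_dir())
+    #   GeneralConstants.save_run_process(run_id, process.pid, is_master=False)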
+ + @staticmethod + def get_filename_and_extension(url): + return ClientConstants.get_filename_and_extension(url) + + @staticmethod + def generate_yaml_doc(run_config_object, yaml_file): + ClientConstants.generate_yaml_doc(run_config_object, yaml_file) + + @staticmethod + def execute_commands_with_live_logs(cmds, join='&&', should_write_log_file=True, + callback=None, error_processor=None): + return ClientConstants.execute_commands_with_live_logs( + cmds, join=join, should_write_log_file=should_write_log_file, + callback=callback, error_processor=error_processor + ) + + @staticmethod + def cleanup_run_process(run_id, is_master=False): + if is_master: + ServerConstants.cleanup_run_process(run_id) + else: + ClientConstants.cleanup_run_process(run_id) + + @staticmethod + def cleanup_learning_process(run_id, data_dir=None): + RunProcessUtils.cleanup_run_process( + run_id, data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME, + info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_USER_PROCESS) + + @staticmethod + def cleanup_bootstrap_process(run_id, data_dir=None): + RunProcessUtils.cleanup_run_process( + run_id, data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME, + info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_BOOTSTRAP_PROCESS) + + @staticmethod + def save_learning_process(run_id, learning_id, data_dir=None): + RunProcessUtils.save_run_process( + run_id, learning_id, data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME, + info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_USER_PROCESS) + + @staticmethod + def save_bootstrap_process(run_id, process_id, data_dir=None): + RunProcessUtils.save_run_process( + run_id, process_id, data_dir, ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME, + info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_BOOTSTRAP_PROCESS) + + @staticmethod + def save_run_process(run_id, process_id, is_master=False): + RunProcessUtils.save_run_process( + run_id, process_id, ServerConstants.get_data_dir() if is_master else ClientConstants.get_data_dir(), + ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME) + + @staticmethod + def get_learning_process_list(run_id, is_master=False): + return RunProcessUtils.get_run_process_list( + run_id, ServerConstants.get_data_dir() if is_master else ClientConstants.get_data_dir(), + ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME, + info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_USER_PROCESS) + + @staticmethod + def get_launch_fedml_home_dir(is_master=False): + return ServerConstants.get_fedml_home_dir() if is_master else ClientConstants.get_fedml_home_dir() + + @staticmethod + def get_deploy_fedml_home_dir(is_master=False): + return device_server_constants.ServerConstants.get_fedml_home_dir() if is_master \ + else device_client_constants.ClientConstants.get_fedml_home_dir() + + @staticmethod + def get_launch_log_file_dir(is_master=False): + return ServerConstants.get_log_file_dir() if is_master else ClientConstants.get_log_file_dir() + + @staticmethod + def get_deploy_log_file_dir(is_master=False): + return device_server_constants.ServerConstants.get_log_file_dir() if is_master \ + else device_client_constants.ClientConstants.get_log_file_dir() + + @staticmethod + def get_launch_data_dir(is_master=False): + return ServerConstants.get_data_dir() if is_master else ClientConstants.get_data_dir() + + @staticmethod + def get_deploy_data_dir(is_master=False): + return device_server_constants.ServerConstants.get_data_dir() if is_master \ + else device_client_constants.ClientConstants.get_data_dir() + + @staticmethod + def 
get_deploy_docker_location_file(is_master=False):
+        return device_server_constants.ServerConstants.get_docker_location_file() if is_master \
+            else device_client_constants.ClientConstants.get_docker_location_file()
+
+    @staticmethod
+    def get_launch_docker_location_file(is_master=False):
+        return ServerConstants.get_docker_location_file() if is_master \
+            else ClientConstants.get_docker_location_file()
+
+    @staticmethod
+    def get_local_ip():
+        import socket
+        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        # connect() returns None; it is only used here to select the outbound interface.
+        s.connect(('8.8.8.8', 53))
+        ip = s.getsockname()[0]
+        s.close()
+        return ip
+
+    @staticmethod
+    def get_public_ip():
+        import requests
+        ip = None
+        try:
+            ip = requests.get('https://checkip.amazonaws.com').text.strip()
+        except Exception as e:
+            logging.info("Failed to get public ip: {}".format(e))
+        return ip
+
+    @staticmethod
+    def get_ip_address(request_json, infer_host=None):
+        # OPTION 1: Use local ip
+        ip = GeneralConstants.get_local_ip()
+
+        # OPTION 2: Auto detect public ip
+        if "parameters" in request_json and \
+                GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \
+                request_json["parameters"][GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP]:
+            ip = GeneralConstants.get_public_ip()
+            logging.info("Auto detect public ip for master: " + ip)
+
+        # OPTION 3: Use user indicated ip
+        if infer_host is not None and infer_host != "127.0.0.1" and infer_host != "localhost":
+            ip = infer_host
+
+        return ip
diff --git a/python/fedml/computing/scheduler/scheduler_core/master_api_daemon.py b/python/fedml/computing/scheduler/scheduler_core/master_api_daemon.py
index 5cebf757d6..5876a787ce 100755
--- a/python/fedml/computing/scheduler/scheduler_core/master_api_daemon.py
+++ b/python/fedml/computing/scheduler/scheduler_core/master_api_daemon.py
@@ -1,7 +1,8 @@
 from fastapi import FastAPI, Request
-from .log_manager import LogsManager
-from .metrics_manager import MetricsManager
-from ..comm_utils import sys_utils
+from fedml.computing.scheduler.scheduler_core.log_manager import LogsManager
+from fedml.computing.scheduler.scheduler_core.metrics_manager import MetricsManager
+from fedml.computing.scheduler.comm_utils import sys_utils
+from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager
 import os
 
 
@@ -52,6 +53,19 @@ async def update_log(request: Request):
        async def ready():
            return {"status": "Success"}
 
+       @api.get("/get_job_status")
+       async def get_job_status(job_id):
+           ComputeCacheManager.get_instance().set_redis_params()
+           job_status = ComputeCacheManager.get_instance().get_status_cache().get_job_status(job_id)
+           return {"job_status": job_status}
+
+       @api.get("/get_device_status_in_job")
+       async def get_device_status_in_job(job_id, device_id):
+           ComputeCacheManager.get_instance().set_redis_params()
+           device_status_in_job = ComputeCacheManager.get_instance().get_status_cache().get_device_status_in_job(
+               job_id, device_id)
+           return {"device_status_in_job": device_status_in_job}
+
        import uvicorn
        port = 30800
        if sys_utils.check_port("localhost", port):
@@ -59,7 +73,6 @@ async def ready():
        cur_dir = os.path.dirname(__file__)
        fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir)))
 
-       uvicorn.run(api, host="0.0.0.0", port=port, reload=True, reload_delay=3, reload_dirs=fedml_base_dir)
-
+       uvicorn.run(api, host="0.0.0.0", port=port)
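
The two endpoints added above expose the Redis-backed status cache through the
master API daemon. A minimal smoke test, assuming the daemon is running locally
on its default port 30800 and using placeholder ids (FastAPI maps the plain
function parameters to query parameters):

    import requests

    # Hypothetical job/device ids, for illustration only.
    print(requests.get("http://localhost:30800/get_job_status",
                       params={"job_id": "12345"}).json())
    print(requests.get("http://localhost:30800/get_device_status_in_job",
                       params={"job_id": "12345", "device_id": "67890"}).json())
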
diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py
index c7af555a99..7ae1e4c0b5 100755
--- a/python/fedml/computing/scheduler/scheduler_core/message_center.py
+++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py
@@ -1,3 +1,4 @@
+import json
 import logging
 import os
 import threading
@@ -7,20 +8,28 @@
 import multiprocessing
 from multiprocessing import Process, Queue
 import queue
+from os.path import expanduser
 
 from fedml.core.distributed.communication.mqtt.mqtt_manager import MqttManager
 from ....core.mlops.mlops_metrics import MLOpsMetrics
 from operator import methodcaller
+from .message_common import FedMLMessageEntity, FedMLMessageRecord
 
 
-class FedMLMessageCenter:
+class FedMLMessageCenter(object):
     FUNC_SETUP_MESSAGE_CENTER = "setup_message_center"
     FUNC_REBUILD_MESSAGE_CENTER = "rebuild_message_center"
-
-    def __init__(self, agent_config=None, message_queue=None, listener_message_queue=None):
+    ENABLE_SAVE_MESSAGE_TO_FILE = True
+    PUBLISH_MESSAGE_RETRY_TIMEOUT = 60 * 1000.0
+    PUBLISH_MESSAGE_RETRY_COUNT = 3
+    MESSAGE_SENT_RECORDS_FILE = "message-sent-records.log"
+    MESSAGE_SENT_SUCCESS_RECORDS_FILE = "message-sent-success-records.log"
+    MESSAGE_RECEIVED_RECORDS_FILE = "message-received-records.log"
+
+    def __init__(self, agent_config=None, sender_message_queue=None, listener_message_queue=None):
         self.sender_agent_config = agent_config
         self.listener_agent_config = agent_config
-        self.message_queue = message_queue
+        self.sender_message_queue = sender_message_queue
         self.message_event = None
         self.message_center_process = None
         self.sender_mqtt_mgr = None
@@ -32,9 +41,15 @@ def __init__(self, agent_config=None, message_queue=None, listener_message_queue=None):
         self.listener_payloads = dict()
         self.listener_handler_funcs = dict()
         self.listener_handler_object = None
-        self.listener_message_queue = None
+        self.listener_message_queue = listener_message_queue
         self.listener_message_event = None
         self.listener_message_center_process = None
+        self.sender_message_list = list()
+        self.receiver_message_list = list()
+        self.published_message_ids = list()
+        self.retry_sending_count_map = dict()
+        self.constants = FedMLMessageCenterConstants()
+        self.message_center_name = None
 
     def __repr__(self):
         return "<{klass} @{id:x} {attrs}>".format(
@@ -64,6 +79,10 @@ def on_sender_mqtt_connected(self, mqtt_client_object):
         self.sender_mqtt_is_connected = True
         self.sender_mqtt_lock.release()
 
+    def on_sender_mqtt_published(self, mqtt_client_object, obj, mid):
+        # Timestamp in milliseconds (ns / 1000 / 1000), consistent with FedMLMessageRecord.
+        self.published_message_ids.append({"message_id": mid, "timestamp": time.time_ns()/1000.0/1000.0})
+        self.save_published_message_record(mid)
+
     def setup_sender_mqtt_mgr(self):
         if self.sender_mqtt_mgr is not None:
             return
@@ -82,6 +101,7 @@ def setup_sender_mqtt_mgr(self):
 
         self.sender_mqtt_mgr.add_connected_listener(self.on_sender_mqtt_connected)
         self.sender_mqtt_mgr.add_disconnected_listener(self.on_sender_mqtt_disconnected)
+        self.sender_mqtt_mgr.add_published_listener(self.on_sender_mqtt_published)
         self.sender_mqtt_mgr.connect()
         self.sender_mqtt_mgr.loop_start()
@@ -90,6 +110,7 @@ def setup_sender_mqtt_mgr(self):
         self.sender_mlops_metrics.set_messenger(self)
 
     def release_sender_mqtt_mgr(self):
+        # noinspection PyBroadException
         try:
             if self.sender_mqtt_mgr is not None:
                 self.sender_mqtt_mgr.loop_stop()
@@ -105,17 +126,19 @@
                 f"Failed to release sender mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}")
             pass
 
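+    # How the delivery tracking added in this file fits together: run_sender()
+    # records each outgoing message as a FedMLMessageRecord keyed by the id
+    # returned from send_message_json(); on_sender_mqtt_published() records the
+    # ids acknowledged by the broker. retry_sending_undelivered_message() then
+    # re-sends any recorded message whose id was never acknowledged within
+    # PUBLISH_MESSAGE_RETRY_TIMEOUT, at most PUBLISH_MESSAGE_RETRY_COUNT times.
+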
-    def get_message_queue(self):
-        return self.message_queue
+    def get_sender_message_queue(self):
+        return self.sender_message_queue
 
-    def start_sender(self):
-        self.message_queue = Queue()
+    def start_sender(self, message_center_name=None):
+        self.sender_message_queue = Queue()
         self.message_event = multiprocessing.Event()
         self.message_event.clear()
-        message_center = FedMLMessageCenter(agent_config=self.sender_agent_config, message_queue=self.message_queue)
+        message_center = FedMLMessageCenter(agent_config=self.sender_agent_config,
+                                            sender_message_queue=self.sender_message_queue)
         self.message_center_process = Process(
             target=message_center.run_sender, args=(
-                self.message_event, self.message_queue,
+                self.message_event, self.sender_message_queue,
+                message_center_name
             )
         )
         self.message_center_process.start()
@@ -130,43 +153,96 @@ def stop(self):
     def check_message_stop_event(self):
         if self.message_event is not None and self.message_event.is_set():
             logging.info("Received message center stopping event.")
-            raise Exception("Message center stopped (for sender)")
+            raise MessageCenterStoppedException("Message center stopped (for sender)")
 
     def send_message(self, topic, payload, run_id=None):
         message_entity = FedMLMessageEntity(topic=topic, payload=payload, run_id=run_id)
-        self.message_queue.put(message_entity.get_message_body())
+        self.sender_message_queue.put(message_entity.get_message_body())
 
     def send_message_json(self, topic, payload):
         self.send_message(topic, payload)
 
-    def run_sender(self, message_event, message_queue):
+    def retry_sending_undelivered_message(self):
+        for sender_message in self.sender_message_list:
+            # Check if the message is published.
+            message_record = FedMLMessageRecord(json_record=sender_message)
+            is_published = False
+            for published_message in self.published_message_ids:
+                published_message_record = FedMLMessageRecord(json_record=published_message)
+                if published_message_record.message_id == message_record.message_id:
+                    is_published = True
+                    break
+            if is_published:
+                continue
+
+            # Retry to send the unpublished message based on the timeout value
+            timeout_ms = time.time() * 1000.0 - message_record.timestamp
+            if timeout_ms >= FedMLMessageCenter.PUBLISH_MESSAGE_RETRY_TIMEOUT and \
+                    self.retry_sending_count_map.get(message_record.message_id, 0) < \
+                    FedMLMessageCenter.PUBLISH_MESSAGE_RETRY_COUNT:
+                # Send the message
+                message_entity = FedMLMessageEntity(message_body=message_record.message_body)
+                message_id = self.sender_mqtt_mgr.send_message_json(message_entity.topic, message_entity.payload)
+                # Use a default of 0 so the first retry does not raise KeyError.
+                self.retry_sending_count_map[message_record.message_id] = \
+                    self.retry_sending_count_map.get(message_record.message_id, 0) + 1
+
+                # Generate the new message record.
+ sent_message_record = FedMLMessageRecord(message_id=message_id, + message_body=message_record.message_body) + + # Save the message + self.save_message_record(message_entity.run_id, message_entity.device_id, sent_message_record) + + def run_sender(self, message_event, message_queue, message_center_name): self.message_event = message_event - self.message_queue = message_queue + self.sender_message_queue = message_queue + self.message_center_name = message_center_name self.setup_sender_mqtt_mgr() - time.sleep(5) while True: + message_entity = None try: self.check_message_stop_event() except MessageCenterStoppedException as e: break + # noinspection PyBroadException try: + # Setup the mqtt connection self.setup_sender_mqtt_mgr() + # Get the message from the queue try: - message_body = self.message_queue.get(block=False, timeout=0.1) + message_body = message_queue.get(block=False, timeout=0.1) except queue.Empty as e: # If queue is empty, then break loop message_body = None if message_body is None: time.sleep(0.1) + self.retry_sending_undelivered_message() continue + # Generate the message entity object message_entity = FedMLMessageEntity(message_body=message_body) - self.sender_mqtt_mgr.send_message_json(message_entity.topic, message_entity.payload) + + # Send the message to mqtt server + message_id = self.sender_mqtt_mgr.send_message_json(message_entity.topic, message_entity.payload) + + # Generate the message record. + message_record = FedMLMessageRecord(message_id=message_id, message_body=message_body) + + # Cache the message + self.cache_message_record(message_record, is_sender=True) + + # Save the message + self.save_message_record(message_entity.run_id, message_entity.device_id, message_record) + except Exception as e: - logging.info( - f"Failed to send the message with topic {message_entity.topic}, payload {message_entity.payload}, {traceback.format_exc()}") + if message_entity is not None: + logging.info( + f"Failed to send the message with topic {message_entity.topic}, " + f"payload {message_entity.payload}, {traceback.format_exc()}" + ) + else: + logging.info(f"Failed to send the message: {traceback.format_exc()}") self.release_sender_mqtt_mgr() @@ -194,7 +270,9 @@ def release_listener_mqtt_mgr(self): self.listener_mqtt_mgr = None except Exception as e: logging.error( - f"Failed to release listener mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}") + f"Failed to release listener mqtt manager with Exception {e}. 
" + f"Traceback: {traceback.format_exc()}" + ) pass def add_message_listener(self, topic, listener_func): @@ -207,10 +285,13 @@ def remove_message_listener(self, topic): self.listener_topics.remove(topic) self.listener_handler_funcs.pop(topic) - def get_runner(self): + def get_message_runner(self): return None - def start_listener(self, sender_message_queue=None, agent_config=None): + def get_listener_message_queue(self): + return self.listener_message_queue + + def start_listener(self, sender_message_queue=None, agent_config=None, message_center_name=None): if self.listener_message_center_process is not None: return @@ -218,12 +299,13 @@ def start_listener(self, sender_message_queue=None, agent_config=None): self.listener_message_event = multiprocessing.Event() self.listener_message_event.clear() self.listener_agent_config = agent_config - message_runner = self.get_runner() + message_runner = self.get_message_runner() message_runner.listener_agent_config = agent_config self.listener_message_center_process = Process( target=message_runner.run_listener_dispatcher, args=( self.listener_message_event, self.listener_message_queue, - self.listener_handler_funcs, sender_message_queue + self.listener_handler_funcs, sender_message_queue, + message_center_name ) ) self.listener_message_center_process.start() @@ -231,11 +313,17 @@ def start_listener(self, sender_message_queue=None, agent_config=None): def check_listener_message_stop_event(self): if self.listener_message_event is not None and self.listener_message_event.is_set(): logging.info("Received listener message center stopping event.") - raise Exception("Message center stopped (for listener)") + raise MessageCenterStoppedException("Message center stopped (for listener)") def listener_message_dispatch_center(self, topic, payload): self.receive_message_json(topic, payload) + def listener_message_passthrough_dispatch_center(self, message): + payload_obj = json.loads(message.payload) + payload_obj["is_retain"] = message.retain + payload = json.dumps(payload_obj) + self.receive_message_json(message.topic, payload) + def receive_message(self, topic, payload, run_id=None): message_entity = FedMLMessageEntity(topic=topic, payload=payload, run_id=run_id) self.listener_message_queue.put(message_entity.get_message_body()) @@ -252,10 +340,13 @@ def unsubscribe_msg(self, topic): self.listener_mqtt_mgr.unsubscribe_msg(topic) def run_listener_dispatcher( - self, message_event, message_queue, listener_funcs, sender_message_queue): + self, message_event, message_queue, listener_funcs, sender_message_queue, + message_center_name + ): self.listener_message_event = message_event self.listener_message_queue = message_queue self.listener_handler_funcs = listener_funcs + self.message_center_name = message_center_name self.setup_listener_mqtt_mgr() @@ -265,51 +356,109 @@ def run_listener_dispatcher( methodcaller(FedMLMessageCenter.FUNC_REBUILD_MESSAGE_CENTER, sender_message_queue)(self) while True: + message_entity = None try: self.check_listener_message_stop_event() except MessageCenterStoppedException as e: break + # noinspection PyBroadException try: + # Setup the mqtt connection self.setup_listener_mqtt_mgr() + # Get the message from the queue try: - message_body = self.listener_message_queue.get(block=False, timeout=0.1) + message_body = message_queue.get(block=False, timeout=0.1) except queue.Empty as e: # If queue is empty, then break loop message_body = None if message_body is None: time.sleep(0.1) continue + # Generate the message entity object 
message_entity = FedMLMessageEntity(message_body=message_body) + # Generate the message record + message_record = FedMLMessageRecord(message_id=str(uuid.uuid4()), message_body=message_body) + + # Cache the message + self.cache_message_record(message_record, is_sender=False) + + # Save the message + self.save_message_record(message_entity.run_id, message_entity.device_id, + message_record, is_sender=False) + + # Dispatch the message to corresponding handler message_handler_func_name = self.listener_handler_funcs.get(message_entity.topic, None) if message_handler_func_name is not None: methodcaller(message_handler_func_name, message_entity.topic, message_entity.payload)(self) except Exception as e: - logging.info( - f"Failed to dispatch messages with topic {message_entity.topic}, payload {message_entity.payload}, {traceback.format_exc()}") - + if message_entity is not None: + logging.info( + f"Failed to dispatch messages with topic {message_entity.topic}, " + f"payload {message_entity.payload}, {traceback.format_exc()}") + else: + logging.info(f"Failed to dispatch messages: {traceback.format_exc()}") self.release_listener_mqtt_mgr() -class FedMLMessageEntity(object): - def __init__(self, topic=None, payload=None, run_id=None, message_body: dict = None): - self.topic = topic - self.payload = payload - self.run_id = run_id - if message_body is not None: - self.from_message_body(message_body=message_body) + def cache_message_record(self, message_record, is_sender=True): + # Save the message to the cached list. + if is_sender: + self.sender_message_list.append(message_record.get_json_record()) + else: + self.receiver_message_list.append(message_record.get_json_record()) + + def save_message_record(self, run_id, device_id, message_record, is_sender=True): + # Check if we enable to log messages to file + if not FedMLMessageCenter.ENABLE_SAVE_MESSAGE_TO_FILE: + return + + # Log messages to file + if is_sender: + print(f"save sent message record: {message_record.get_json_record()}") + else: + print(f"save received message record: {message_record.get_json_record()}") + saved_message_file = os.path.join( + self.constants.message_log_dir, + self.message_center_name, + FedMLMessageCenter.MESSAGE_SENT_RECORDS_FILE if is_sender else + FedMLMessageCenter.MESSAGE_RECEIVED_RECORDS_FILE + ) + os.makedirs(os.path.dirname(saved_message_file), exist_ok=True) + with open(saved_message_file, "a+") as f: + f.writelines([json.dumps(message_record.get_json_record()) + "\n"]) + + def save_published_message_record(self, message_id): + # Check if we enable to log messages to file + if not FedMLMessageCenter.ENABLE_SAVE_MESSAGE_TO_FILE: + return - def from_message_body(self, message_body: dict = None): - self.topic = message_body.get("topic", None) - self.payload = message_body.get("payload", None) - self.run_id = message_body.get("run_id", None) + # Log published message ids to file + message_record = {"message_id": message_id, "timestamp": time.time_ns()/1000.0/1000.0} + published_msg_record_file = os.path.join( + self.constants.message_log_dir, self.message_center_name, + FedMLMessageCenter.MESSAGE_SENT_SUCCESS_RECORDS_FILE) + os.makedirs(os.path.dirname(published_msg_record_file), exist_ok=True) + print(f"save sent success message record: {message_record}") + with open(published_msg_record_file, "a+") as f: + f.writelines([json.dumps(message_record) + "\n"]) - def get_message_body(self): - message_body = {"topic": self.topic, "payload": self.payload, "run_id": self.run_id} - return message_body + @staticmethod + def 
rebuild_message_center_from_queue(sender_message_queue, listener_message_queue=None): + message_center = FedMLMessageCenter(sender_message_queue=sender_message_queue, + listener_message_queue=listener_message_queue) + return message_center class MessageCenterStoppedException(Exception): """ Message center stopped. """ pass + + +class FedMLMessageCenterConstants: + def __init__(self): + self.home_dir = expanduser("~") + self.message_center_dir = os.path.join(self.home_dir, ".fedml", "global_services", "message_center") + self.message_log_dir = os.path.join(self.message_center_dir, "logs") + os.makedirs(self.message_log_dir, exist_ok=True) diff --git a/python/fedml/computing/scheduler/scheduler_core/message_common.py b/python/fedml/computing/scheduler/scheduler_core/message_common.py new file mode 100755 index 0000000000..24449af3b5 --- /dev/null +++ b/python/fedml/computing/scheduler/scheduler_core/message_common.py @@ -0,0 +1,77 @@ +import json +import time + + +class FedMLMessageEntity(object): + def __init__(self, topic=None, payload=None, run_id=None, device_id=None, message_body: dict = None): + self.topic = topic + self.payload = payload + self.run_id = run_id + self.device_id = device_id + if message_body is not None: + self.from_message_body(message_body=message_body) + + def from_message_body(self, message_body: dict = None): + self.topic = message_body.get("topic", None) + self.payload = message_body.get("payload", None) + if self.payload is not None: + payload_json = json.loads(self.payload) + self.run_id = payload_json.get("run_id", None) + self.run_id = payload_json.get("runId", None) if self.run_id is None else self.run_id + self.device_id = payload_json.get("edge_id", None) + self.device_id = payload_json.get("ID", None) if self.device_id is None else self.device_id + + def get_message_body(self): + message_body = {"topic": self.topic, "payload": self.payload, "run_id": self.run_id} + return message_body + + +class FedMLMessageRecord(object): + def __init__(self, message_id=None, message_body=None, json_record=None): + self.message_id = message_id + self.message_body = message_body + self.timestamp = time.time_ns() / 1000.0 / 1000.0 + if json_record is not None: + self.from_message_record(json_record=json_record) + + def get_json_record(self): + return {"message_id": self.message_id, "message_body": self.message_body, "timestamp": self.timestamp} + + def from_message_record(self, json_record: dict = None): + self.message_id = json_record.get("message_id", None) + self.message_body = json_record.get("message_body", None) + self.timestamp = json_record.get("timestamp", None) + + +class FedMLStatusEntity(object): + def __init__(self, topic=None, payload=None, status_msg_body: dict = None): + self.topic = topic + self.payload = payload + self.run_id = None + self.edge_id = None + self.status = None + if status_msg_body is not None: + self.from_message_body(status_msg_body=status_msg_body) + + def from_message_body(self, status_msg_body: dict = None): + self.topic = status_msg_body.get("topic", None) + self.payload = status_msg_body.get("payload", None) + if self.payload is not None: + payload_json = json.loads(self.payload) + self.run_id = payload_json.get("run_id", None) + self.run_id = payload_json.get("runId", None) if self.run_id is None else self.run_id + self.edge_id = payload_json.get("edge_id", None) + self.status = payload_json.get("status", None) + + def get_message_body(self): + status_msg_body = {"topic": self.topic, "payload": self.payload, "run_id": self.run_id} + 
return status_msg_body + + +class LogArgs: + def __init__(self, role=None, edge_id=None, server_id=None, log_server_url=None, log_file_dir=None): + self.role = role + self.edge_id = edge_id + self.server_id = server_id + self.log_server_url = log_server_url + self.log_file_dir = log_file_dir diff --git a/python/fedml/computing/scheduler/scheduler_core/ota_upgrade.py b/python/fedml/computing/scheduler/scheduler_core/ota_upgrade.py new file mode 100755 index 0000000000..e32f1df806 --- /dev/null +++ b/python/fedml/computing/scheduler/scheduler_core/ota_upgrade.py @@ -0,0 +1,99 @@ +import logging +import os +import time +import traceback +import fedml +from fedml.computing.scheduler.comm_utils import sys_utils +from .general_constants import GeneralConstants + + +class FedMLOtaUpgrade: + LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos' + STATUS_IDLE = "IDLE" + + def __init__(self, edge_id=None): + self.edge_id = edge_id + self.version = fedml.get_env_version() + + def ota_upgrade(self, payload, request_json, status_reporter=None, + is_master=False, is_deploy=False): + run_id = request_json["runId"] + force_ota = False + ota_version = None + + try: + run_config = request_json.get("run_config", None) + parameters = run_config.get("parameters", None) + common_args = parameters.get("common_args", None) + force_ota = common_args.get("force_ota", False) if common_args is not None else False + ota_version = common_args.get("ota_version", None) if common_args is not None else None + except Exception as e: + logging.error( + f"Failed to get ota upgrade parameters with Exception {e}. Traceback: {traceback.format_exc()}") + pass + + if force_ota and ota_version is not None: + should_upgrade = True if ota_version != fedml.__version__ else False + upgrade_version = ota_version + else: + try: + fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version) + except Exception as e: + logging.error(f"Failed to check fedml version with Exception {e}. Traceback: {traceback.format_exc()}") + return + + should_upgrade = False if fedml_is_latest_version else True + upgrade_version = remote_ver + + if should_upgrade: + FedMLOtaUpgrade._save_upgrading_job( + run_id, self.edge_id, payload, is_master=is_master, is_deploy=is_deploy + ) + if status_reporter is not None: + if is_master: + status_reporter.report_server_id_status( + run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + else: + status_reporter.report_client_id_status( + self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, run_id=run_id) + + logging.info(f"Upgrade to version {upgrade_version} ...") + + sys_utils.do_upgrade(self.version, upgrade_version) + raise Exception("Restarting after upgraded...") + + @staticmethod + def process_ota_upgrade_msg(): + os.system("pip install -U fedml") + + @staticmethod + def _save_upgrading_job(run_id, edge_id, payload, is_master=False, is_deploy=False): + if is_master and is_deploy: + from ..model_scheduler.device_server_data_interface import FedMLServerDataInterface + FedMLServerDataInterface.get_instance(). \ + save_started_job(run_id, edge_id, time.time(), + GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, + GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, + payload) + elif is_master and not is_deploy: + from ..master.server_data_interface import FedMLServerDataInterface + FedMLServerDataInterface.get_instance(). 
\ + save_started_job(run_id, edge_id, time.time(), + GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, + GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, + payload) + elif not is_master and is_deploy: + from ..model_scheduler.device_client_data_interface import FedMLClientDataInterface + FedMLClientDataInterface.get_instance(). \ + save_started_job(run_id, edge_id, time.time(), + GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, + GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, + payload) + elif not is_master and not is_deploy: + from ..slave.client_data_interface import FedMLClientDataInterface + FedMLClientDataInterface.get_instance(). \ + save_started_job(run_id, edge_id, time.time(), + GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, + GeneralConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, + payload) diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py new file mode 100755 index 0000000000..e2e090596d --- /dev/null +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -0,0 +1,545 @@ +import json +import logging +import os +import platform +import shutil +import traceback +import urllib +import zipfile +from urllib.parse import urljoin, urlparse +from ..comm_utils.constants import SchedulerConstants +from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs +from ..scheduler_entry.constants import Constants +from ....core.mlops import MLOpsMetrics +from ....core.mlops.mlops_device_perfs import MLOpsDevicePerfStats +from ..comm_utils.yaml_utils import load_yaml_config +from .general_constants import GeneralConstants +from ..comm_utils.sys_utils import get_python_program +from ..comm_utils import sys_utils +from ....core.mlops.mlops_utils import MLOpsUtils +from ..scheduler_core.message_center import FedMLMessageCenter +from ..scheduler_core.status_center import FedMLStatusCenter +from abc import ABC, abstractmethod + + +class RunnerError(Exception): + """ Runner stopped. """ + pass + + +class RunnerCompletedError(Exception): + """ Runner completed. 
""" + pass + + +class FedMLSchedulerBaseJobRunner(ABC): + + def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0, + cuda_visible_gpu_ids_str=None, is_master_runner=False, + agent_data_dir=None, agent_package_download_dir=None, + agent_package_unzip_dir=None, agent_log_file_dir=None): + self.args = args + self.is_master_runner = is_master_runner + self.agent_data_dir = agent_data_dir + self.agent_package_download_dir = agent_package_download_dir + self.agent_package_unzip_dir = agent_package_unzip_dir + self.agent_log_file_dir = agent_log_file_dir + self.prev_download_progress = 0 + self.run_process_event = None + self.run_process_completed_event = None + self.run_process = None + self.running_request_json = dict() + self.start_request_json = None + self.edge_id = edge_id + self.edge_user_name = None + self.edge_extra_url = None + self.run_id = run_id + self.unique_device_id = args.unique_device_id + self.request_json = request_json + self.version = args.version + self.device_id = args.device_id + self.cur_dir = os.path.split(os.path.realpath(__file__))[0] + self.agent_config = agent_config + self.mlops_metrics = None + self.status_reporter = None + self.ntp_offset = MLOpsUtils.get_ntp_offset() + self.server_id = None + self.fedml_config_object = None + self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT + self.cuda_visible_gpu_ids_str = cuda_visible_gpu_ids_str + self.user_name = None + self.general_edge_id = None + self.message_center = None + self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = { + "${FEDSYS.RUN_ID}": "", + "${FEDSYS.PRIVATE_LOCAL_DATA}": "", + "${FEDSYS.CLIENT_ID_LIST}": "", + "${FEDSYS.SYNTHETIC_DATA_URL}": "", + "${FEDSYS.IS_USING_LOCAL_DATA}": "", + "${FEDSYS.CLIENT_NUM}": "", + "${FEDSYS.CLIENT_INDEX}": "", + "${FEDSYS.CLIENT_OBJECT_LIST}": "", + "${FEDSYS.LOG_SERVER_URL}": "", + } + + def __repr__(self): + return "<{klass} @{id:x} {attrs}>".format( + klass=self.__class__.__name__, + id=id(self) & 0xFFFFFF, + attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), + ) + + def build_dynamic_constrain_variables(self, run_id, run_config): + data_config = run_config.get("data_config", {}) + server_edge_id_list = self.request_json["edgeids"] + local_edge_id_list = list() + local_edge_id_list.append(int(self.edge_id)) + is_using_local_data = 0 + private_data_dir = data_config.get("privateLocalData", "") + synthetic_data_url = data_config.get("syntheticDataUrl", "") + edges = self.request_json["edges"] + # if private_data_dir is not None \ + # and len(str(private_data_dir).strip(' ')) > 0: + # is_using_local_data = 1 + if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0: + params_config = run_config.get("parameters", None) + private_data_dir = self.agent_data_dir + if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0: + synthetic_data_url = private_data_dir + + self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id + self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "") + self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(" ", "") + self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "") + self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data) + self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list) + if not self.is_master_runner: + 
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = 1
+            for cur_index, id_value in enumerate(server_edge_id_list):
+                if str(id_value) == str(self.edge_id):
+                    self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = cur_index + 1
+                    break
+        client_objects = str(json.dumps(edges))
+        client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"')
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects
+        self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][
+            "LOG_SERVER_URL"
+        ]
+
+    @staticmethod
+    def unzip_file(zip_file, unzip_file_path) -> str:
+        if zipfile.is_zipfile(zip_file):
+            with zipfile.ZipFile(zip_file, "r") as zipf:
+                zipf.extractall(unzip_file_path)
+                unzipped_file_name = zipf.namelist()[0]
+        else:
+            raise Exception("Invalid zip file {}".format(zip_file))
+
+        return unzipped_file_name
+
+    def package_download_progress(self, count, blksize, filesize):
+        self.check_runner_stop_event()
+
+        downloaded = count * blksize
+        downloaded = filesize if downloaded > filesize else downloaded
+        progress = (downloaded / filesize * 100) if filesize != 0 else 0
+        progress_int = int(progress)
+        downloaded_kb = format(downloaded / 1024, '.2f')
+
+        # Since this hook function is stateless, we keep state on the instance to avoid printing progress repeatedly.
+        if count == 0:
+            self.prev_download_progress = 0
+        if progress_int != self.prev_download_progress and progress_int % 5 == 0:
+            self.prev_download_progress = progress_int
+            logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int))
+
+    def retrieve_and_unzip_package(self, package_name, package_url):
+        local_package_path = self.agent_package_download_dir
+        os.makedirs(local_package_path, exist_ok=True)
+        filename, filename_without_extension, file_extension = GeneralConstants.get_filename_and_extension(package_url)
+        local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}")
+        if os.path.exists(local_package_file):
+            os.remove(local_package_file)
+        package_url_without_query_path = urljoin(package_url, urlparse(package_url).path)
+        urllib.request.urlretrieve(package_url_without_query_path, local_package_file,
+                                   reporthook=self.package_download_progress)
+        unzip_package_path = os.path.join(self.agent_package_unzip_dir,
+                                          f"unzip_fedml_run_{self.run_id}_{filename_without_extension}")
+        try:
+            shutil.rmtree(unzip_package_path, ignore_errors=True)
+        except Exception as e:
+            logging.error(
+                f"Failed to remove directory {unzip_package_path}, Exception: {e}, Traceback: {traceback.format_exc()}")
+            pass
+
+        # Using unzipped folder name
+        package_dir_name = FedMLSchedulerBaseJobRunner.unzip_file(local_package_file, unzip_package_path)
+        unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name)
+
+        logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format(
+            local_package_file, unzip_package_path, unzip_package_full_path))
+
+        return unzip_package_full_path
+
+    @abstractmethod
+    def get_download_package_info(self, packages_config=None):
+        download_package_name = packages_config.get("server", None) if self.is_master_runner \
+            else packages_config["linuxClient"]
+        download_package_url = packages_config.get("serverUrl", None) if self.is_master_runner \
+            else packages_config["linuxClientUrl"]
+        return download_package_name, download_package_url
+
+    def update_local_fedml_config(self, run_id, run_config):
+        # 
Download the package + packages_config = run_config["packages_config"] + download_package_name, download_package_url = self.get_download_package_info(packages_config) + unzip_package_path = self.retrieve_and_unzip_package(download_package_name, download_package_url) + fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") + + # Load the config file to memory + config_from_container = load_yaml_config(fedml_local_config_file) + container_entry_file_config = config_from_container["entry_config"] + container_dynamic_args_config = config_from_container["dynamic_args"] + entry_file = container_entry_file_config["entry_file"] + conf_file = container_entry_file_config["conf_file"] + self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT) + full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file)) + + # Dynamically build constrain variable with realtime parameters from server + self.build_dynamic_constrain_variables(run_id, run_config) + + # Update entry arguments value with constrain variable values with realtime parameters from server + # currently we support the following constrain variables: + # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow + # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client + # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow + # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server, + # if this value is not null, the client will download data from this URL to use it as + # federated training data set + # ${FEDSYS_IS_USING_LOCAL_DATA}: whether we use private local data as federated training data set + # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}" + for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items(): + for argument_key, argument_value in container_dynamic_args_config.items(): + if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0: + replaced_argument_value = str(argument_value).replace( + constrain_variable_key, str(constrain_variable_value) + ) + container_dynamic_args_config[argument_key] = replaced_argument_value + + # Merge all container new config sections as new config dictionary + package_conf_object = dict() + package_conf_object["entry_config"] = container_entry_file_config + package_conf_object["dynamic_args"] = container_dynamic_args_config + package_conf_object["dynamic_args"]["config_version"] = self.args.config_version + container_dynamic_args_config["mqtt_config_path"] = os.path.join( + unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"]) + ) + container_dynamic_args_config["s3_config_path"] = os.path.join( + unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"]) + ) + log_file_dir = self.agent_log_file_dir + os.makedirs(log_file_dir, exist_ok=True) + package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir + + # Save new config dictionary to local file + fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") + GeneralConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file) + + # Build dynamic arguments and set arguments to fedml config object + self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path) + + return unzip_package_path, 
package_conf_object + + def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir): + fedml_conf_file = package_conf_object["entry_config"]["conf_file"] + fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep) + fedml_conf_path = os.path.join(base_dir, "fedml", "config", + os.path.basename(fedml_conf_file_processed)) + fedml_conf_object = load_yaml_config(fedml_conf_path) + run_params = run_config.get("parameters", {}) + job_yaml = run_params.get("job_yaml", {}) + + # Replace local fedml config objects with parameters from MLOps web + parameters_object = run_config.get("parameters", None) + if parameters_object is not None: + for config_k, config_v in fedml_conf_object.items(): + parameter_v = parameters_object.get(config_k, None) + if parameter_v is not None: + fedml_conf_object[config_k] = parameter_v + parameters_object.pop(config_k) + + for config_k, config_v in parameters_object.items(): + fedml_conf_object[config_k] = config_v + + package_dynamic_args = package_conf_object["dynamic_args"] + if fedml_conf_object.get("comm_args", None) is not None: + fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"] + fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"] + fedml_conf_object["common_args"]["using_mlops"] = True + if fedml_conf_object.get("train_args", None) is not None: + fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"] + fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"] + fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"]) + fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"]) + fedml_conf_object["train_args"]["client_id"] = self.edge_id + fedml_conf_object["train_args"]["server_id"] = self.request_json.get("server_id", "0") + if fedml_conf_object.get("device_args", None) is not None: + fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"]) + # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"] + data_args = fedml_conf_object.get("data_args") + if data_args is not None: + data_cache_dir = fedml_conf_object["data_args"].get("data_cache_dir") + if data_cache_dir is not None: + data_cache_dir = os.path.join(data_cache_dir, str(self.edge_id)) + fedml_conf_object["data_args"]["data_cache_dir"] = data_cache_dir + if fedml_conf_object.get("tracking_args", None) is not None: + fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"] + fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"] + + fedml_conf_object["dynamic_args"] = package_dynamic_args + self.fedml_config_object = fedml_conf_object.copy() + GeneralConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path) + + def callback_run_bootstrap(self, job_pid): + GeneralConstants.save_bootstrap_process(self.run_id, job_pid, data_dir=self.agent_data_dir) + + def run_bootstrap_script(self, bootstrap_cmd_list, bootstrap_script_file): + try: + logging.info("Bootstrap commands are being executed...") + process, error_list = GeneralConstants.execute_commands_with_live_logs( + bootstrap_cmd_list, callback=self.callback_run_bootstrap) + + ret_code, out, err = process.returncode, None, None + if ret_code is None or ret_code <= 0: + if error_list is not None and len(error_list) > 0: + 
is_bootstrap_run_ok = False
+                else:
+                    if out is not None:
+                        out_str = sys_utils.decode_our_err_result(out)
+                        if out_str != "":
+                            logging.info("{}".format(out_str))
+
+                    sys_utils.log_return_info(bootstrap_script_file, 0)
+
+                    is_bootstrap_run_ok = True
+            else:
+                if err is not None:
+                    err_str = sys_utils.decode_our_err_result(err)
+                    if err_str != "":
+                        logging.error("{}".format(err_str))
+
+                sys_utils.log_return_info(bootstrap_script_file, ret_code)
+
+                is_bootstrap_run_ok = False
+        except Exception as e:
+            logging.error(f"Bootstrap script error: Exception: {e}, Traceback: {traceback.format_exc()}")
+            is_bootstrap_run_ok = False
+        return is_bootstrap_run_ok
+
+    def check_runner_stop_event(self):
+        if self.run_process_event.is_set():
+            logging.info("Received stopping event.")
+            raise RunnerError("Runner stopped")
+
+        if self.run_process_completed_event.is_set():
+            logging.info("Received completed event.")
+            raise RunnerCompletedError("Runner completed")
+
+    def trigger_stop_event(self):
+        if self.run_process_event is not None:
+            self.run_process_event.set()
+
+    def trigger_completed_event(self):
+        if self.run_process_completed_event is not None:
+            self.run_process_completed_event.set()
+
+    def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_full_path, dynamic_args_config,
+                         fedml_config_object):
+        run_config = self.request_json["run_config"]
+        run_params = run_config.get("parameters", {})
+        client_rank = self.request_json.get("client_rank", 1)
+        job_yaml = run_params.get("job_yaml", {})
+        job_yaml_default_none = run_params.get("job_yaml", None)
+        job_api_key = job_yaml.get("run_api_key", None)
+        job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key
+        assigned_gpu_ids = run_params.get("gpu_ids", None)
+        job_type = job_yaml.get("job_type", None)
+        containerize = fedml_config_object.get("containerize", None)
+        image_pull_policy = fedml_config_object.get("image_pull_policy", Constants.IMAGE_PULL_POLICY_ALWAYS)
+        job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type
+        conf_file_object = load_yaml_config(conf_file_full_path)
+        entry_args_dict = conf_file_object.get("fedml_entry_args", {})
+        entry_args = entry_args_dict.get("arg_items", None)
+        scheduler_match_info = self.request_json.get("scheduler_match_info", {})
+        if job_type == Constants.JOB_TASK_TYPE_TRAIN:
+            containerize = True if containerize is None else containerize
+
+        # Bootstrap Info
+        bootstrap_script_path, bootstrap_script_dir, bootstrap_script_file = [None] * 3
+        env_args = fedml_config_object.get("environment_args", None)
+
+        if env_args is not None:
+            bootstrap_script_file = env_args.get("bootstrap", None)
+            if bootstrap_script_file is not None:
+                bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep)
+                if platform.system() == 'Windows':
+                    # Replace the .sh suffix explicitly; str.rstrip() strips characters, not a suffix.
+                    bootstrap_script_file = bootstrap_script_file[:-len('.sh')] + '.bat'
+                if bootstrap_script_file is not None:
+                    bootstrap_script_dir = os.path.join(unzip_package_path, "fedml",
+                                                        os.path.dirname(bootstrap_script_file))
+                    bootstrap_script_path = os.path.join(
+                        bootstrap_script_dir, os.path.basename(bootstrap_script_file)
+                    )
+
+        bootstrap_cmd_list = list()
+        if bootstrap_script_path:
+            logging.info("Bootstrap commands are being generated...")
+            bootstrap_cmd_list = JobRunnerUtils.generate_bootstrap_commands(bootstrap_script_path=bootstrap_script_path,
+                                                                            bootstrap_script_dir=bootstrap_script_dir,
+                                                                            
bootstrap_script_file=bootstrap_script_file) + logging.info(f"Generated following Bootstrap commands: {bootstrap_cmd_list}") + + if not containerize: + if len(bootstrap_cmd_list) and not (job_type == Constants.JOB_TASK_TYPE_DEPLOY or + job_type == Constants.JOB_TASK_TYPE_SERVE): + bootstrapping_successful = self.run_bootstrap_script(bootstrap_cmd_list=bootstrap_cmd_list, + bootstrap_script_file=bootstrap_script_file) + + if not bootstrapping_successful: + logging.info("failed to update local fedml config.") + self.check_runner_stop_event() + # Send failed msg when exceptions. + raise Exception(f"Failed to execute following bootstrap commands: {bootstrap_cmd_list}") + + logging.info("cleanup the previous learning process and bootstrap process...") + GeneralConstants.cleanup_learning_process(self.request_json["runId"], data_dir=self.agent_data_dir) + GeneralConstants.cleanup_bootstrap_process(self.request_json["runId"], data_dir=self.agent_data_dir) + + executable_interpreter = GeneralConstants.CLIENT_SHELL_PS \ + if platform.system() == GeneralConstants.PLATFORM_WINDOWS else GeneralConstants.CLIENT_SHELL_BASH + + if job_yaml_default_none is None: + # Generate the job executing commands for previous federated learning (Compatibility) + python_program = get_python_program() + logging.info("Run the client: {} {} --cf {} --rank {} --role client".format( + python_program, entry_file_full_path, conf_file_full_path, str(dynamic_args_config.get("rank", 1)))) + rank = str(dynamic_args_config.get("rank", 1)) + entry_command = f"{python_program} {entry_file_full_path} --cf " \ + f"{conf_file_full_path} --rank {rank} --role client" + shell_cmd_list = [entry_command] + + # Run the job executing commands for previous federated learning (Compatibility) + process, error_list = GeneralConstants.execute_commands_with_live_logs( + shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False) + is_launch_task = False + else: + self.check_runner_stop_event() + + if self.is_master_runner: + self.status_reporter.report_server_id_status( + self.run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + else: + self.status_reporter.report_client_id_status( + self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, run_id=self.run_id) + + # Generate the job executing commands + job_executing_commands = JobRunnerUtils.generate_job_execute_commands( + self.run_id, self.edge_id, self.version, + self.package_type, executable_interpreter, entry_file_full_path, + conf_file_object, entry_args, assigned_gpu_ids, + job_api_key, client_rank, scheduler_match_info=scheduler_match_info, + cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str) + + if containerize is not None and containerize is True: + docker_args = fedml_config_object.get("docker", {}) + docker_args = JobRunnerUtils.create_instance_from_dict(DockerArgs, docker_args) + try: + job_executing_commands = JobRunnerUtils.generate_launch_docker_command( + docker_args=docker_args, run_id=self.run_id, edge_id=self.edge_id, + unzip_package_path=unzip_package_path, executable_interpreter=executable_interpreter, + entry_file_full_path=entry_file_full_path, bootstrap_cmd_list=bootstrap_cmd_list, + cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str, image_pull_policy=image_pull_policy) + except Exception as e: + logging.error(f"Error occurred while generating containerized launch commands. 
" + f"Exception: {e}, Traceback: {traceback.format_exc()}") + return None, None, None + + if not job_executing_commands: + raise Exception("Failed to generate docker execution command") + + # Run the job executing commands + logging.info(f"Run the client job with job id {self.run_id}, device id {self.edge_id}.") + process, error_list = GeneralConstants.execute_commands_with_live_logs( + job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor, + should_write_log_file=False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True) + is_launch_task = False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True + + return process, is_launch_task, error_list + + def callback_start_fl_job(self, job_pid): + GeneralConstants.save_learning_process(self.run_id, job_pid, data_dir=self.agent_data_dir) + self.mlops_metrics.report_sys_perf( + self.args, self.agent_config["mqtt_config"], job_process_id=job_pid) + + def start_job_perf(self, job_pid): + GeneralConstants.save_learning_process(self.run_id, job_pid, data_dir=self.agent_data_dir) + self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid) + + def job_error_processor(self, error_list): + self.check_runner_stop_event() + + error_str = "\n".join(error_list) + error_message = f"Error occurred when running the job... {error_str}" + logging.error(error_message) + raise Exception(error_message) + + def start_runner_process( + self, run_id, edge_id, request_json, cuda_visible_gpu_ids_str=None, + sender_message_queue=None, status_center_queue=None + ): + return None + + @staticmethod + def cleanup_containers_and_release_gpus(run_id, edge_id): + job_type = JobRunnerUtils.get_job_type_from_run_id(run_id) + + if not job_type: + logging.info(f"Failed to get job type from run id {run_id}. This is not an error as it would usually " + f"happen when the job is not found in the database because job is already finished and " + f"cleaned up. Exiting cleanup_containers_and_release_gpus.") + return + + # Check if the job type is not "serve" or "deploy" + if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or + job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY): + + # Terminate the run docker container if exists + container_name = JobRunnerUtils.get_run_container_name(run_id) + docker_client = JobRunnerUtils.get_docker_client(DockerArgs()) + logging.info(f"Terminating the run docker container {container_name} if exists...") + try: + JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client) + except Exception as e: + logging.error(f"Exception {e} occurred when terminating docker container. 
" + f"Traceback: {traceback.format_exc()}") + + # Release the GPU ids and update the GPU availability in the persistent store + JobRunnerUtils.get_instance().release_gpu_ids(run_id, edge_id) + + # Send mqtt message reporting the new gpu availability to the backend + MLOpsDevicePerfStats.report_gpu_device_info(edge_id) + + def rebuild_message_status_center(self, sender_message_queue, listener_message_queue, status_queue): + self.message_center = FedMLMessageCenter.rebuild_message_center_from_queue( + sender_message_queue, listener_message_queue=listener_message_queue) + if self.mlops_metrics is None: + self.mlops_metrics = MLOpsMetrics() + self.mlops_metrics.set_messenger(self.message_center) + self.mlops_metrics.run_id = self.run_id + + status_center = FedMLStatusCenter.rebuild_status_center_from_queue(status_queue) + if self.status_reporter is None: + self.status_reporter = MLOpsMetrics() + self.status_reporter.set_messenger(status_center) + self.status_reporter.run_id = self.run_id diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py new file mode 100755 index 0000000000..58198b6661 --- /dev/null +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py @@ -0,0 +1,66 @@ + +from abc import ABC, abstractmethod + + +class FedMLSchedulerBaseJobRunnerManager(ABC): + + def __init__(self): + if not hasattr(self, "job_runners"): + self.job_runners = dict() + if not hasattr(self, "cloud_run_process_map"): + self.cloud_run_process_map = dict() + + @abstractmethod + def _generate_job_runner_instance( + self, args, run_id=None, request_json=None, agent_config=None, edge_id=None + ): + return None + + def start_job_runner( + self, run_id, request_json, args=None, edge_id=None, is_server_job=False, + sender_message_queue=None, listener_message_queue=None, status_center_queue=None, + should_start_cloud_server=False, use_local_process_as_cloud_server=False, + cuda_visible_gpu_ids_str=None + ): + run_id_str = str(run_id) + self.job_runners[run_id_str] = self._generate_job_runner_instance( + args, run_id=run_id, request_json=request_json, + agent_config=args.agent_config, edge_id=edge_id, + ) + self.job_runners[run_id_str].start_runner_process( + run_id, request_json, edge_id=edge_id, + sender_message_queue=sender_message_queue, + listener_message_queue=listener_message_queue, + status_center_queue=status_center_queue + ) + + def stop_job_runner(self, run_id): + run_id_str = str(run_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].trigger_stop_event() + + def complete_job_runner(self, run_id): + run_id_str = str(run_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].trigger_completed_event() + + def put_run_edge_device_info_to_queue(self, run_id, device_info): + run_id_str = str(run_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].put_run_edge_device_info_to_queue(run_id, device_info) + + def get_runner_process(self, run_id, is_cloud_server=False): + run_id_str = str(run_id) + + if self.job_runners.get(run_id_str, None) is None: + return None + + return self.job_runners[run_id_str].run_process + + def get_all_runner_pid_map(self): + process_id_dict = dict() + for run_id, runner in self.job_runners.items(): + if runner.run_process is not None: + process_id_dict[str(run_id)] = runner.run_process.pid + + return 
process_id_dict
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
new file mode 100755
index 0000000000..4a0c950655
--- /dev/null
+++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py
@@ -0,0 +1,260 @@
+
+import json
+import logging
+import multiprocessing
+import sys
+import time
+import traceback
+import uuid
+import fedml
+from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog
+from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager
+from ....core.mlops.mlops_metrics import MLOpsMetrics
+from ..comm_utils import sys_utils
+from ..scheduler_core.message_center import FedMLMessageCenter
+from ..scheduler_core.status_center import FedMLStatusCenter
+from .account_manager import FedMLAccountManager
+from .general_constants import GeneralConstants
+from abc import ABC, abstractmethod
+
+
+class FedMLSchedulerBaseProtocolManager(FedMLMessageCenter, FedMLStatusCenter, ABC):
+
+    def __init__(self, args, agent_config=None, is_master=False):
+        FedMLMessageCenter.__init__(self)
+        FedMLStatusCenter.__init__(self)
+        self.request_json = None
+        self.version = fedml.get_env_version()
+        self.args = args
+        self.is_master_agent = is_master
+        self.message_status_runner = None
+        self.message_center = None
+        self.status_center = None
+        self.message_center_name = "master_agent" if is_master else "slave_agent"
+        self.run_id = None
+        self.edge_id = args.edge_id
+        self.general_edge_id = None
+        self.server_agent_id = args.edge_id
+        self.current_device_id = args.current_device_id
+        self.unique_device_id = args.unique_device_id
+        self.agent_config = agent_config
+        self.topic_active = None
+        self.topic_last_will = None
+        self.communication_mgr = None
+        self.subscribed_topics = list()
+        self.mlops_metrics = None
+        self.status_reporter = None
+        self.user_name = args.user_name
+
+        if multiprocessing.get_start_method() != "fork":
+            # Force all platforms (Windows/Linux/macOS) to use the same start method (fork) for multiprocessing.
+            multiprocessing.set_start_method("fork", force=True)
+
+    def generate_topics(self):
+        # Generate the subscribed topics.
+        self.subscribed_topics.clear()
+        # self.subscribed_topics.append(self.topic_start_train)
+
+    def add_protocol_handler(self):
+        # Add the message listeners for all topics; the following is an example.
+        # self.add_message_listener(self.topic_start_train, self.callback_start_train)
+        pass
+
+    def initialize(self):
+        # Generate the message topics
+        self.generate_topics()
+
+        # Setup the MQTT connection
+        self.communication_mgr = MqttManager(
+            self.agent_config["mqtt_config"]["BROKER_HOST"],
+            self.agent_config["mqtt_config"]["BROKER_PORT"],
+            self.agent_config["mqtt_config"]["MQTT_USER"],
+            self.agent_config["mqtt_config"]["MQTT_PWD"],
+            self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"],
+            f"FedML_Agent_Daemon_@{self.user_name}@_@{self.current_device_id}@_@{str(uuid.uuid4())}@",
+            self.topic_last_will,
+            json.dumps({"ID": self.edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE})
+        )
+
+        # Add the message listeners for all topics
+        self.add_protocol_handler()
+
+        # Start the message center to process edge related messages.
+        self.setup_message_center()
+
+        # Start the status center to process edge related status.
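+        # (The status center runs in its own process and consumes status messages
+        # from a multiprocessing queue; see status_center.py.)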
+ self.start_status_listener_center() + + # Start the message center for listener + self.start_listener(sender_message_queue=self.message_center.get_sender_message_queue(), + agent_config=self.agent_config, + message_center_name=self.message_center_name) + + # Init extra items, e.g. database, recovery, etc. + self._init_extra_items() + + # Setup MQTT connected listener + self.communication_mgr.add_connected_listener(self.on_agent_communication_connected) + self.communication_mgr.add_disconnected_listener(self.on_agent_communication_disconnected) + self.communication_mgr.connect() + + def start(self): + # Start MQTT message loop + try: + self.communication_mgr.loop_forever() + except Exception as e: + if str(e) == "Restarting after upgraded...": + logging.info("Restarting after upgraded...") + else: + logging.info("Server tracing: {}".format(traceback.format_exc())) + + finally: + FedMLAccountManager.write_login_failed_file(is_client=not self.is_master_agent) + + self.stop() + + time.sleep(5) + sys_utils.cleanup_all_fedml_server_login_processes( + GeneralConstants.MASTER_LOGIN_PROGRAM if self.is_master_agent else GeneralConstants.SLAVE_LOGIN_PROGRAM, + clean_process_group=False) + sys.exit(1) + + def stop(self): + if self.communication_mgr is not None: + # noinspection PyBroadException + try: + for topic in self.subscribed_topics: + self.communication_mgr.unsubscribe_msg(topic) + except Exception: + pass + + self.communication_mgr.loop_stop() + self.communication_mgr.disconnect() + + self.release_message_center() + + @abstractmethod + def _init_extra_items(self): + pass + + def on_agent_communication_connected(self, mqtt_client_object): + # Setup MQTT message passthrough listener for all messages + self.communication_mgr.add_message_passthrough_listener(self.listener_message_passthrough_dispatch_center) + + # Subscribe topics for starting train, stopping train and fetching client status. + for topic in self.subscribed_topics: + self.communication_mgr.subscribe_msg(topic) + + # Broadcast the first active message. 
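+        # (The active payload is {"ID": <edge_id>, "status": MSG_MLOPS_SERVER_STATUS_IDLE};
+        # see send_agent_active_msg() at the bottom of this class.)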
+        self.send_agent_active_msg(self.edge_id)
+        if self.general_edge_id is not None:
+            self.send_agent_active_msg(self.general_edge_id)
+
+        # Echo results
+        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout()
+        self.print_connected_info()
+        MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=False)
+
+    @abstractmethod
+    def print_connected_info(self):
+        print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
+        print(
+            "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is "
+            + str(self.unique_device_id)
+        )
+
+    def on_agent_communication_disconnected(self, mqtt_client_object):
+        pass
+
+    def setup_message_center(self):
+        if self.message_center is not None:
+            return
+
+        self.message_center = FedMLMessageCenter(agent_config=self.agent_config)
+        self.message_center.start_sender(message_center_name=self.message_center_name)
+
+        if self.mlops_metrics is None:
+            self.mlops_metrics = MLOpsMetrics()
+        self.mlops_metrics.set_messenger(self)
+        self.mlops_metrics.run_id = self.run_id
+        self.mlops_metrics.edge_id = self.edge_id
+        self.mlops_metrics.server_agent_id = self.server_agent_id
+
+    def send_message_json(self, topic, payload):
+        self.message_center.send_message_json(topic, payload)
+
+    def rebuild_message_center(self, message_center_queue):
+        self.message_center = FedMLMessageCenter(sender_message_queue=message_center_queue)
+
+        if self.mlops_metrics is None:
+            self.mlops_metrics = MLOpsMetrics()
+        self.mlops_metrics.set_messenger(self)
+        self.mlops_metrics.run_id = self.run_id
+        self.mlops_metrics.edge_id = self.edge_id
+        self.mlops_metrics.server_agent_id = self.server_agent_id
+
+    def release_message_center(self):
+        try:
+            if self.message_center is not None:
+                self.message_center.stop()
+                self.message_center = None
+
+        except Exception as e:
+            logging.error(
+                f"Failed to release the message center with Exception {e}. "
+                f"Traceback: {traceback.format_exc()}")
+
+    def start_status_listener_center(self):
+        self.start_status_center(
+            sender_message_center_queue=self.message_center.get_sender_message_queue(),
+            listener_message_center_queue=self.get_listener_message_queue(),
+            is_slave_agent=not self.is_master_agent
+        )
+
+        if self.status_reporter is None:
+            self.status_reporter = MLOpsMetrics()
+        self.status_reporter.set_messenger(self, send_message_func=self.send_status_message)
+        self.status_reporter.run_id = self.run_id
+        self.status_reporter.edge_id = self.edge_id
+        self.status_reporter.server_agent_id = self.server_agent_id
+
+    def rebuild_status_center(self, status_center_queue):
+        self.status_center = FedMLStatusCenter(message_queue=status_center_queue)
+
+        if self.status_reporter is None:
+            self.status_reporter = MLOpsMetrics()
+        self.status_reporter.set_messenger(self.status_center, send_message_func=self.status_center.send_status_message)
+        self.status_reporter.run_id = self.run_id
+        self.status_reporter.edge_id = self.edge_id
+        self.status_reporter.server_agent_id = self.server_agent_id
+
+    @abstractmethod
+    def generate_protocol_manager(self):
+        # Generate the protocol manager instance and set the attribute values.
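+        # A minimal subclass sketch (illustrative only; the class name is hypothetical):
+        #     def generate_protocol_manager(self):
+        #         message_status_runner = FedMLLaunchSlaveProtocolManager(
+        #             self.args, agent_config=self.agent_config)
+        #         message_status_runner.run_id = self.run_id
+        #         message_status_runner.edge_id = self.edge_id
+        #         return message_status_runner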
+ return None + + def get_message_runner(self): + if self.message_status_runner is not None: + return self.message_status_runner + + self.message_status_runner = self.generate_protocol_manager() + self.message_status_runner.status_queue = self.get_status_queue() + + return self.message_status_runner + + def get_status_runner(self): + if self.message_status_runner is None: + self.get_message_runner() + if self.message_status_runner is not None: + self.message_status_runner.sender_message_queue = self.message_center.get_sender_message_queue() + + if self.message_status_runner is not None: + self.message_status_runner.sender_message_queue = self.message_center.get_sender_message_queue() + return self.message_status_runner + + return None + + def send_agent_active_msg(self, edge_id): + active_msg = {"ID": edge_id, "status": GeneralConstants.MSG_MLOPS_SERVER_STATUS_IDLE} + self.message_center.send_message_json(self.topic_active, json.dumps(active_msg)) diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py new file mode 100755 index 0000000000..569f4d9257 --- /dev/null +++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py @@ -0,0 +1,410 @@ +import logging +import time + +from ..slave.client_constants import ClientConstants +from ..master.server_constants import ServerConstants +from enum import Enum, unique +import multiprocessing +from multiprocessing import Process, Queue +import queue +from .message_common import FedMLMessageEntity, FedMLStatusEntity +from .message_center import FedMLMessageCenter +import traceback +from .status_manager_protocols import FedMLStatusManager +from .compute_cache_manager import ComputeCacheManager + + +@unique +class JobStatus(Enum): + STATUS_OFFLINE = "OFFLINE" + STATUS_PROVISIONING = "PROVISIONING" + STATUS_IDLE = "IDLE" + UPGRADING = "UPGRADING" + STARTING = "STARTING" + STATUS_RUNNING = "RUNNING" + STATUS_STOPPING = "STOPPING" + STATUS_KILLED = "KILLED" + STATUS_FAILED = "FAILED" + STATUS_FINISHED = "FINISHED" + STATUS_EXCEPTION = "EXCEPTION" + + def __str__(self): + return self.value + + @classmethod + def get_job_enum_from_str(cls, job_status_str: str): + for job_status in cls: + if job_status.value == job_status_str: + return job_status + return cls.STATUS_OFFLINE + + @staticmethod + def is_job_completed(job_status_str: str): + if job_status_str == JobStatus.STATUS_FINISHED.value or \ + job_status_str == JobStatus.STATUS_FAILED.value or \ + job_status_str == JobStatus.STATUS_KILLED.value or \ + job_status_str == JobStatus.STATUS_EXCEPTION.value: + return True + + return False + + +@unique +class DeviceStatus(Enum): + STATUS_OFFLINE = "OFFLINE" + STATUS_PROVISIONING = "PROVISIONING" + STATUS_IDLE = "IDLE" + STATUS_UPGRADING = "UPGRADING" + STATUS_QUEUED = "QUEUED" + STATUS_INITIALIZING = "INITIALIZING" + STATUS_TRAINING = "TRAINING" + STATUS_RUNNING = "RUNNING" + STATUS_STOPPING = "STOPPING" + STATUS_KILLED = "KILLED" + STATUS_FAILED = "FAILED" + STATUS_EXCEPTION = "EXCEPTION" + STATUS_FINISHED = "FINISHED" + + def __str__(self): + return self.value + + @classmethod + def get_device_enum_from_str(cls, device_status_str: str): + for device_status in cls: + if device_status.value == device_status_str: + return device_status + return cls.STATUS_OFFLINE + + +class FedMLStatusCenter(object): + TOPIC_MASTER_STATUS_PREFIX = "fl_server/flserver_agent_" + TOPIC_SLAVE_STATUS_PREFIX = "fl_client/flclient_agent_" + TOPIC_SLAVE_STATUS_TO_MLOPS_PREFIX = 
"fl_run/fl_client/mlops/status" + TOPIC_SLAVE_JOB_LAUNCH_PREFIX = "flserver_agent/" + TOPIC_SLAVE_JOB_LAUNCH_SUFFIX = "/start_train" + TOPIC_SLAVE_JOB_STOP_PREFIX = "flserver_agent/" + TOPIC_SLAVE_JOB_STOP_SUFFIX = "/stop_train" + + def __init__(self, message_queue=None): + self.status_queue = message_queue + self.job_status_in_slave = dict() + self.entire_job_status = None + self.job_status_in_master = dict() + self.slave_devices_status = dict() + self.master_devices_status = dict() + self.status_center_process = None + self.status_event = None + self.status_sender_message_center_queue = None + self.status_listener_message_center_queue = None + self.status_message_center = None + self.status_manager_instance = None + self.status_runner = None + + def __repr__(self): + return "<{klass} @{id:x} {attrs}>".format( + klass=self.__class__.__name__, + id=id(self) & 0xFFFFFF, + attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), + ) + + def add_job_status_in_slave(self, device_id, status): + self.job_status_in_slave[device_id] = self._status_transition(status) + + def add_job_status_in_master(self, device_id, status): + self.job_status_in_master[device_id] = self._status_transition(status) + + def set_entire_job_status(self, status): + self.entire_job_status = status + + def add_slave_device_status(self, device_id, status): + self.slave_devices_status[device_id] = self._status_transition(status) + + def add_master_device_status(self, device_id, status): + self.master_devices_status[device_id] = self._status_transition(status) + + def get_job_status_in_slave(self, device_id): + return self.job_status_in_slave.get(device_id, None) + + def get_job_status_in_master(self, device_id): + return self.job_status_in_master.get(device_id, None) + + def get_entire_job_status(self): + return self.entire_job_status + + def get_slave_device_status(self, device_id): + return self.slave_devices_status.get(device_id, None) + + def get_master_device_status(self, device_id): + return self.master_devices_status.get(device_id, None) + + def _status_transition(self, status): + transition_status = status + if self.entire_job_status is not None: + if self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ + self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: + if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \ + status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ + status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED: + transition_status = status + else: + transition_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED + + return transition_status + + def get_status_runner(self): + return None + + def start_status_center(self, sender_message_center_queue=None, + listener_message_center_queue=None, is_slave_agent=False): + self.status_queue = Queue() + self.status_event = multiprocessing.Event() + self.status_event.clear() + self.status_sender_message_center_queue = sender_message_center_queue + self.status_listener_message_center_queue = listener_message_center_queue + self.status_runner = self.get_status_runner() + target_func = self.status_runner.run_status_dispatcher if not is_slave_agent else \ + self.status_runner.run_status_dispatcher_in_slave + self.status_center_process = Process( + target=target_func, args=( + self.status_event, self.status_queue, self.status_sender_message_center_queue, + self.status_listener_message_center_queue + ) + ) + + self.status_center_process.start() + + def 
check_message_stop_event(self):
+        if self.status_event is not None and self.status_event.is_set():
+            logging.info("Received status center stopping event.")
+            raise StatusCenterStoppedException("Status center stopped (for sender)")
+
+    def send_message(self, topic, payload, run_id=None):
+        message_entity = FedMLMessageEntity(topic=topic, payload=payload, run_id=run_id)
+        self.status_queue.put(message_entity.get_message_body())
+
+    def send_message_json(self, topic, payload):
+        self.send_message(topic, payload)
+
+    def send_status_message(self, topic, payload):
+        message_entity = FedMLMessageEntity(topic=topic, payload=payload)
+        self.status_queue.put(message_entity.get_message_body())
+
+    def get_status_queue(self):
+        return self.status_queue
+
+    def status_center_process_master_status(self, topic, payload):
+        pass
+
+    def status_center_process_slave_status(self, topic, payload):
+        pass
+
+    def rebuild_message_center(self, message_center_queue):
+        pass
+
+    def rebuild_status_center(self, status_queue):
+        pass
+
+    @staticmethod
+    def save_job_status(run_id, status):
+        ComputeCacheManager.get_instance().set_redis_params()
+        ComputeCacheManager.get_instance().get_status_cache().save_job_status(run_id, status)
+
+    @staticmethod
+    def save_device_status_in_job(run_id, device_id, status):
+        ComputeCacheManager.get_instance().set_redis_params()
+        ComputeCacheManager.get_instance().get_status_cache().save_device_status_in_job(run_id, device_id, status)
+
+    def run_status_dispatcher(self, status_event, status_queue,
+                              sender_message_center_queue,
+                              listener_message_center_queue):
+        # Save the parameters
+        self.status_event = status_event
+        self.status_queue = status_queue
+        self.status_sender_message_center_queue = sender_message_center_queue
+        self.status_listener_message_center_queue = listener_message_center_queue
+
+        # Rebuild the message center
+        message_center = None
+        if sender_message_center_queue is not None:
+            self.rebuild_message_center(sender_message_center_queue)
+            message_center = FedMLMessageCenter(
+                sender_message_queue=sender_message_center_queue,
+                listener_message_queue=listener_message_center_queue
+            )
+
+        if sender_message_center_queue is not None:
+            self.rebuild_status_center(status_queue)
+
+        # Init the status manager instances
+        status_manager_instances = dict()
+
+        while True:
+            message_entity = None
+
+            # Check if we should stop the status dispatcher
+            try:
+                self.check_message_stop_event()
+            except StatusCenterStoppedException:
+                break
+
+            # Dispatch status messages.
+            # noinspection PyBroadException
+            try:
+                # Get the status message from the queue
+                try:
+                    message_body = status_queue.get(block=False)
+                except queue.Empty:  # The queue is empty; wait briefly and retry
+                    message_body = None
+                if message_body is None:
+                    time.sleep(0.1)
+                    continue
+
+                # Build the message and status entities
+                message_entity = FedMLMessageEntity(message_body=message_body)
+                status_entity = FedMLStatusEntity(status_msg_body=message_body)
+
+                # Generate the status manager instance
+                if status_manager_instances.get(status_entity.run_id) is None:
+                    status_manager_instances[status_entity.run_id] = FedMLStatusManager(
+                        run_id=status_entity.run_id, edge_id=status_entity.edge_id, status_center=self,
+                        message_center=message_center)
+                else:
+                    status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id
+
+                # Process the master and slave status.
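+                # (For example, a topic like "fl_server/flserver_agent_<server_id>/status" would
+                # match TOPIC_MASTER_STATUS_PREFIX; the "/status" suffix is illustrative.)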
+                if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_MASTER_STATUS_PREFIX):
+                    # Process the job status
+                    status_manager_instances[status_entity.run_id].status_center_process_master_status(
+                        message_entity.topic, message_entity.payload)
+
+                    # Save the job status
+                    FedMLStatusCenter.save_job_status(status_entity.run_id, self.get_entire_job_status())
+
+                elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX):
+                    # Process the slave device status
+                    status_manager_instances[status_entity.run_id].status_center_process_slave_status(
+                        message_entity.topic, message_entity.payload)
+
+                    # Save the device status in the job
+                    FedMLStatusCenter.save_device_status_in_job(status_entity.run_id, status_entity.edge_id,
+                                                                self.get_job_status_in_slave(status_entity.edge_id))
+
+            except Exception:
+                if message_entity is not None:
+                    logging.info(
+                        f"Failed to process the status with topic {message_entity.topic}, "
+                        f"payload {message_entity.payload}, {traceback.format_exc()}")
+                else:
+                    logging.info(f"Failed to process the status: {traceback.format_exc()}")
+
+    def run_status_dispatcher_in_slave(self, status_event, status_queue,
+                                       sender_message_center_queue,
+                                       listener_message_center_queue):
+        # Save the parameters
+        self.status_event = status_event
+        self.status_queue = status_queue
+        self.status_sender_message_center_queue = sender_message_center_queue
+        self.status_listener_message_center_queue = listener_message_center_queue
+
+        # Rebuild the message center
+        message_center = None
+        if sender_message_center_queue is not None:
+            self.rebuild_message_center(sender_message_center_queue)
+            message_center = FedMLMessageCenter(
+                sender_message_queue=sender_message_center_queue,
+                listener_message_queue=listener_message_center_queue
+            )
+
+        if sender_message_center_queue is not None:
+            self.rebuild_status_center(status_queue)
+
+        # Init the status manager instances
+        status_manager_instances = dict()
+        job_launch_message_map = dict()
+
+        while True:
+            message_entity = None
+
+            # Check if we should stop the status dispatcher
+            try:
+                self.check_message_stop_event()
+            except StatusCenterStoppedException:
+                break
+
+            # Dispatch status messages.
+            # noinspection PyBroadException
+            try:
+                # Get the status message from the queue
+                try:
+                    message_body = status_queue.get(block=False)
+                except queue.Empty:  # The queue is empty; wait briefly and retry
+                    message_body = None
+                if message_body is None:
+                    time.sleep(0.1)
+                    continue
+
+                # Build the message and status entities
+                message_entity = FedMLMessageEntity(message_body=message_body)
+                status_entity = FedMLStatusEntity(status_msg_body=message_body)
+
+                # Generate the status manager instance
+                if status_manager_instances.get(status_entity.run_id) is None:
+                    status_manager_instances[status_entity.run_id] = FedMLStatusManager(
+                        run_id=status_entity.run_id, edge_id=status_entity.edge_id, status_center=self,
+                        message_center=message_center)
+                else:
+                    status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id
+
+                # Process the slave status
+                if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX):
+                    # Report the slave status to the master
+                    status_manager_instances[status_entity.run_id]. \
+                        status_center_process_slave_status_to_master_in_slave_agent(
+                        message_entity.topic, message_entity.payload
+                    )
+                elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_TO_MLOPS_PREFIX):
+                    # Report the slave status to MLOps (Active/IDLE message)
+                    status_manager_instances[status_entity.run_id]. \
+                        status_center_process_slave_status_to_mlops_in_slave_agent(
+                        message_entity.topic, message_entity.payload
+                    )
+                elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_PREFIX) and
+                      message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_SUFFIX)):
+                    # Asynchronously request the job status from the master when launching the job
+                    job_launch_message_map[status_entity.run_id] = {"topic": message_entity.topic,
+                                                                    "payload": message_entity.payload}
+                    status_manager_instances[status_entity.run_id]. \
+                        status_center_request_job_status_from_master_in_slave_agent(
+                        message_entity.topic, message_entity.payload
+                    )
+                elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_PREFIX) and
+                      message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_SUFFIX)):
+                    # Clean up when the job is stopped
+                    if job_launch_message_map.get(status_entity.run_id, None) is not None:
+                        job_launch_message_map.pop(status_entity.run_id)
+
+            except Exception:
+                if message_entity is not None:
+                    logging.info(
+                        f"Failed to process the status with topic {message_entity.topic}, "
+                        f"payload {message_entity.payload}, {traceback.format_exc()}")
+                else:
+                    logging.info(f"Failed to process the status: {traceback.format_exc()}")
+
+    def register_job_launch_message(self, topic, payload):
+        message_entity = FedMLMessageEntity(topic=topic, payload=payload)
+        self.status_queue.put(message_entity.get_message_body())
+
+    def register_job_stop_message(self, topic, payload):
+        message_entity = FedMLMessageEntity(topic=topic, payload=payload)
+        self.status_queue.put(message_entity.get_message_body())
+
+    @staticmethod
+    def rebuild_status_center_from_queue(status_queue):
+        status_center = FedMLStatusCenter(message_queue=status_queue)
+        return status_center
+
+
+class StatusCenterStoppedException(Exception):
+    """ Status center stopped. 
""" + pass diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py new file mode 100755 index 0000000000..06b222cfd1 --- /dev/null +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -0,0 +1,303 @@ +import json +import logging +import os +import shutil +from os import listdir + +from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon +from ....core.mlops.mlops_metrics import MLOpsMetrics +from ..slave.client_constants import ClientConstants +from ..master.server_constants import ServerConstants +from ..master.server_data_interface import FedMLServerDataInterface +from .message_common import LogArgs +from .general_constants import GeneralConstants + + +class FedMLStatusManager(object): + def __init__(self, run_id=None, edge_id=None, server_id=None, + edge_id_list=None, running_scheduler_contract=None, + status_center=None, message_center=None): + self.run_id = run_id + self.edge_id = edge_id + self.server_id = server_id + self.edge_id_list = edge_id_list + self.client_agent_active_list = dict() + self.running_scheduler_contract = running_scheduler_contract if running_scheduler_contract is not None else dict() + self.message_reporter = MLOpsMetrics() + self.message_reporter.set_messenger(message_center) + self.status_reporter = MLOpsMetrics() + self.status_reporter.set_messenger(status_center, send_message_func=status_center.send_status_message) + self.status_center = status_center + self.message_center = message_center + self.log_args = LogArgs(role="server", edge_id=self.edge_id, + server_id=self.server_id, log_file_dir=ServerConstants.get_log_file_dir()) + + def __repr__(self): + return "<{klass} @{id:x} {attrs}>".format( + klass=self.__class__.__name__, + id=id(self) & 0xFFFFFF, + attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), + ) + + def process_job_completed_status(self, master_id, status): + # Stop the system performance monitor + try: + self.message_reporter.stop_sys_perf() + except Exception as ex: + pass + + # Stop the job process + ServerConstants.cleanup_learning_process(self.run_id) + ServerConstants.cleanup_bootstrap_process(self.run_id) + + # Remove the package download directory. + try: + local_package_path = ServerConstants.get_package_download_dir() + for package_file in listdir(local_package_path): + if os.path.basename(package_file).startswith("run_" + str(self.run_id)): + shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True) + except Exception as e: + pass + + # Stop log processor for current run + MLOpsRuntimeLogDaemon.get_instance(self.log_args).stop_log_processor(self.run_id, master_id) + + # RunProcessUtils.kill_process(cloud_server_process.pid) + # self.stop_cloud_server() + # self.remove_listener_for_run_metrics(self.run_id) + # self.remove_listener_for_run_logs(self.run_id) + + def process_job_exception_status(self, master_id, status): + # Send the exception status to slave devices. 
+ self.report_exception_status( + self.edge_id_list, run_id=self.run_id, server_id=master_id, + status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) + + # Save the job status to local storage + FedMLServerDataInterface.get_instance().save_job_status(self.run_id, master_id, status, status) + + def process_job_running_status(self, master_id, status): + self.message_reporter.report_server_training_status( + self.run_id, status, edge_id=master_id, running_json=self.running_scheduler_contract) + + def status_center_process_master_status(self, topic, payload): + request_json = json.loads(payload) + is_retain = request_json.get("is_retain", False) + if is_retain: + return + run_id = request_json["run_id"] + status = request_json["status"] + edge_id = request_json["edge_id"] + server_id = request_json.get("server_id", None) + run_id_str = str(run_id) + + # Process the job status + if status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: + self.process_job_completed_status(server_id, status) + elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: + self.process_job_completed_status(server_id, status) + elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: + self.process_job_completed_status(server_id, status) + elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: + self.process_job_exception_status(server_id, status) + else: + self.process_job_running_status(server_id, status) + + # Process the consensus status + self.process_job_status_consensus(run_id, server_id, status) + + def process_job_status_consensus(self, run_id, master_id, status): + # Set the master status in the job and entire job status + self.status_center.set_entire_job_status(status) + self.status_center.add_job_status_in_master(master_id, status) + status = self.status_center.get_entire_job_status() + + # Set the device status based on the job status + edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {}) + for edge_id_item, edge_status_item in edge_id_status_dict.items(): + if edge_id_item == "server": + continue + + # Calc the device status based on the job status + consensus_device_status = FedMLStatusManager.get_device_consensus_status_in_job( + status, edge_status_item) + if consensus_device_status is not None: + self.message_reporter.report_client_training_status( + edge_id_item, consensus_device_status, run_id=run_id) + + # Save the job status to local storage + FedMLServerDataInterface.get_instance().save_job_status(run_id, master_id, status, status) + + # Report the status to message center + self.message_reporter.report_server_training_status(run_id, status, edge_id=master_id) + + # Broadcast the status to slave agents + self.message_reporter.report_job_status(run_id, status) + + @staticmethod + def get_device_consensus_status_in_job(job_status, device_status): + if job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: + if device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \ + device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ + device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED: + return device_status + else: + return ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED + else: + return None + + def get_device_consensus_status_in_current_device(self, edge_id, status): + self.status_center.add_job_status_in_slave(edge_id, status) + consensus_status = self.status_center.get_job_status_in_slave(edge_id) + consensus_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED \ + if consensus_status == 
ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else consensus_status + return consensus_status + + def status_center_process_slave_status(self, topic, payload): + payload_json = json.loads(payload) + run_id = payload_json.get("run_id", None) + edge_id = payload_json.get("edge_id", None) + status = payload_json.get("status", None) + init_edge_id_list = payload_json.get("init_all_edge_id_list", None) + init_server_id = payload_json.get("init_server_id", None) + + active_item_dict = self.client_agent_active_list.get(f"{run_id}", None) + if active_item_dict is None: + self.client_agent_active_list[f"{run_id}"] = dict() + + if init_edge_id_list is not None: + self.client_agent_active_list[f"{run_id}"][f"server"] = init_server_id + for edge_id_item in init_edge_id_list: + self.client_agent_active_list[f"{run_id}"][f"{edge_id_item}"] = \ + ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE + + if run_id is not None and edge_id is not None: + self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = status + + self.process_device_status(run_id, edge_id, status) + + def process_device_status(self, run_id, edge_id, status): + number_of_failed_edges = 0 + number_of_finished_edges = 0 + number_of_killed_edges = 0 + edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {}) + server_id = edge_id_status_dict.get("server", 0) + enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id) + running_edges_list = list() + for edge_id_item, status_item in edge_id_status_dict.items(): + if edge_id_item == "server": + continue + + if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ + status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: + number_of_failed_edges += 1 + continue + + if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: + number_of_finished_edges += 1 + continue + + if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: + number_of_killed_edges += 1 + continue + + if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \ + status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE: + continue + + running_edges_list.append(edge_id_item) + + # Report client status + consensus_status = self.get_device_consensus_status_in_current_device(edge_id, status) + self.message_reporter.report_client_training_status(edge_id, consensus_status, run_id=run_id) + + # Report server status based on the fault tolerance model and parameters + edge_nums = len(edge_id_status_dict.keys()) - 1 + status_to_report = self.calculate_server_status( + run_id, edge_nums, number_of_failed_edges, number_of_finished_edges, number_of_killed_edges, + running_edges_list, enable_fault_tolerance=enable_fault_tolerance, + fault_tolerance_rate=fault_tolerance_rate) + if status_to_report is not None: + logging.info(f"Run completed when processing edge status, will report status {status_to_report}") + self.report_server_status(run_id, edge_id, server_id, status_to_report) + + def calculate_server_status( + self, run_id, total_edge_nums, number_of_failed_edges, number_of_finished_edges, + number_of_killed_edges, running_edges_list, enable_fault_tolerance=False, + fault_tolerance_rate=0.8 + ): + # Report server status based on the fault tolerance model and parameters + actual_failed_rate = number_of_failed_edges / total_edge_nums + all_edges_run_completed = True if len(running_edges_list) <= 0 else False + if all_edges_run_completed: + status_to_report = None + if enable_fault_tolerance: + if actual_failed_rate 
>= fault_tolerance_rate: + status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED + self.report_exception_status( + running_edges_list, run_id=run_id, status=status_to_report) + return status_to_report + else: + if number_of_killed_edges == total_edge_nums: + status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED + else: + status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED + else: + if number_of_failed_edges > 0: + status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED + elif number_of_finished_edges == total_edge_nums: + status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED + elif number_of_killed_edges == total_edge_nums: + status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED + + return status_to_report + + def parse_fault_tolerance_params(self, run_id): + run_json = self.running_scheduler_contract.get(str(run_id), None) + if run_json is None: + return False, 0 + run_config = run_json.get("run_config", {}) + run_params = run_config.get("parameters", {}) + common_args = run_params.get("common_args", {}) + enable_fault_tolerance = common_args.get("enable_fault_tolerance", False) + fault_tolerance_rate = common_args.get("fault_tolerance_rate", 0) + return enable_fault_tolerance, fault_tolerance_rate + + def report_server_status(self, run_id, edge_id, server_id, status): + self.status_reporter.report_server_id_status( + run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id) + + def report_exception_status( + self, edge_id_list, run_id=0, server_id=None, status=None, payload=None): + if payload is None: + payload_obj = {"runId": run_id, "edgeids": edge_id_list} + if server_id is not None: + payload_obj["serverId"] = server_id + else: + payload_obj = json.loads(payload) + payload_obj["run_status"] = ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION if status is None else status + topic_exception = "flserver_agent/" + str(self.edge_id) + "/stop_train" + self.message_reporter.send_message(topic_exception, json.dumps(payload_obj)) + + def status_center_process_slave_status_to_master_in_slave_agent(self, topic, payload): + # Forward the status message to the sender queue of message center. + self.message_center.send_message(topic, payload) + + # Post the status message to the listener queue of message center + self.message_center.receive_message(GeneralConstants.MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB, payload) + + def status_center_process_slave_status_to_mlops_in_slave_agent(self, topic, payload): + # Forward the status message to message center. + self.message_center.send_message(topic, payload) + + def status_center_request_job_status_from_master_in_slave_agent(self, topic, payload): + # Parse the parameters + payload_json = json.loads(payload) + run_id = payload_json.get("run_id", None) + master_id = payload_json.get("master_id", None) + edge_id = payload_json.get("edge_id", None) + + # Request the job status from master agent. 
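+        # (The payload carries the run and edge ids; the master is expected to answer on the
+        # slave's response_job_status topic.)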
+        topic_request_job_status = f"{GeneralConstants.MSG_TOPIC_REQUEST_JOB_STATUS_PREFIX}{master_id}"
+        payload_request_job_status = {"run_id": run_id, "edge_id": edge_id}
+        self.message_center.send_message(topic_request_job_status, json.dumps(payload_request_job_status))
diff --git a/python/fedml/computing/scheduler/slave/base_slave_agent.py b/python/fedml/computing/scheduler/slave/base_slave_agent.py
new file mode 100755
index 0000000000..01c0a39195
--- /dev/null
+++ b/python/fedml/computing/scheduler/slave/base_slave_agent.py
@@ -0,0 +1,139 @@
+
+import json
+import os
+from ..comm_utils import sys_utils
+from ..comm_utils.run_process_utils import RunProcessUtils
+from ..comm_utils.sys_utils import get_python_program
+from ....core.mlops import MLOpsRuntimeLog, MLOpsMetrics
+from .client_data_interface import ClientConstants
+from ..scheduler_core.account_manager import FedMLAccountManager
+from ..scheduler_core.general_constants import GeneralConstants
+from abc import ABC, abstractmethod
+
+
+class FedMLBaseSlaveAgent(ABC):
+    CLIENT_API_CMD = "fedml.computing.scheduler.slave.client_api:api"
+
+    def __init__(self):
+        self.agent_args = None
+        self.local_api_process = None
+        self.process = None
+        self.cur_dir = os.path.split(os.path.realpath(__file__))[0]
+        self.mlops_metrics = MLOpsMetrics()
+        self.protocol_mgr = None
+
+    def login(
+            self, userid, api_key=None, device_id=None,
+            os_name=None, need_to_check_gpu=False, role=None
+    ):
+        # Preprocess the login args
+        if need_to_check_gpu:
+            gpu_count, _ = sys_utils.get_gpu_count_vendor()
+            if gpu_count <= 0:
+                print("We can't find any GPU device on your machine.\n"
+                      "Since you used the gpu_supplier(-g) option, please check that your machine "
+                      "has NVIDIA GPUs and that the CUDA drivers are installed.")
+                return
+
+        # Login the account
+        login_result = FedMLAccountManager.get_instance().login(
+            userid, api_key=api_key, device_id=device_id,
+            os_name=os_name, role=role
+        )
+        if login_result is not None:
+            self.agent_args = login_result
+        else:
+            return None
+
+        # Save the bound info
+        self._save_agent_info(login_result.current_device_id + "." + login_result.os_name, login_result.edge_id)
+
+        # Init the logs for the protocol manager
+        self._init_logs(login_result, login_result.edge_id)
+
+        # Create the protocol manager to communicate with the master agent and MLOps.
+        self._create_protocol_manager(login_result)
+
+        # Initialize the protocol manager
+        # noinspection PyBroadException
+        try:
+            self._initialize_protocol_manager()
+        except Exception as e:
+            FedMLAccountManager.write_login_failed_file(is_client=True)
+            self.protocol_mgr.stop()
+            raise e
+
+        # Start the protocol manager to process the messages from MLOps and the master agent.
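+        # (start() blocks in the MQTT loop_forever() loop until the agent is stopped or upgraded.)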
+ self.protocol_mgr.start() + + return login_result + + @staticmethod + def logout(): + GeneralConstants.cleanup_run_process(None) + sys_utils.cleanup_all_fedml_client_api_processes() + + def _create_protocol_manager(self, login_result): + if self.protocol_mgr is not None: + return + self.protocol_mgr = self._generate_protocol_manager_instance( + login_result, agent_config=login_result.agent_config) + self.protocol_mgr.args = login_result + self.protocol_mgr.edge_id = login_result.edge_id + self.protocol_mgr.unique_device_id = login_result.unique_device_id + self.protocol_mgr.user_name = login_result.user_name + self.protocol_mgr.agent_config = login_result.agent_config + + def _initialize_protocol_manager(self): + # Init local database + self._init_database() + + # Initialize the master protocol + self.protocol_mgr.initialize() + + # Start the client API process + self._start_slave_api() + + def _init_logs(self, login_result, edge_id): + # Init runtime logs + in_args = login_result + in_args.log_file_dir = self._get_log_file_dir() + in_args.run_id = 0 + in_args.role = "client" + client_ids = list() + client_ids.append(edge_id) + in_args.client_id_list = json.dumps(client_ids) + in_args.using_mlops = True + MLOpsRuntimeLog.get_instance(in_args).init_logs() + + def _start_slave_api(self): + # Start the local API services + client_api_cmd = FedMLBaseSlaveAgent.CLIENT_API_CMD + client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd) + if client_api_pids is None or len(client_api_pids) <= 0: + python_program = get_python_program() + cur_dir = os.path.dirname(__file__) + fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) + self.local_api_process = ClientConstants.exec_console_with_script( + "{} -m uvicorn {} --host 0.0.0.0 --port {} " + "--reload --reload-delay 3 --reload-dir {} --log-level critical".format( + python_program, client_api_cmd, ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir), + should_capture_stdout=False, + should_capture_stderr=False + ) + + @abstractmethod + def _get_log_file_dir(self): + pass + + @abstractmethod + def _save_agent_info(self, unique_device_id, edge_id): + pass + + @abstractmethod + def _init_database(self): + pass + + @abstractmethod + def _generate_protocol_manager_instance(self, args, agent_config=None): + return None diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py new file mode 100755 index 0000000000..4448dd49fa --- /dev/null +++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py @@ -0,0 +1,264 @@ +import logging +import multiprocessing +import os +import platform +import time +import traceback +from abc import ABC, abstractmethod + +from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog +from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon +from .client_data_interface import FedMLClientDataInterface +from ..comm_utils import sys_utils +from ....core.mlops.mlops_utils import MLOpsUtils +from multiprocessing import Process +from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError +from ..scheduler_core.general_constants import GeneralConstants + + +class FedMLBaseSlaveJobRunner(FedMLSchedulerBaseJobRunner, ABC): + + def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0, + cuda_visible_gpu_ids_str=None, + agent_data_dir=None, agent_package_download_dir=None, + agent_package_unzip_dir=None, 
agent_log_file_dir=None):
+        FedMLSchedulerBaseJobRunner.__init__(
+            self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id,
+            cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=agent_data_dir,
+            agent_package_download_dir=agent_package_download_dir,
+            agent_package_unzip_dir=agent_package_unzip_dir,
+            agent_log_file_dir=agent_log_file_dir
+        )
+
+        self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data")
+        self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data")
+        self.fedml_data_dir = self.fedml_data_base_package_dir
+        self.fedml_config_dir = os.path.join("/", "fedml", "conf")
+        self.run_extend_queue_list = None
+        self.computing_started_time = 0
+
+    def __repr__(self):
+        return "<{klass} @{id:x} {attrs}>".format(
+            klass=self.__class__.__name__,
+            id=id(self) & 0xFFFFFF,
+            attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()),
+        )
+
+    def run(self, process_event, completed_event, run_extend_queue_list,
+            sender_message_center, listener_message_queue, status_center_queue):
+        print(f"Client runner process id {os.getpid()}, run id {self.run_id}")
+
+        if platform.system() != "Windows":
+            os.setsid()
+
+        os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning'
+
+        self.run_process_event = process_event
+        self.run_process_completed_event = completed_event
+        try:
+            MLOpsUtils.set_ntp_offset(self.ntp_offset)
+            self.rebuild_message_status_center(sender_message_center, listener_message_queue, status_center_queue)
+            self.run_impl(run_extend_queue_list, sender_message_center, listener_message_queue, status_center_queue)
+        except RunnerError:
+            logging.info("Runner stopped.")
+            self.reset_devices_status(self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
+        except RunnerCompletedError:
+            logging.info("Runner completed.")
+        except Exception as e:
+            logging.error(f"Runner exited with errors. 
Exception: {e}, Traceback {traceback.format_exc()}") + self.status_reporter.report_client_id_status( + self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + server_id=self.server_id, run_id=self.run_id) + finally: + if self.mlops_metrics is not None: + computing_ended_time = MLOpsUtils.get_ntp_time() + self.mlops_metrics.report_edge_job_computing_cost(self.run_id, self.edge_id, + self.computing_started_time, computing_ended_time, + self.args.account_id, self.args.api_key) + logging.info("Release resources.") + FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(self.run_id, self.edge_id) + MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) + if self.mlops_metrics is not None: + self.mlops_metrics.stop_sys_perf() + time.sleep(3) + GeneralConstants.cleanup_learning_process(self.run_id) + GeneralConstants.cleanup_run_process(self.run_id) + + @abstractmethod + def run_impl(self, run_extend_queue_list, sender_message_center, + listener_message_queue, status_center_queue): + run_id = self.request_json["runId"] + run_config = self.request_json["run_config"] + data_config = run_config.get("data_config", {}) + packages_config = run_config["packages_config"] + + self.computing_started_time = MLOpsUtils.get_ntp_time() + self.mlops_metrics.report_edge_job_computing_cost(run_id, self.edge_id, + self.computing_started_time, 0, + self.args.account_id, self.args.api_key) + + self.check_runner_stop_event() + + MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + + self.status_reporter.report_client_id_status( + self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING, + running_json=self.start_request_json, run_id=run_id) + + # get training params + private_local_data_dir = data_config.get("privateLocalData", "") + is_using_local_data = 0 + # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0: + # is_using_local_data = 1 + + # start a run according to the hyper-parameters + # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id) + fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data") + fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config") + if is_using_local_data: + fedml_local_data_dir = private_local_data_dir + self.fedml_data_dir = self.fedml_data_local_package_dir + + self.check_runner_stop_event() + + logging.info("Download packages") + + # update local config with real time parameters from server and dynamically replace variables value + unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config) + # if unzip_package_path is None or fedml_config_object is None: + # logging.info("failed to update local fedml config.") + # self.check_runner_stop_event() + # # Send failed msg when exceptions. 
+ # self.cleanup_run_when_starting_failed(status=GeneralConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION) + # return + + logging.info("Check downloaded packages...") + + entry_file_config = fedml_config_object.get("entry_config", None) + dynamic_args_config = fedml_config_object.get("dynamic_args", None) + entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep) + entry_file = os.path.basename(entry_file) + conf_file = entry_file_config["conf_file"] + conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep) + ##### + # GeneralConstants.cleanup_learning_process(run_id) + # GeneralConstants.cleanup_bootstrap_process(run_id) + ##### + + if not os.path.exists(unzip_package_path): + logging.info("failed to unzip file.") + self.check_runner_stop_event() + return + os.chdir(os.path.join(unzip_package_path, "fedml")) + + self.check_runner_stop_event() + + logging.info("starting the user process...") + + entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file) + conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file) + logging.info("waiting the user process to finish...") + logging.info(" ") + logging.info(" ") + logging.info("====Your Run Logs Begin===") + + process, is_launch_task, error_list = self.execute_job_task( + unzip_package_path=unzip_package_path, entry_file_full_path=entry_file_full_path, + conf_file_full_path=conf_file_full_path, dynamic_args_config=dynamic_args_config, + fedml_config_object=self.fedml_config_object) + + logging.info("====Your Run Logs End===") + logging.info(" ") + logging.info(" ") + + ret_code, out, err = process.returncode if process else None, None, None + is_run_ok = sys_utils.is_runner_finished_normally(process.pid) + if is_launch_task: + is_run_ok = True + if error_list is not None and len(error_list) > 0: + is_run_ok = False + if ret_code is None or ret_code <= 0: + self.check_runner_stop_event() + + if is_run_ok: + if out is not None: + out_str = sys_utils.decode_our_err_result(out) + if out_str != "": + logging.info("{}".format(out_str)) + + self.status_reporter.report_client_id_status( + self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, + server_id=self.server_id, run_id=run_id) + + if is_launch_task: + sys_utils.log_return_info(f"job {run_id}", ret_code) + else: + sys_utils.log_return_info(entry_file, ret_code) + else: + is_run_ok = False + + if not is_run_ok: + # If the run status is killed or finished, then return with the normal state. + current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id) + if current_job is not None and (current_job.status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or + current_job.status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED): + return + + self.check_runner_stop_event() + + logging.error("failed to run the learning process...") + + if err is not None: + err_str = sys_utils.decode_our_err_result(err) + if err_str != "": + logging.error("{}".format(err_str)) + + if is_launch_task: + sys_utils.log_return_info(f"job {run_id}", ret_code) + else: + sys_utils.log_return_info(entry_file, ret_code) + + # Send failed msg when exceptions. 
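+        # (The failure is reported through the status reporter so the status center can
+        # propagate it to the master and MLOps.)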
+ self.status_reporter.report_client_id_status( + self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + server_id=self.server_id, run_id=run_id) + + @abstractmethod + def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None): + return None + + @abstractmethod + def _generate_extend_queue_list(self): + return list() + + def reset_devices_status(self, edge_id, status): + self.status_reporter.run_id = self.run_id + self.status_reporter.edge_id = edge_id + self.status_reporter.report_client_id_status( + edge_id, status, server_id=self.server_id, run_id=self.run_id) + + def start_runner_process( + self, run_id, request_json, edge_id=None, + sender_message_queue=None, listener_message_queue=None, + status_center_queue=None, cuda_visible_gpu_ids_str=None + ): + client_runner = self._generate_job_runner_instance( + self.args, run_id=run_id, request_json=request_json, + agent_config=None, edge_id=edge_id + ) + client_runner.start_request_json = request_json + run_id_str = str(run_id) + self.run_process_event = multiprocessing.Event() + client_runner.run_process_event = self.run_process_event + self.run_process_completed_event = multiprocessing.Event() + client_runner.run_process_completed_event = self.run_process_completed_event + client_runner.server_id = request_json.get("server_id", "0") + self.run_extend_queue_list = self._generate_extend_queue_list() + logging.info("start the runner process.") + self.run_process = Process(target=client_runner.run, args=( + self.run_process_event, self.run_process_completed_event, self.run_extend_queue_list, + sender_message_queue, listener_message_queue, status_center_queue + )) + self.run_process.start() + return self.run_process diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py new file mode 100755 index 0000000000..c058d5dd0e --- /dev/null +++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py @@ -0,0 +1,12 @@ + +from abc import ABC, abstractmethod +from ..scheduler_core.scheduler_base_job_runner_manager import FedMLSchedulerBaseJobRunnerManager +from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner + + +class FedMLBaseSlaveJobRunnerManager(FedMLSchedulerBaseJobRunnerManager, ABC): + def __init__(self): + FedMLSchedulerBaseJobRunnerManager.__init__(self) + + def cleanup_containers_and_release_gpus(self, run_id, edge_id): + FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(run_id, edge_id) diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py new file mode 100755 index 0000000000..0543459dd0 --- /dev/null +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -0,0 +1,571 @@ + +import json +import logging +import os +import time +import traceback +from abc import ABC, abstractmethod + +import fedml +from ..comm_utils.constants import SchedulerConstants +from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs +from ..comm_utils.run_process_utils import RunProcessUtils +from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog +from ....core.mlops.mlops_configs import MLOpsConfigs +from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon +from ..comm_utils import sys_utils +from ....core.mlops.mlops_utils import MLOpsUtils +from ..scheduler_core.compute_cache_manager import 
ComputeCacheManager
+from ..scheduler_core.ota_upgrade import FedMLOtaUpgrade
+from .client_data_interface import FedMLClientDataInterface
+from ..scheduler_core.scheduler_base_protocol_manager import FedMLSchedulerBaseProtocolManager
+from ..scheduler_core.general_constants import GeneralConstants
+
+
+class FedMLBaseSlaveProtocolManager(FedMLSchedulerBaseProtocolManager, ABC):
+
+    def __init__(self, args, agent_config=None):
+        FedMLSchedulerBaseProtocolManager.__init__(self, args, agent_config=agent_config)
+
+        self.request_json = None
+        self.disable_client_login = None
+        self.args = args
+        self.message_status_runner = None
+        self.message_center = None
+        self.status_center = None
+        self.message_center_name = "slave_agent"
+        self.run_id = None
+        self.edge_id = args.edge_id
+        self.general_edge_id = None
+        self.edge_user_name = args.user_name
+        self.edge_extra_url = args.extra_url
+        self.server_agent_id = args.edge_id
+        self.current_device_id = args.current_device_id
+        self.unique_device_id = args.unique_device_id
+        self.agent_config = agent_config
+        self.topic_start_train = None
+        self.topic_stop_train = None
+        self.topic_report_status = None
+        self.topic_ota_msg = None
+        self.topic_request_device_info = None
+        self.topic_client_logout = None
+        self.topic_response_job_status = None
+        self.topic_report_device_status_in_job = None
+        self.fl_topic_start_train = None
+        self.fl_topic_stop_train = None
+        self.fl_topic_request_device_info = None
+        self.communication_mgr = None
+        self.subscribed_topics = list()
+        self.mlops_metrics = None
+        self.status_reporter = None
+        self.job_runners = dict()
+        self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id)
+        self.running_request_json = dict()
+        self.start_request_json = None
+        self.user_name = args.user_name
+        self.general_edge_id = args.general_edge_id
+        self.server_id = args.server_id
+        self.model_device_server_id = None
+        self.model_device_client_edge_id_list = None
+        self.model_device_server = None
+        self.model_device_client_list = None

+    @abstractmethod
+    def generate_topics(self):
+        # The MQTT message topic format is as follows: <sender>/<receiver>/<action>.
+
+        # The topic for starting training.
+        self.topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train"
+
+        # The topic for stopping training.
+        self.topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train"
+
+        # The topic for reporting the current device status.
+        self.topic_report_status = "mlops/report_device_status"
+
+        # The topic for OTA messages from MLOps.
+        self.topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota"
+
+        # The topic for requesting device info from the client.
+        self.topic_request_device_info = "server/client/request_device_info/" + str(self.edge_id)
+
+        # The topic for logging out the client from MLOps.
+        self.topic_client_logout = "mlops/client/logout/" + str(self.edge_id)
+
+        # The topic for getting the job status from the status center.
+        self.topic_response_job_status = f"master_agent/somewhere/response_job_status/{self.edge_id}"
+
+        # The topic for getting the device status of a job from the status center.
+        self.topic_report_device_status_in_job = f"slave_job/slave_agent/report_device_status_in_job"
+
+        # The topic for reporting online status.
+        self.topic_active = "flclient_agent/active"
+
+        # The topic for last-will messages.
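+        # (The last-will payload itself is set by the base protocol manager when connecting:
+        # {"ID": edge_id, "status": MSG_MLOPS_SERVER_STATUS_OFFLINE}.)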
+ # The topic for last-will messages.
+ self.topic_last_will = "flclient_agent/last_will_msg"
+
+ if self.general_edge_id is not None:
+ self.fl_topic_start_train = "flserver_agent/" + str(self.general_edge_id) + "/start_train"
+ self.fl_topic_stop_train = "flserver_agent/" + str(self.general_edge_id) + "/stop_train"
+ self.fl_topic_request_device_info = "server/client/request_device_info/" + str(self.general_edge_id)
+
+ # Subscribe to the topics for starting training, stopping training, and fetching the client status.
+ self.subscribed_topics.clear()
+ self.add_subscribe_topic(self.topic_start_train)
+ self.add_subscribe_topic(self.topic_stop_train)
+ self.add_subscribe_topic(self.topic_report_status)
+ self.add_subscribe_topic(self.topic_ota_msg)
+ self.add_subscribe_topic(self.topic_request_device_info)
+ self.add_subscribe_topic(self.topic_client_logout)
+ self.add_subscribe_topic(self.topic_response_job_status)
+ self.add_subscribe_topic(self.topic_report_device_status_in_job)
+ if self.general_edge_id is not None:
+ self.add_subscribe_topic(self.fl_topic_start_train)
+ self.add_subscribe_topic(self.fl_topic_stop_train)
+ self.add_subscribe_topic(self.fl_topic_request_device_info)
+
+ @abstractmethod
+ def add_protocol_handler(self):
+ # Add the message listeners for all subscribed topics; a concrete subclass may extend this set.
+ self.add_message_listener(self.topic_start_train, self.callback_start_train)
+ self.add_message_listener(self.topic_stop_train, self.callback_stop_train)
+ self.add_message_listener(self.topic_ota_msg, FedMLBaseSlaveProtocolManager.callback_client_ota_msg)
+ self.add_message_listener(self.topic_report_status, self.callback_report_current_status)
+ self.add_message_listener(self.topic_request_device_info, self.callback_report_device_info)
+ self.add_message_listener(self.topic_client_logout, self.callback_client_logout)
+ self.add_message_listener(self.topic_response_job_status, self.callback_response_job_status)
+ self.add_message_listener(self.topic_report_device_status_in_job, self.callback_response_device_status_in_job)
+ self.add_message_listener(self.fl_topic_start_train, self.callback_start_train)
+ self.add_message_listener(self.fl_topic_stop_train, self.callback_stop_train)
+ self.add_message_listener(self.fl_topic_request_device_info, self.callback_report_device_info)
+
+ @abstractmethod
+ def _get_job_runner_manager(self):
+ return None
+
+ @abstractmethod
+ def _init_extra_items(self):
+ os.environ["FEDML_CURRENT_EDGE_ID"] = str(self.edge_id)
+ if not ComputeCacheManager.get_instance().set_redis_params():
+ os.environ["FEDML_DISABLE_REDIS_CONNECTION"] = "1"
+
+ def add_subscribe_topic(self, topic):
+ self.subscribed_topics.append(topic)
+
+ def stop(self):
+ if self.model_device_server is not None:
+ self.model_device_server.stop()
+ self.model_device_server = None
+
+ if self.model_device_client_list is not None:
+ for model_client in self.model_device_client_list:
+ model_client.stop()
+ self.model_device_client_list.clear()
+ self.model_device_client_list = None
+
+ super().stop()
+
+ def on_agent_communication_connected(self, mqtt_client_object):
+ super().on_agent_communication_connected(mqtt_client_object)
+
+ self._process_connection_ready()
+
+ payload = {"model_master_device_id": self.model_device_server_id,
+ "model_slave_device_id_list": self.model_device_client_edge_id_list}
+ self.receive_message(self.topic_request_device_info, json.dumps(payload))
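
Concrete agents fill in the abstract hooks declared above. A self-contained sketch of the template-method pattern this base class uses (only the hook names mirror the real class; everything else is hypothetical):

    from abc import ABC, abstractmethod

    class BaseProtocolManager(ABC):
        def start(self):
            # The base class drives the lifecycle; subclasses supply the details.
            self._init_extra_items()
            print("protocol manager started")

        @abstractmethod
        def _get_job_runner_manager(self):
            ...

        @abstractmethod
        def _init_extra_items(self):
            ...

    class LaunchSlaveProtocolManager(BaseProtocolManager):
        def _get_job_runner_manager(self):
            return object()  # stand-in for the real job runner manager singleton

        def _init_extra_items(self):
            print("set environment variables, warm caches, etc.")

    if __name__ == "__main__":
        LaunchSlaveProtocolManager().start()

This is why generate_topics, add_protocol_handler, and _init_extra_items carry default bodies despite being abstract: a subclass is expected to call super() and then extend them.
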
+
+ def on_agent_communication_disconnected(self, mqtt_client_object):
+ super().on_agent_communication_disconnected(mqtt_client_object)
+
+ self._process_connection_lost()
+
+ @abstractmethod
+ def _process_connection_ready(self):
+ pass
+
+ @abstractmethod
+ def _process_connection_lost(self):
+ pass
+
+ def print_connected_info(self):
+ print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!")
+ print(f"Your FedML Edge ID is {str(self.edge_id)}, unique device ID is {str(self.unique_device_id)}, "
+ f"master deploy ID is {str(self.model_device_server_id)}, "
+ f"worker deploy ID is {self.model_device_client_edge_id_list}"
+ )
+ if self.edge_extra_url is not None and self.edge_extra_url != "":
+ print(f"You may visit the following URL to fill in more information about your device.\n"
+ f"{self.edge_extra_url}")
+
+ def callback_start_train(self, topic, payload):
+ # Parse the parameters
+ request_json = json.loads(payload)
+ is_retain = request_json.get("is_retain", False)
+ if is_retain:
+ return
+ run_id = request_json["runId"]
+ edge_id = str(topic).split("/")[-2]
+ self.args.run_id = run_id
+ self.args.edge_id = edge_id
+
+ # Start the log processor for the current run
+ MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO)
+ MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(
+ run_id, edge_id, log_source=SchedulerConstants.get_log_source(request_json))
+ logging.info("Started the log processor.")
+
+ # Fetch the configs
+ try:
+ MLOpsConfigs.fetch_all_configs()
+ except Exception as e:
+ logging.error(f"Failed to fetch all configs with Exception {e}. Traceback: {traceback.format_exc()}")
+
+ # Check if the slave agent is disabled.
+ if not FedMLClientDataInterface.get_instance().get_agent_status():
+ logging.error(
+ "FedMLDebug - Receive: topic ({}), payload ({}), but the client agent is disabled. {}".format(
+ topic, payload, traceback.format_exc()
+ )
+ )
+ # Send the failed message on exceptions.
+ self.status_reporter.report_client_id_status(
+ edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, run_id=run_id,
+ msg=f"the client agent {edge_id} is disabled")
+ MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id)
+ return
+
+ # Print the payload
+ logging.info(
+ f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+ )
+
+ # Occupy GPUs
+ server_agent_id = request_json["cloud_agent_id"]
+ scheduler_match_info = request_json.get("scheduler_match_info", {})
+ matched_gpu_num = scheduler_match_info.get("matched_gpu_num", 0)
+ model_master_device_id = scheduler_match_info.get("model_master_device_id", None)
+ model_slave_device_id = scheduler_match_info.get("model_slave_device_id", None)
+ model_slave_device_id_list = scheduler_match_info.get("model_slave_device_id_list", None)
+ run_config = request_json.get("run_config", {})
+ run_params = run_config.get("parameters", {})
+ serving_args = run_params.get("serving_args", {})
+ endpoint_id = serving_args.get("endpoint_id", None)
+ cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids(
+ run_id, matched_gpu_num, edge_id, inner_id=endpoint_id,
+ model_master_device_id=model_master_device_id,
+ model_slave_device_id=model_slave_device_id)
+ logging.info(
+ f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(edge_id)}")
+
+ # Set the listener for the job status from the master agent
+ self.setup_listener_job_status(run_id)
+
+ # Start the job runner with multiprocessing mode
+ self.request_json = request_json
+ run_id_str = str(run_id)
+ self.running_request_json[run_id_str] = request_json
+ self._get_job_runner_manager().start_job_runner(
+ run_id, request_json, args=self.args, edge_id=edge_id,
+ sender_message_queue=self.message_center.get_sender_message_queue(),
+ listener_message_queue=self.get_listener_message_queue(),
+ status_center_queue=self.get_status_queue(),
+ cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str,
+ )
+ run_process = self._get_job_runner_manager().get_runner_process(run_id)
+ if run_process is not None:
+ GeneralConstants.save_run_process(run_id, run_process.pid)
+
+ # Register the job launch message into the status center
+ self.register_job_launch_message(topic, payload)
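
callback_start_train reserves GPUs through JobRunnerUtils before spawning the runner, and the stop and cleanup paths below release them. A stdlib-only toy of that occupy/release ledger (hypothetical class; the real bookkeeping lives in JobRunnerUtils):

    class GpuLedger:
        def __init__(self, total_gpus: int):
            self.free = list(range(total_gpus))
            self.by_run = {}

        def occupy(self, run_id: int, num: int) -> str:
            # Reserve num GPU ids for a run and return a CUDA_VISIBLE_DEVICES-style string.
            if num > len(self.free):
                raise RuntimeError(f"run {run_id} asked for {num} GPUs, "
                                   f"only {len(self.free)} free")
            ids, self.free = self.free[:num], self.free[num:]
            self.by_run[run_id] = ids
            return ",".join(map(str, ids))

        def release(self, run_id: int):
            # Return the run's GPUs to the free pool; safe to call twice.
            self.free.extend(self.by_run.pop(run_id, []))

    if __name__ == "__main__":
        ledger = GpuLedger(total_gpus=4)
        visible = ledger.occupy(run_id=101, num=2)
        print("runner sees GPUs:", visible)  # e.g. "0,1"
        ledger.release(run_id=101)
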
+
+ def callback_stop_train(self, topic, payload):
+ # Parse the parameters
+ edge_id = str(topic).split("/")[-2]
+ request_json = json.loads(payload)
+ is_retain = request_json.get("is_retain", False)
+ if is_retain:
+ return
+ run_id = request_json.get("runId", None)
+ run_id = request_json.get("id", None) if run_id is None else run_id
+ run_status = request_json.get("run_status", GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED)
+
+ # Stop the client job with multiprocessing mode
+ self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id)
+ self.sync_run_stop_status(run_status=run_status)
+
+ # Register the job stopping message into the status center
+ self.register_job_stop_message(topic, payload)
+
+ def callback_report_current_status(self, topic, payload):
+ logging.info(
+ f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+ )
+
+ self.send_agent_active_msg(self.edge_id)
+ if self.general_edge_id is not None:
+ self.send_agent_active_msg(self.general_edge_id)
+
+ @staticmethod
+ def callback_client_ota_msg(topic, payload):
+ logging.info(
+ f"FedMLDebug - Receive: topic ({topic}), payload ({payload})"
+ )
+
+ request_json = json.loads(payload)
+ cmd = request_json["cmd"]
+
+ if cmd == GeneralConstants.FEDML_OTA_CMD_UPGRADE:
+ FedMLOtaUpgrade.process_ota_upgrade_msg()
+ raise Exception("After upgrade, restart the runner...")
+ elif cmd == GeneralConstants.FEDML_OTA_CMD_RESTART:
+ raise Exception("Restart the runner...")
+
+ def callback_report_device_info(self, topic, payload):
+ payload_json = json.loads(payload)
+ server_id = payload_json.get("server_id", 0)
+ run_id = payload_json.get("run_id", 0)
+ listen_edge_id = str(topic).split("/")[-1]
+ context = payload_json.get("context", None)
+ need_gpu_info = payload_json.get("need_gpu_info", False)
+ need_running_process_list = payload_json.get("need_running_process_list", False)
+ model_master_device_id = payload_json.get("model_master_device_id", None)
+ model_slave_device_id_list = payload_json.get("model_slave_device_id_list", None)
+ if model_master_device_id is not None:
+ self.model_device_server_id = model_master_device_id
+ if model_slave_device_id_list is not None:
+ self.model_device_client_edge_id_list = model_slave_device_id_list
+ response_topic = f"client/server/response_device_info/{server_id}"
+ if self.mlops_metrics is not None:
+ if not need_gpu_info:
+ device_info_json = {
+ "edge_id": listen_edge_id,
+ "fedml_version": fedml.__version__,
+ "user_id": self.args.user_name
+ }
+ else:
+ total_mem, free_mem, total_disk_size, free_disk_size, cpu_utilization, cpu_cores, gpu_cores_total, \
+ gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = sys_utils.get_sys_realtime_stats()
+ host_ip = sys_utils.get_host_ip()
+ host_port = sys_utils.get_available_port()
+ gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(self.edge_id)
+ gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids)
+ gpu_cores_available = len(gpu_available_ids)
+ gpu_list = sys_utils.get_gpu_list()
+ device_info_json = {
+ "edge_id": listen_edge_id,
+ "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2),
+ "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2),
+ "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
+ "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2),
+ "cpuUtilization": round(cpu_utilization, 2),
+ "cpuCores": cpu_cores,
+ "gpuCoresTotal": gpu_cores_total,
+ "gpuCoresAvailable": gpu_cores_available,
+ "gpu_available_ids": gpu_available_ids,
+ "gpu_list": gpu_list,
+ "node_ip": host_ip,
+ "node_port": host_port,
+ "networkTraffic": sent_bytes + recv_bytes,
+ "updateTime": int(MLOpsUtils.get_ntp_time()),
+ "fedml_version": fedml.__version__,
+ "user_id": self.args.user_name
+ }
+ if need_running_process_list:
+ device_info_json["run_process_list_map"] = self.get_all_run_process_list_map()
+ slave_device_ids = list()
+ if self.model_device_client_edge_id_list is not None and \
+ isinstance(self.model_device_client_edge_id_list, list):
+ for model_client_edge_id in self.model_device_client_edge_id_list:
+ slave_device_ids.append(model_client_edge_id)
+ response_payload = {"slave_device_id": None if len(slave_device_ids) <= 0 else slave_device_ids[0],
+ "slave_device_id_list": slave_device_ids,
+ "master_device_id": self.model_device_server_id,
+ "run_id": run_id, "edge_id": listen_edge_id,
+ "edge_info": device_info_json}
+ if context is not None:
+ response_payload["context"] = context
+ self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
+
+ def callback_client_logout(self, topic, payload):
+ payload_json = json.loads(payload)
+ secret = payload_json.get("auth", None)
+ if secret is None or str(secret) != "246b1be6-0eeb-4b17-b118-7d74de1975d4":
+ return
+ logging.info("Received the logout request.")
+ for runner in self.job_runners.values():
+ runner.trigger_stop_event()
+ self.disable_client_login = True
+ time.sleep(3)
+ os.system("fedml logout")
+
+ def callback_response_device_status_in_job(self, topic, payload):
+ # Parse the parameters
+ payload_json = json.loads(payload)
+ run_id = payload_json.get("run_id", None)
+ job_status = payload_json.get("status", None)
+ edge_id = payload_json.get("edge_id", None)
+
+ # Process the status
+ self.process_status(run_id, job_status, edge_id)
+
+ def callback_response_job_status(self, topic, payload):
+ # Parse the parameters
+ payload_json = json.loads(payload)
+ run_id = payload_json.get("run_id", None)
+ master_agent = payload_json.get("master_agent", None)
+ job_status = payload_json.get("job_status", None)
+ fedml_version = payload_json.get("fedml_version", None)
+ edge_id = payload_json.get("edge_id", None)
+
+ # Process the status
+ self.process_status(run_id, job_status, edge_id)
+
+ def callback_broadcasted_job_status(self, topic, payload):
+ # Parse the parameters
+ payload_json = json.loads(payload)
+ run_id = payload_json.get("run_id", None)
+ job_status = payload_json.get("status", None)
+
+ # Process the status
+ self.process_status(run_id, job_status, self.edge_id)
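
The three status callbacks above all funnel into process_status, which only tears a run down on terminal statuses. A sketch of that terminal-status gate (status strings inlined for illustration; the real values come from GeneralConstants):

    TERMINAL_STATUSES = {"FINISHED", "FAILED", "KILLED"}

    def is_terminal(status: str) -> bool:
        return status in TERMINAL_STATUSES

    def on_status(run_id: int, status: str, runners: dict):
        # Intermediate statuses (e.g. RUNNING) need no cleanup.
        if not is_terminal(status):
            return
        runner = runners.get(str(run_id))
        if runner is not None:
            print(f"run {run_id}: trigger completed event, release resources")

    if __name__ == "__main__":
        on_status(7, "FINISHED", {"7": object()})
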
+
+ def generate_protocol_manager(self):
+ message_status_runner = self._generate_protocol_manager_instance(
+ self.args, agent_config=self.agent_config
+ )
+ message_status_runner.request_json = self.request_json
+ message_status_runner.disable_client_login = self.disable_client_login
+ message_status_runner.message_center_name = self.message_center_name
+ message_status_runner.run_id = self.run_id
+ message_status_runner.edge_id = self.edge_id
+ message_status_runner.edge_user_name = self.edge_user_name
+ message_status_runner.edge_extra_url = self.edge_extra_url
+ message_status_runner.server_agent_id = self.server_agent_id
+ message_status_runner.current_device_id = self.current_device_id
+ message_status_runner.unique_device_id = self.unique_device_id
+ message_status_runner.subscribed_topics = self.subscribed_topics
+ message_status_runner.running_request_json = self.running_request_json
+ message_status_runner.request_json = self.start_request_json
+ message_status_runner.user_name = self.user_name
+ message_status_runner.general_edge_id = self.general_edge_id
+ message_status_runner.server_id = self.server_id
+ message_status_runner.model_device_server_id = self.model_device_server_id
+ message_status_runner.model_device_client_edge_id_list = self.model_device_client_edge_id_list
+ message_status_runner.status_queue = self.get_status_queue()
+
+ return message_status_runner
+
+ def process_status(self, run_id, status, edge_id):
+ run_id_str = str(run_id)
+
+ # Process the completed status
+ if status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \
+ status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \
+ status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED:
+ if self.job_runners.get(run_id_str, None) is not None:
+ self.job_runners[run_id_str].trigger_completed_event()
+
+ # Stop the sys perf process
+ # noinspection PyBroadException
+ try:
+ self.mlops_metrics.stop_sys_perf()
+ except Exception as ex:
+ logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}")
+
+ # Stop the user process
+ try:
+ GeneralConstants.cleanup_learning_process(run_id)
+ GeneralConstants.cleanup_bootstrap_process(run_id)
+ GeneralConstants.cleanup_run_process(run_id)
+ except Exception as e:
+ logging.error(
+ f"Failed to cleanup run when finished with Exception {e}. Traceback: {traceback.format_exc()}")
+
+ # Get the running json.
+ running_json = self.running_request_json.get(run_id_str)
+ if running_json is None:
+ try:
+ current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id)
+ running_json = json.loads(current_job.running_json)
+ except Exception as e:
+ logging.error(f"Failed to get running json with Exception {e}. Traceback: {traceback.format_exc()}")
+
+ # Cleanup the containers and release the gpu ids.
+ if running_json is not None:
+ job_type = JobRunnerUtils.parse_job_type(running_json)
+ if not SchedulerConstants.is_deploy_job(job_type):
+ logging.info(f"[run/device][{run_id}/{edge_id}] Release gpu resource when run ended.")
+ self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id)
+
+ # Stop the runner process
+ run_process = self._get_job_runner_manager().get_runner_process(run_id)
+ if run_process is not None:
+ if run_process.pid is not None:
+ RunProcessUtils.kill_process(run_process.pid)
+
+ # Terminate the run docker container if it exists
+ try:
+ container_name = JobRunnerUtils.get_run_container_name(run_id)
+ docker_client = JobRunnerUtils.get_docker_client(DockerArgs())
+ logging.info(f"Terminating the run docker container {container_name} if exists...")
+ JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client)
+ except Exception as e:
+ logging.error(f"Error occurred when terminating docker container. "
+ f"Exception: {e}, Traceback: {traceback.format_exc()}.") + + # Stop log processor for current run + MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id) + + def setup_listener_job_status(self, run_id): + # Setup MQTT message listener to receive the job status from master agent; + topic_job_status_from_master = f"master_agent/slave_agent/job_status/{run_id}" + self.add_message_listener(topic_job_status_from_master, self.callback_broadcasted_job_status) + self.subscribe_msg(topic_job_status_from_master) + + def remove_listener_job_status(self, run_id): + # Remove MQTT message listener from master agent; + topic_job_status_from_master = f"master_agent/slave_agent/job_status/{run_id}" + self.remove_message_listener(topic_job_status_from_master) + self.unsubscribe_msg(topic_job_status_from_master) + + def sync_run_stop_status(self, run_status=GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED): + try: + self.status_reporter.report_client_id_status( + self.edge_id, run_status, server_id=self.server_id, run_id=self.run_id) + except Exception as e: + logging.error(f"Failed to sync run stop status with Exception {e}. Traceback: {traceback.format_exc()}") + pass + + def get_all_run_process_list_map(self): + run_process_dict = dict() + all_runner_pid_dict = self._get_job_runner_manager().get_all_runner_pid_map() + if all_runner_pid_dict is None: + return run_process_dict + for run_id_str, process in all_runner_pid_dict.items(): + cur_run_process_list = GeneralConstants.get_learning_process_list(run_id_str) + run_process_dict[run_id_str] = cur_run_process_list + + return run_process_dict + + def stop_job(self, run_id): + run_id_str = str(run_id) + if self.job_runners.get(run_id_str, None) is not None: + self.job_runners[run_id_str].trigger_stop_event() + + @staticmethod + def get_start_train_topic_with_edge_id(edge_id): + return "flserver_agent/" + str(edge_id) + "/start_train" + + @abstractmethod + def _generate_protocol_manager_instance(self, args, agent_config=None): + return None diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py index c8123a717c..37a6dc8064 100755 --- a/python/fedml/computing/scheduler/slave/client_login.py +++ b/python/fedml/computing/scheduler/slave/client_login.py @@ -1,332 +1,11 @@ - import argparse -import json -import logging import os -import platform -import subprocess -import time -import traceback - -import click import fedml -from fedml.computing.scheduler.comm_utils import sys_utils -from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants -from fedml.computing.scheduler.slave.client_runner import FedMLClientRunner -from fedml.computing.scheduler.slave.client_constants import ClientConstants -from fedml.core.mlops.mlops_runtime_log import MLOpsRuntimeLog -from fedml.core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon - - -def init_logs(args, edge_id): - # Init runtime logs - args.log_file_dir = ClientConstants.get_log_file_dir() - args.run_id = 0 - args.role = "client" - client_ids = list() - client_ids.append(edge_id) - args.client_id_list = json.dumps(client_ids) - setattr(args, "using_mlops", True) - MLOpsRuntimeLog.get_instance(args).init_logs() - - -def __login_as_client(args, userid, api_key="", use_extra_device_id_suffix=None, role="client"): - setattr(args, "account_id", userid) - setattr(args, "current_running_dir", ClientConstants.get_fedml_home_dir()) - - sys_name = platform.system() - if sys_name == "Darwin": - sys_name = 
"MacOS" - if hasattr(args, "os_name") and args.os_name is not None and args.os_name != "": - pass - else: - setattr(args, "os_name", sys_name) - version = fedml.get_env_version() - setattr(args, "version", version) - setattr(args, "log_file_dir", ClientConstants.get_log_file_dir()) - is_from_docker = False - if hasattr(args, "device_id") and args.device_id is not None and args.device_id != "0": - setattr(args, "current_device_id", args.device_id) - is_from_docker = True - else: - is_gpu_supplier = (role == ClientConstants.login_role_list[ClientConstants.LOGIN_MODE_GPU_SUPPLIER_INDEX]) - setattr(args, "current_device_id", FedMLClientRunner.get_device_id(use_machine_id=is_gpu_supplier)) - setattr(args, "config_version", version) - setattr(args, "cloud_region", "") - - # Create client runner for communication with the FedML server. - runner = FedMLClientRunner(args) - - # Fetch configs from the MLOps config server. - service_config = dict() - config_try_count = 0 - edge_id = 0 - while config_try_count < 5: - try: - mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs() - service_config["mqtt_config"] = mqtt_config - service_config["s3_config"] = s3_config - service_config["ml_ops_config"] = mlops_config - service_config["docker_config"] = docker_config - runner.agent_config = service_config - # click.echo("service_config = {}".format(service_config)) - log_server_url = mlops_config.get("LOG_SERVER_URL", None) - if log_server_url is not None: - setattr(args, "log_server_url", log_server_url) - setattr(runner.args, "log_server_url", log_server_url) - break - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_1, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - config_try_count += 1 - time.sleep(3) - continue - - if config_try_count >= 5: - click.echo("") - click.echo("[1] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return - - # Judge whether running from fedml docker hub - is_from_fedml_docker_hub = False - dock_loc_file = ClientConstants.get_docker_location_file() - if os.path.exists(dock_loc_file): - is_from_fedml_docker_hub = True - - # Build unique device id - if is_from_docker: - unique_device_id = args.current_device_id + "@" + args.os_name + ".Docker.Edge.Device" - else: - unique_device_id = args.current_device_id + "@" + args.os_name + ".Edge.Device" - if is_from_fedml_docker_hub: - unique_device_id = args.current_device_id + "@" + args.os_name + ".DockerHub.Edge.Device" - - if use_extra_device_id_suffix is not None: - unique_device_id = args.current_device_id + "@" + args.os_name + use_extra_device_id_suffix - - # Bind account id to FedML® Nexus AI Platform - register_try_count = 0 - edge_id = -1 - user_name = None - extra_url = None - general_edge_id = None - while register_try_count < 5: - try: - edge_id, user_name, extra_url, general_edge_id = runner.bind_account_and_device_id( - service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, unique_device_id, args.os_name, - api_key=api_key, role=role - ) - if edge_id > 0: - runner.edge_id = edge_id - runner.edge_user_name = user_name - runner.edge_extra_url = extra_url - break - except SystemExit as e: - click.echo("Your account does not exist. 
Please make sure your account correct.") - os.system("fedml logout -c") - return - except Exception as e: - click.echo("{}\n{}".format(SchedulerConstants.ERR_MSG_BINDING_EXCEPTION_2, traceback.format_exc())) - click.echo(SchedulerConstants.ERR_MSG_BINDING_EXIT_RETRYING) - register_try_count += 1 - time.sleep(3) - continue - - if edge_id <= 0: - click.echo("") - click.echo("[2] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return - - # Init runtime logs - setattr(args, "client_id", edge_id) - setattr(args, "is_from_docker", is_from_docker) - runner.args = args - init_logs(args, edge_id) - # logging.info("args {}".format(args)) - - # Log arguments and binding results. - # logging.info("login: unique_device_id = %s" % str(unique_device_id)) - # logging.info("login: edge_id = %s" % str(edge_id)) - runner.unique_device_id = unique_device_id - runner.user_name = user_name - runner.general_edge_id = general_edge_id - ClientConstants.save_runner_infos(args.current_device_id + "." + args.os_name, edge_id, run_id=0) - - # Setup MQTT connection for communication with the FedML server. - try: - runner.setup_agent_mqtt_connection(service_config) - except Exception as e: - login_exit_file = os.path.join(ClientConstants.get_log_file_dir(), "exited.log") - with open(login_exit_file, "w") as f: - f.writelines(f"{os.getpid()}.") - print("finally") - runner.stop_agent() - raise e - - # Start mqtt looper - runner.start_agent_mqtt_loop() - - -def __login_as_simulator(args, userid, mqtt_connection=True): - setattr(args, "account_id", userid) - setattr(args, "current_running_dir", ClientConstants.get_fedml_home_dir()) - - sys_name = platform.system() - if sys_name == "Darwin": - sys_name = "MacOS" - setattr(args, "os_name", sys_name) - version = fedml.get_env_version() - setattr(args, "version", version) - setattr(args, "log_file_dir", ClientConstants.get_log_file_dir()) - setattr(args, "device_id", FedMLClientRunner.get_device_id()) - setattr(args, "current_device_id", FedMLClientRunner.get_device_id()) - setattr(args, "config_version", version) - setattr(args, "cloud_region", "") - - - # Create client runner for communication with the FedML server. - runner = FedMLClientRunner(args) - - # Fetch configs from the MLOps config server. 
- service_config = dict() - config_try_count = 0 - edge_id = 0 - while config_try_count < 5: - try: - mqtt_config, s3_config, mlops_config, docker_config = runner.fetch_configs() - service_config["mqtt_config"] = mqtt_config - service_config["s3_config"] = s3_config - service_config["ml_ops_config"] = mlops_config - service_config["docker_config"] = docker_config - runner.agent_config = service_config - log_server_url = mlops_config.get("LOG_SERVER_URL", None) - if log_server_url is not None: - setattr(args, "log_server_url", log_server_url) - setattr(runner.args, "log_server_url", log_server_url) - break - except Exception as e: - config_try_count += 1 - time.sleep(3) - continue - - if config_try_count >= 5: - click.echo("") - click.echo("[3] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return False, edge_id, args - - # Build unique device id - if args.device_id is not None and len(str(args.device_id)) > 0: - unique_device_id = args.device_id + "@" + args.os_name + ".Edge.Simulator" - - # Bind account id to FedML® Nexus AI Platform - register_try_count = 0 - edge_id = -1 - user_name = None - extra_url = None - general_edge_id = None - while register_try_count < 5: - try: - edge_id, _, _, _ = runner.bind_account_and_device_id( - service_config["ml_ops_config"]["EDGE_BINDING_URL"], args.account_id, - unique_device_id, args.os_name, role="simulator" - ) - if edge_id > 0: - runner.edge_id = edge_id - break - except SystemExit as e: - click.echo("Your account does not exist. Please make sure your account correct.") - os.system("fedml logout -c") - return - except Exception as e: - register_try_count += 1 - time.sleep(3) - continue - - if edge_id <= 0: - click.echo("") - click.echo("[4] Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") - return False, edge_id, args - - # Init runtime logs - setattr(args, "client_id", edge_id) - runner.args = args - #init_logs(args, edge_id) - logging.info("args {}".format(args)) - - # Log arguments and binding results. - logging.info("login: unique_device_id = %s" % str(unique_device_id)) - logging.info("login: edge_id = %s" % str(edge_id)) - runner.unique_device_id = unique_device_id - - if mqtt_connection: - ClientConstants.save_runner_infos(args.device_id + "." + args.os_name, edge_id, run_id=0) - - # Setup MQTT connection for communication with the FedML server. - try: - runner.setup_agent_mqtt_connection(service_config) - except Exception as e: - pass - - # Open simulator daemon process to process run status. 
- simulator_daemon_cmd = os.path.join(os.path.dirname(__file__), "simulator_daemon.py") - env_version = fedml.get_env_version() - simulator_daemon_process = sys_utils.run_subprocess_open( - [ - sys_utils.get_python_program(), - simulator_daemon_cmd, - "-t", - "login", - "-u", - str(args.user), - "-v", - env_version, - "-r", - args.role, - "-id", - args.device_id, - "-os", - args.os_name, - "-rk", - "1", - "-lfd", - args.log_file_dir, - "-cf", - env_version, - "-ci", - str(edge_id) - ] - ).pid - - # Start mqtt looper - runner.start_agent_mqtt_loop() - - return True, edge_id, args - - -def login(args): - if args.role == ClientConstants.login_role_list[ClientConstants.LOGIN_MODE_CLIENT_INDEX]: - __login_as_client(args, args.user, api_key=args.api_key) - elif args.role == ClientConstants.login_role_list[ClientConstants.LOGIN_MODE_GPU_SUPPLIER_INDEX]: - if args.no_gpu_check == 0: - gpu_count, _ = sys_utils.get_gpu_count_vendor() - if gpu_count <= 0: - click.echo("We can't find any gpu device on your machine. \n" - "With the gpu_supplier(-g) option, you need to check if your machine " - "has nvidia GPUs and installs CUDA related drivers.") - return - __login_as_client(args, args.user, api_key=args.api_key, - use_extra_device_id_suffix=".Edge.GPU.Supplier", role=args.role) - elif args.role == ClientConstants.login_role_list[ClientConstants.LOGIN_MODE_EDGE_SIMULATOR_INDEX]: - __login_as_simulator(args, args.user) +from fedml.computing.scheduler.slave.slave_agent import FedMLLaunchSlaveAgent def logout(): - ClientConstants.cleanup_run_process(None) - sys_utils.cleanup_all_fedml_client_api_processes() + FedMLLaunchSlaveAgent.logout() if __name__ == "__main__": @@ -351,15 +30,17 @@ def logout(): if args.api_key == "": args.api_key = args.user + fedml.set_env_version("test") + if args.local_on_premise_platform_host != "127.0.0.1": fedml.set_local_on_premise_platform_host(args.local_on_premise_platform_host) if args.local_on_premise_platform_port != 80: fedml.set_local_on_premise_platform_port(args.local_on_premise_platform_port) fedml.set_env_version(args.version) + slave_agent = FedMLLaunchSlaveAgent() if args.type == 'login': - login(args) + slave_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id, + os_name=args.os_name, role=args.role) else: - logout() - - + FedMLLaunchSlaveAgent.logout() diff --git a/python/fedml/computing/scheduler/slave/client_runner.py b/python/fedml/computing/scheduler/slave/client_runner.py deleted file mode 100755 index aac57d2174..0000000000 --- a/python/fedml/computing/scheduler/slave/client_runner.py +++ /dev/null @@ -1,1775 +0,0 @@ -import json -import logging -import multiprocessing -import sys - -from multiprocessing import Process -import os -import platform -import shutil -import subprocess -import threading - -import time -import traceback -import urllib -import uuid -import zipfile -from urllib.parse import urljoin, urlparse - -import requests - -import fedml -from ..comm_utils.constants import SchedulerConstants -from ..comm_utils.job_cleanup import JobCleanup -from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs -from ..comm_utils.run_process_utils import RunProcessUtils -from ..scheduler_entry.constants import Constants -from ....core.mlops.mlops_device_perfs import MLOpsDevicePerfStats -from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog - -from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager -from ..comm_utils.yaml_utils import load_yaml_config -from .client_constants import ClientConstants - 
-from ....core.mlops.mlops_metrics import MLOpsMetrics - -from ....core.mlops.mlops_configs import MLOpsConfigs -from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon -from ....core.mlops.mlops_status import MLOpsStatus -from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program -from .client_data_interface import FedMLClientDataInterface -from ..comm_utils import sys_utils -from ....core.mlops.mlops_utils import MLOpsUtils -from ..model_scheduler.model_device_client import FedMLModelDeviceClientRunner -from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner -from ..comm_utils import security_utils -from ..scheduler_core.compute_cache_manager import ComputeCacheManager -from ..scheduler_core.message_center import FedMLMessageCenter - - -class RunnerError(Exception): - """ Runner stopped. """ - pass - - -class RunnerCompletedError(Exception): - """ Runner completed. """ - pass - - -class FedMLClientRunner(FedMLMessageCenter): - - def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0, - cuda_visible_gpu_ids_str=None): - super().__init__() - self.model_device_server_id = None - self.model_device_client_edge_id_list = None - self.disable_client_login = False - self.model_device_server = None - self.model_device_client_list = None - self.run_process_event = None - self.run_process_event_map = dict() - self.run_process_completed_event = None - self.run_process_completed_event_map = dict() - self.run_process = None - self.run_process_map = dict() - self.running_request_json = dict() - self.local_api_process = None - self.start_request_json = None - self.device_status = None - self.current_training_status = None - self.mqtt_mgr = None - self.edge_id = edge_id - self.edge_user_name = None - self.edge_extra_url = None - self.run_id = run_id - self.unique_device_id = None - self.args = args - self.request_json = request_json - self.version = args.version - self.device_id = args.device_id - self.cur_dir = os.path.split(os.path.realpath(__file__))[0] - if args.current_running_dir is not None: - self.cur_dir = args.current_running_dir - self.sudo_cmd = "" - self.is_mac = False - if platform.system() == "Darwin": - self.is_mac = True - - self.agent_config = agent_config - self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data") - self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data") - self.fedml_data_dir = self.fedml_data_base_package_dir - self.fedml_config_dir = os.path.join("/", "fedml", "conf") - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = { - "${FEDSYS.RUN_ID}": "", - "${FEDSYS.PRIVATE_LOCAL_DATA}": "", - "${FEDSYS.CLIENT_ID_LIST}": "", - "${FEDSYS.SYNTHETIC_DATA_URL}": "", - "${FEDSYS.IS_USING_LOCAL_DATA}": "", - "${FEDSYS.CLIENT_NUM}": "", - "${FEDSYS.CLIENT_INDEX}": "", - "${FEDSYS.CLIENT_OBJECT_LIST}": "", - "${FEDSYS.LOG_SERVER_URL}": "", - } - - self.mlops_metrics = None - self.client_active_list = dict() - self.ntp_offset = MLOpsUtils.get_ntp_offset() - self.server_id = None - self.computing_started_time = 0 - self.fedml_config_object = None - self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT - self.cuda_visible_gpu_ids_str = cuda_visible_gpu_ids_str - # logging.info("Current directory of client agent: " + self.cur_dir) - self.subscribed_topics = list() - self.user_name = None - self.general_edge_id = None - self.message_center = None - - def __repr__(self): - return "<{klass} @{id:x} {attrs}>".format( - klass=self.__class__.__name__, - 
id=id(self) & 0xFFFFFF, - attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), - ) - - def copy_runner(self): - copy_runner = FedMLClientRunner(self.args) - copy_runner.disable_client_login = self.disable_client_login - copy_runner.model_device_server = self.model_device_server - copy_runner.model_device_client_list = self.model_device_client_list - copy_runner.run_process_event = self.run_process_event - copy_runner.run_process_event_map = self.run_process_event_map - copy_runner.run_process_completed_event = self.run_process_completed_event - copy_runner.run_process_completed_event_map = self.run_process_completed_event_map - copy_runner.run_process = self.run_process - copy_runner.run_process_map = self.run_process_map - copy_runner.running_request_json = self.running_request_json - copy_runner.local_api_process = self.local_api_process - copy_runner.start_request_json = self.start_request_json - copy_runner.device_status = self.device_status - copy_runner.current_training_status = self.current_training_status - copy_runner.mqtt_mgr = self.mqtt_mgr - copy_runner.edge_id = self.edge_id - copy_runner.edge_user_name = self.edge_user_name - copy_runner.edge_extra_url = self.edge_extra_url - copy_runner.run_id = self.run_id - copy_runner.unique_device_id = self.unique_device_id - copy_runner.args = self.args - copy_runner.request_json = self.request_json - copy_runner.version =self.version - copy_runner.device_id = self.device_id - copy_runner.cur_dir = self.cur_dir - copy_runner.cur_dir = self.cur_dir - copy_runner.sudo_cmd = self.sudo_cmd - copy_runner.is_mac = self.is_mac - - copy_runner.agent_config = self.agent_config - copy_runner.fedml_data_base_package_dir = self.fedml_data_base_package_dir - copy_runner.fedml_data_local_package_dir = self.fedml_data_local_package_dir - copy_runner.fedml_data_dir = self.fedml_data_dir - copy_runner.fedml_config_dir = self.fedml_config_dir - - copy_runner.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES - - copy_runner.mlops_metrics = self.mlops_metrics - copy_runner.client_active_list = self.client_active_list - copy_runner.ntp_offset = self.ntp_offset - copy_runner.server_id = self.server_id - copy_runner.computing_started_time = self.computing_started_time - copy_runner.fedml_config_object = self.fedml_config_object - copy_runner.package_type = self.package_type - copy_runner.cuda_visible_gpu_ids_str = self.cuda_visible_gpu_ids_str - copy_runner.subscribed_topics = self.subscribed_topics - copy_runner.user_name = self.user_name - copy_runner.general_edge_id = self.general_edge_id - copy_runner.message_center = self.message_center - - return copy_runner - - def build_dynamic_constrain_variables(self, run_id, run_config): - data_config = run_config.get("data_config", {}) - server_edge_id_list = self.request_json["edgeids"] - local_edge_id_list = list() - local_edge_id_list.append(int(self.edge_id)) - is_using_local_data = 0 - private_data_dir = data_config.get("privateLocalData", "") - synthetic_data_url = data_config.get("syntheticDataUrl", "") - edges = self.request_json["edges"] - # if private_data_dir is not None \ - # and len(str(private_data_dir).strip(' ')) > 0: - # is_using_local_data = 1 - if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0: - params_config = run_config.get("parameters", None) - private_data_dir = ClientConstants.get_data_dir() - if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0: - synthetic_data_url = private_data_dir - - 
self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data) - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list) - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = 1 - for cur_index, id_value in enumerate(server_edge_id_list): - if str(id_value) == str(self.edge_id): - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = cur_index + 1 - break - client_objects = str(json.dumps(edges)) - client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"') - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][ - "LOG_SERVER_URL" - ] - - def unzip_file(self, zip_file, unzip_file_path) -> str: - if zipfile.is_zipfile(zip_file): - with zipfile.ZipFile(zip_file, "r") as zipf: - zipf.extractall(unzip_file_path) - unzipped_file_name = zipf.namelist()[0] - else: - raise Exception("Invalid zip file {}".format(zip_file)) - - return unzipped_file_name - - def package_download_progress(self, count, blksize, filesize): - self.check_runner_stop_event() - - downloaded = count * blksize - downloaded = filesize if downloaded > filesize else downloaded - progress = (downloaded / filesize * 100) if filesize != 0 else 0 - progress_int = int(progress) - downloaded_kb = format(downloaded / 1024, '.2f') - - # since this hook funtion is stateless, we need a state to avoid print progress repeatly - if count == 0: - self.prev_download_progress = 0 - if progress_int != self.prev_download_progress and progress_int % 5 == 0: - self.prev_download_progress = progress_int - logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) - - def retrieve_and_unzip_package(self, package_name, package_url): - local_package_path = ClientConstants.get_package_download_dir() - os.makedirs(local_package_path, exist_ok=True) - filename, filename_without_extension, file_extension = ClientConstants.get_filename_and_extension(package_url) - local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}") - if os.path.exists(local_package_file): - os.remove(local_package_file) - package_url_without_query_path = urljoin(package_url, urlparse(package_url).path) - urllib.request.urlretrieve(package_url_without_query_path, local_package_file, - reporthook=self.package_download_progress) - unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(), - f"unzip_fedml_run_{self.run_id}_{filename_without_extension}") - try: - shutil.rmtree(unzip_package_path, ignore_errors=True) - except Exception as e: - logging.error( - f"Failed to remove directory {unzip_package_path}, Exception: {e}, Traceback: {traceback.format_exc()}") - pass - - package_dir_name = self.unzip_file(local_package_file, unzip_package_path) # Using unziped folder name - unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name) - - logging.info("local_package_file {}, unzip_package_path {}, unzip file full 
path {}".format( - local_package_file, unzip_package_path, unzip_package_full_path)) - - return unzip_package_full_path - - def update_local_fedml_config(self, run_id, run_config): - packages_config = run_config["packages_config"] - - # Copy config file from the client - unzip_package_path = self.retrieve_and_unzip_package( - packages_config["linuxClient"], packages_config["linuxClientUrl"] - ) - fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") - - # Load the above config to memory - config_from_container = load_yaml_config(fedml_local_config_file) - container_entry_file_config = config_from_container["entry_config"] - container_dynamic_args_config = config_from_container["dynamic_args"] - entry_file = container_entry_file_config["entry_file"] - conf_file = container_entry_file_config["conf_file"] - self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT) - full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file)) - - # Dynamically build constrain variable with realtime parameters from server - self.build_dynamic_constrain_variables(run_id, run_config) - - # Update entry arguments value with constrain variable values with realtime parameters from server - # currently we support the following constrain variables: - # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow - # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client - # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow - # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server, - # if this value is not null, the client will download data from this URL to use it as - # federated training data set - # ${FEDSYS_IS_USING_LOCAL_DATA}: whether use private local data as federated training data set - # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}" - for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items(): - for argument_key, argument_value in container_dynamic_args_config.items(): - if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0: - replaced_argument_value = str(argument_value).replace( - constrain_variable_key, str(constrain_variable_value) - ) - container_dynamic_args_config[argument_key] = replaced_argument_value - - # Merge all container new config sections as new config dictionary - package_conf_object = dict() - package_conf_object["entry_config"] = container_entry_file_config - package_conf_object["dynamic_args"] = container_dynamic_args_config - package_conf_object["dynamic_args"]["config_version"] = self.args.config_version - container_dynamic_args_config["mqtt_config_path"] = os.path.join( - unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"]) - ) - container_dynamic_args_config["s3_config_path"] = os.path.join( - unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"]) - ) - log_file_dir = ClientConstants.get_log_file_dir() - os.makedirs(log_file_dir, exist_ok=True) - package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir - - # Save new config dictionary to local file - fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") - ClientConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file) - - # Build dynamic arguments and set arguments to 
fedml config object - self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path) - return unzip_package_path, package_conf_object - - def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir): - fedml_conf_file = package_conf_object["entry_config"]["conf_file"] - fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep) - fedml_conf_path = os.path.join(base_dir, "fedml", "config", - os.path.basename(fedml_conf_file_processed)) - fedml_conf_object = load_yaml_config(fedml_conf_path) - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - - # Replace local fedml config objects with parameters from MLOps web - parameters_object = run_config.get("parameters", None) - if parameters_object is not None: - for config_k, config_v in fedml_conf_object.items(): - parameter_v = parameters_object.get(config_k, None) - if parameter_v is not None: - fedml_conf_object[config_k] = parameter_v - parameters_object.pop(config_k) - - for config_k, config_v in parameters_object.items(): - fedml_conf_object[config_k] = config_v - - package_dynamic_args = package_conf_object["dynamic_args"] - if fedml_conf_object.get("comm_args", None) is not None: - fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"] - fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"] - fedml_conf_object["common_args"]["using_mlops"] = True - if fedml_conf_object.get("train_args", None) is not None: - fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"] - fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"] - fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"]) - fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"]) - fedml_conf_object["train_args"]["client_id"] = self.edge_id - fedml_conf_object["train_args"]["server_id"] = self.request_json.get("server_id", "0") - if fedml_conf_object.get("device_args", None) is not None: - fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"]) - # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"] - data_args = fedml_conf_object.get("data_args") - if data_args is not None: - data_cache_dir = fedml_conf_object["data_args"].get("data_cache_dir") - if data_cache_dir is not None: - data_cache_dir = os.path.join(data_cache_dir, str(self.edge_id)) - fedml_conf_object["data_args"]["data_cache_dir"] = data_cache_dir - if fedml_conf_object.get("tracking_args", None) is not None: - fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"] - fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"] - - fedml_conf_object["dynamic_args"] = package_dynamic_args - self.fedml_config_object = fedml_conf_object.copy() - ClientConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path) - - def run_bootstrap_script(self, bootstrap_cmd_list, bootstrap_script_file): - try: - logging.info("Bootstrap commands are being executed...") - process, error_list = ClientConstants.execute_commands_with_live_logs(bootstrap_cmd_list, - callback=self.callback_run_bootstrap) - - ret_code, out, err = process.returncode, None, None - if ret_code is None or ret_code <= 0: - if error_list is not None and len(error_list) > 0: - 
is_bootstrap_run_ok = False - else: - if out is not None: - out_str = sys_utils.decode_our_err_result(out) - if out_str != "": - logging.info("{}".format(out_str)) - - sys_utils.log_return_info(bootstrap_script_file, 0) - - is_bootstrap_run_ok = True - else: - if err is not None: - err_str = sys_utils.decode_our_err_result(err) - if err_str != "": - logging.error("{}".format(err_str)) - - sys_utils.log_return_info(bootstrap_script_file, ret_code) - - is_bootstrap_run_ok = False - except Exception as e: - logging.error(f"Bootstrap script error: Exception: {e}, Traceback: {traceback.format_exc()}") - is_bootstrap_run_ok = False - return is_bootstrap_run_ok - - def callback_run_bootstrap(self, job_pid): - ClientConstants.save_bootstrap_process(self.run_id, job_pid) - - def run(self, process_event, completed_event, message_center_queue): - print(f"Client runner process id {os.getpid()}, run id {self.run_id}") - - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - self.run_process_completed_event = completed_event - try: - MLOpsUtils.set_ntp_offset(self.ntp_offset) - self.rebuild_message_center(message_center_queue) - self.run_impl() - except RunnerError: - logging.info("Runner stopped.") - self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED) - except RunnerCompletedError: - logging.info("Runner completed.") - except Exception as e: - logging.error(f"Runner exited with errors. Exception: {e}, Traceback {traceback.format_exc()}") - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - server_id=self.server_id, run_id=self.run_id) - finally: - if self.mlops_metrics is not None: - computing_ended_time = MLOpsUtils.get_ntp_time() - self.mlops_metrics.report_edge_job_computing_cost(self.run_id, self.edge_id, - self.computing_started_time, computing_ended_time, - self.args.user, self.args.api_key) - logging.info("Release resources.") - self.cleanup_containers_and_release_gpus(self.run_id, self.edge_id) - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) - if self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - ClientConstants.cleanup_learning_process(self.run_id) - ClientConstants.cleanup_run_process(self.run_id) - - def check_runner_stop_event(self): - if self.run_process_event.is_set(): - logging.info("Received stopping event.") - raise RunnerError("Runner stopped") - - if self.run_process_completed_event.is_set(): - logging.info("Received completed event.") - raise RunnerCompletedError("Runner completed") - - def run_impl(self): - run_id = self.request_json["runId"] - run_config = self.request_json["run_config"] - data_config = run_config.get("data_config", {}) - packages_config = run_config["packages_config"] - - self.computing_started_time = MLOpsUtils.get_ntp_time() - self.mlops_metrics.report_edge_job_computing_cost(run_id, self.edge_id, - self.computing_started_time, 0, - self.args.user, self.args.api_key) - - self.check_runner_stop_event() - - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING, - running_json=self.start_request_json, run_id=run_id) - - # get training params - 
private_local_data_dir = data_config.get("privateLocalData", "") - is_using_local_data = 0 - # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0: - # is_using_local_data = 1 - - # start a run according to the hyper-parameters - # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id) - fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data") - fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config") - if is_using_local_data: - fedml_local_data_dir = private_local_data_dir - self.fedml_data_dir = self.fedml_data_local_package_dir - - self.check_runner_stop_event() - - logging.info("Download packages") - - # update local config with real time parameters from server and dynamically replace variables value - unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config) - # if unzip_package_path is None or fedml_config_object is None: - # logging.info("failed to update local fedml config.") - # self.check_runner_stop_event() - # # Send failed msg when exceptions. - # self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION) - # return - - logging.info("Check downloaded packages...") - - entry_file_config = fedml_config_object["entry_config"] - dynamic_args_config = fedml_config_object["dynamic_args"] - entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep) - entry_file = os.path.basename(entry_file) - conf_file = entry_file_config["conf_file"] - conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep) - ##### - # ClientConstants.cleanup_learning_process(run_id) - # ClientConstants.cleanup_bootstrap_process(run_id) - ##### - - if not os.path.exists(unzip_package_path): - logging.info("failed to unzip file.") - self.check_runner_stop_event() - # Send failed msg when exceptions. 
- self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION) - return - os.chdir(os.path.join(unzip_package_path, "fedml")) - - self.check_runner_stop_event() - - logging.info("starting the user process...") - - entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file) - conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file) - logging.info("waiting the user process to finish...") - logging.info(" ") - logging.info(" ") - logging.info("====Your Run Logs Begin===") - - process, is_launch_task, error_list = self.execute_job_task(unzip_package_path=unzip_package_path, - entry_file_full_path=entry_file_full_path, - conf_file_full_path=conf_file_full_path, - dynamic_args_config=dynamic_args_config, - fedml_config_object=self.fedml_config_object) - - logging.info("====Your Run Logs End===") - logging.info(" ") - logging.info(" ") - - ret_code, out, err = process.returncode if process else None, None, None - is_run_ok = sys_utils.is_runner_finished_normally(process.pid) - if is_launch_task: - is_run_ok = True - if error_list is not None and len(error_list) > 0: - is_run_ok = False - if ret_code is None or ret_code <= 0: - self.check_runner_stop_event() - - if is_run_ok: - if out is not None: - out_str = sys_utils.decode_our_err_result(out) - if out_str != "": - logging.info("{}".format(out_str)) - - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - server_id=self.server_id, run_id=run_id) - - if is_launch_task: - sys_utils.log_return_info(f"job {run_id}", ret_code) - else: - sys_utils.log_return_info(entry_file, ret_code) - else: - is_run_ok = False - - if not is_run_ok: - # If the run status is killed or finished, then return with the normal state. - current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id) - if current_job is not None and (current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or - current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED): - return - - self.check_runner_stop_event() - - logging.error("failed to run the learning process...") - - if err is not None: - err_str = sys_utils.decode_our_err_result(err) - if err_str != "": - logging.error("{}".format(err_str)) - - if is_launch_task: - sys_utils.log_return_info(f"job {run_id}", ret_code) - else: - sys_utils.log_return_info(entry_file, ret_code) - - # Send failed msg when exceptions. - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - server_id=self.server_id, run_id=run_id) - - def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_full_path, dynamic_args_config, - fedml_config_object): - run_config = self.request_json["run_config"] - run_params = run_config.get("parameters", {}) - client_rank = self.request_json.get("client_rank", 1) - job_yaml = run_params.get("job_yaml", {}) - job_yaml_default_none = run_params.get("job_yaml", None) - job_api_key = job_yaml.get("run_api_key", None) - job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key - assigned_gpu_ids = run_params.get("gpu_ids", None) - job_type = job_yaml.get("job_type", None) - containerize = fedml_config_object.get("containerize", None) - image_pull_policy = fedml_config_object.get("image_pull_policy", Constants.IMAGE_PULL_POLICY_ALWAYS) - # TODO: Can we remove task_type? 
- job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - conf_file_object = load_yaml_config(conf_file_full_path) - entry_args_dict = conf_file_object.get("fedml_entry_args", {}) - entry_args = entry_args_dict.get("arg_items", None) - scheduler_match_info = self.request_json.get("scheduler_match_info", {}) - if job_type == Constants.JOB_TASK_TYPE_TRAIN: - containerize = True if containerize is None else containerize - - # Bootstrap Info - bootstrap_script_path, bootstrap_script_dir, bootstrap_script_file = [None] * 3 - env_args = fedml_config_object.get("environment_args", None) - - if env_args is not None: - bootstrap_script_file = env_args.get("bootstrap", None) - if bootstrap_script_file is not None: - bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep) - if platform.system() == 'Windows': - bootstrap_script_file = bootstrap_script_file.rstrip('.sh') + '.bat' - if bootstrap_script_file is not None: - bootstrap_script_dir = os.path.join(unzip_package_path, "fedml", - os.path.dirname(bootstrap_script_file)) - bootstrap_script_path = os.path.join( - bootstrap_script_dir, bootstrap_script_dir, os.path.basename(bootstrap_script_file) - ) - - bootstrap_cmd_list = list() - if bootstrap_script_path: - logging.info("Bootstrap commands are being generated...") - bootstrap_cmd_list = JobRunnerUtils.generate_bootstrap_commands(bootstrap_script_path=bootstrap_script_path, - bootstrap_script_dir=bootstrap_script_dir, - bootstrap_script_file=bootstrap_script_file) - logging.info(f"Generated following Bootstrap commands: {bootstrap_cmd_list}") - - if not containerize: - if len(bootstrap_cmd_list) and not (job_type == Constants.JOB_TASK_TYPE_DEPLOY or - job_type == Constants.JOB_TASK_TYPE_SERVE): - bootstrapping_successful = self.run_bootstrap_script(bootstrap_cmd_list=bootstrap_cmd_list, - bootstrap_script_file=bootstrap_script_file) - - if not bootstrapping_successful: - logging.info("failed to update local fedml config.") - self.check_runner_stop_event() - # Send failed msg when exceptions. 
- self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION) - raise Exception(f"Failed to execute following bootstrap commands: {bootstrap_cmd_list}") - - logging.info("cleanup the previous learning process and bootstrap process...") - ClientConstants.cleanup_learning_process(self.request_json["runId"]) - ClientConstants.cleanup_bootstrap_process(self.request_json["runId"]) - - executable_interpreter = ClientConstants.CLIENT_SHELL_PS \ - if platform.system() == ClientConstants.PLATFORM_WINDOWS else ClientConstants.CLIENT_SHELL_BASH - - if job_yaml_default_none is None: - # Generate the job executing commands for previous federated learning (Compatibility) - python_program = get_python_program() - logging.info("Run the client: {} {} --cf {} --rank {} --role client".format( - python_program, entry_file_full_path, conf_file_full_path, str(dynamic_args_config.get("rank", 1)))) - rank = str(dynamic_args_config.get("rank", 1)) - entry_command = f"{python_program} {entry_file_full_path} --cf " \ - f"{conf_file_full_path} --rank {rank} --role client" - shell_cmd_list = [entry_command] - - # Run the job executing commands for previous federated learning (Compatibility) - process, error_list = ClientConstants.execute_commands_with_live_logs( - shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False) - is_launch_task = False - else: - self.check_runner_stop_event() - - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, run_id=self.run_id) - - # Generate the job executing commands - job_executing_commands = JobRunnerUtils.generate_job_execute_commands( - self.run_id, self.edge_id, self.version, - self.package_type, executable_interpreter, entry_file_full_path, - conf_file_object, entry_args, assigned_gpu_ids, - job_api_key, client_rank, scheduler_match_info=scheduler_match_info, - cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str) - - if containerize is not None and containerize is True: - docker_args = fedml_config_object.get("docker", {}) - docker_args = JobRunnerUtils.create_instance_from_dict(DockerArgs, docker_args) - try: - job_executing_commands = JobRunnerUtils.generate_launch_docker_command(docker_args=docker_args, - run_id=self.run_id, - edge_id=self.edge_id, - unzip_package_path=unzip_package_path, - executable_interpreter=executable_interpreter, - entry_file_full_path=entry_file_full_path, - bootstrap_cmd_list=bootstrap_cmd_list, - cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str, - image_pull_policy=image_pull_policy) - except Exception as e: - logging.error(f"Error occurred while generating containerized launch commands. 
" - f"Exception: {e}, Traceback: {traceback.format_exc()}") - return None, None, None - - if not job_executing_commands: - raise Exception("Failed to generate docker execution command") - - # Run the job executing commands - logging.info(f"Run the client job with job id {self.run_id}, device id {self.edge_id}.") - process, error_list = ClientConstants.execute_commands_with_live_logs( - job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor, - should_write_log_file=False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True) - is_launch_task = False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True - - return process, is_launch_task, error_list - - def callback_start_fl_job(self, job_pid): - ClientConstants.save_learning_process(self.run_id, job_pid) - self.mlops_metrics.report_sys_perf( - self.args, self.agent_config["mqtt_config"], job_process_id=job_pid) - - def start_job_perf(self, job_pid): - ClientConstants.save_learning_process(self.run_id, job_pid) - self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid) - - def job_error_processor(self, error_list): - self.check_runner_stop_event() - - error_str = "\n".join(error_list) - error_message = f"Error occurred when running the job... {error_str}" - logging.error(error_message) - raise Exception(error_message) - - def reset_devices_status(self, edge_id, status, should_send_client_id_status=True): - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = edge_id - - if should_send_client_id_status: - if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION: - self.mlops_metrics.report_client_id_status( - edge_id, status, server_id=self.server_id, run_id=self.run_id) - - def sync_run_stop_status(self, run_status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED): - try: - if self.run_process_event is not None: - self.run_process_event.set() - - self.mlops_metrics.report_client_id_status( - self.edge_id, run_status, server_id=self.server_id, run_id=self.run_id) - except Exception as e: - logging.error(f"Failed to sync run stop status with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - def cleanup_run_when_starting_failed( - self, status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, should_send_client_id_status=True): - # logging.error("Cleanup run successfully when starting failed.") - - self.reset_devices_status( - self.edge_id, status, should_send_client_id_status=should_send_client_id_status) - - time.sleep(2) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}") - pass - - time.sleep(1) - - try: - ClientConstants.cleanup_learning_process(self.run_id) - ClientConstants.cleanup_bootstrap_process(self.run_id) - ClientConstants.cleanup_run_process(self.run_id) - except Exception as e: - logging.error( - f"Failed to cleanup run when starting failed with Exception {e}. 
Traceback: {traceback.format_exc()}") - pass - - def cleanup_run_when_finished(self): - # logging.info("Cleanup run successfully when finished.") - - self.reset_devices_status(self.edge_id, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - should_send_client_id_status=False) - - time.sleep(2) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}") - pass - - time.sleep(1) - - try: - ClientConstants.cleanup_learning_process(self.run_id) - ClientConstants.cleanup_bootstrap_process(self.run_id) - ClientConstants.cleanup_run_process(self.run_id) - except Exception as e: - logging.error( - f"Failed to cleanup run when finished with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - def setup_message_center(self): - if self.message_center is not None: - return - - self.message_center = FedMLMessageCenter(agent_config=self.agent_config) - self.message_center.start_sender() - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.message_center) - self.mlops_metrics.run_id = self.run_id - - def rebuild_message_center(self, message_center_queue): - self.message_center = FedMLMessageCenter(message_queue=message_center_queue) - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.message_center) - self.mlops_metrics.run_id = self.run_id - - def release_message_center(self): - try: - if self.message_center is not None: - self.message_center.stop() - self.message_center = None - - except Exception as e: - logging.error( - f"Failed to release client mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - def ota_upgrade(self, payload, request_json): - run_id = request_json["runId"] - force_ota = False - ota_version = None - - try: - run_config = request_json.get("run_config", None) - parameters = run_config.get("parameters", None) - common_args = parameters.get("common_args", None) - force_ota = common_args.get("force_ota", False) if common_args is not None else False - ota_version = common_args.get("ota_version", None) if common_args is not None else None - except Exception as e: - logging.error( - f"Failed to get ota upgrade parameters with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - if force_ota and ota_version is not None: - should_upgrade = True if ota_version != fedml.__version__ else False - upgrade_version = ota_version - else: - try: - fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version) - except Exception as e: - logging.error(f"Failed to check fedml version with Exception {e}. Traceback: {traceback.format_exc()}") - return - - should_upgrade = False if fedml_is_latest_version else True - upgrade_version = remote_ver - - if should_upgrade: - FedMLClientDataInterface.get_instance(). 
\ - save_started_job(run_id, self.edge_id, time.time(), - ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, - payload) - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, run_id=run_id) - - logging.info(f"Upgrade to version {upgrade_version} ...") - - sys_utils.do_upgrade(self.version, upgrade_version) - raise Exception("Restarting after upgraded...") - - def callback_start_train(self, topic, payload): - # Get training params - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json["runId"] - - # Start log processor for current run - train_edge_id = str(topic).split("/")[-2] - self.args.run_id = run_id - self.args.edge_id = train_edge_id - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( - run_id, train_edge_id, log_source=SchedulerConstants.get_log_source(request_json)) - logging.info("start the log processor") - - try: - MLOpsConfigs.fetch_all_configs() - except Exception as e: - logging.error(f"Failed to fetch all configs with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - if not FedMLClientDataInterface.get_instance().get_agent_status(): - request_json = json.loads(payload) - run_id = request_json["runId"] - logging.error( - "FedMLDebug - Receive: topic ({}), payload ({}), but the client agent is disabled. {}".format( - topic, payload, traceback.format_exc() - ) - ) - # Send failed msg when exceptions. - self.mlops_metrics.report_client_id_status( - train_edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, run_id=run_id, - msg=f"the client agent {train_edge_id} is disabled") - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, train_edge_id) - return - - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - # Terminate previous process about starting or stopping run command - logging.info("cleanup and save runner information") - server_agent_id = request_json["cloud_agent_id"] - ClientConstants.save_runner_infos(self.args.device_id + "." 
+ self.args.os_name, train_edge_id, run_id=run_id) - - # OTA upgrade - self.ota_upgrade(payload, request_json) - - # Occupy GPUs - scheduler_match_info = request_json.get("scheduler_match_info", {}) - matched_gpu_num = scheduler_match_info.get("matched_gpu_num", 0) - model_master_device_id = scheduler_match_info.get("model_master_device_id", None) - model_slave_device_id = scheduler_match_info.get("model_slave_device_id", None) - model_slave_device_id_list = scheduler_match_info.get("model_slave_device_id_list", None) - run_config = request_json.get("run_config", {}) - run_params = run_config.get("parameters", {}) - serving_args = run_params.get("serving_args", {}) - endpoint_id = serving_args.get("endpoint_id", None) - cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids( - run_id, matched_gpu_num, train_edge_id, inner_id=endpoint_id, - model_master_device_id=model_master_device_id, - model_slave_device_id=model_slave_device_id) - logging.info( - f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(train_edge_id)}") - - # Start server with multiprocessing mode - self.request_json = request_json - run_id_str = str(run_id) - self.running_request_json[run_id_str] = request_json - client_runner = FedMLClientRunner( - self.args, edge_id=train_edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id, - cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str - ) - client_runner.start_request_json = payload - self.run_process_event_map[run_id_str] = multiprocessing.Event() - self.run_process_event_map[run_id_str].clear() - client_runner.run_process_event = self.run_process_event_map[run_id_str] - self.run_process_completed_event_map[run_id_str] = multiprocessing.Event() - self.run_process_completed_event_map[run_id_str].clear() - client_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str] - client_runner.server_id = request_json.get("server_id", "0") - logging.info("start the runner process.") - self.run_process_map[run_id_str] = Process(target=client_runner.run, args=( - self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str], - self.message_center.get_message_queue())) - self.run_process_map[run_id_str].start() - ClientConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - - def callback_stop_train(self, topic, payload): - # logging.info("callback_stop_train: topic = %s, payload = %s" % (topic, payload)) - # logging.info( - # f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - # ) - - train_edge_id = str(topic).split("/")[-2] - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json.get("runId", None) - if run_id is None: - run_id = request_json.get("id", None) - run_status = request_json.get("run_status", ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED) - - # logging.info("Stop run with multiprocessing...") - - # Stop client with multiprocessing mode - run_id_str = str(run_id) - client_runner = FedMLClientRunner( - self.args, edge_id=train_edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id - ) - self.cleanup_containers_and_release_gpus(run_id, train_edge_id) - client_runner.run_process_event = self.run_process_event_map.get(run_id_str, None) - client_runner.run_process = self.run_process_map.get(run_id_str, None) - client_runner.message_center = self.message_center - client_runner.mlops_metrics = self.mlops_metrics 
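# For context, the start/stop flow above hinges on one multiprocessing.Event
# per run: callback_start_train creates the event and hands it to the child
# process, and callback_stop_train sets it so the runner exits cooperatively.
# A minimal, self-contained sketch of that bookkeeping; the helper names here
# are hypothetical stand-ins, not the FedML API:
import multiprocessing

run_process_event_map, run_process_map = {}, {}

def start_run(run_id, target):
    run_id_str = str(run_id)
    event = multiprocessing.Event()
    run_process_event_map[run_id_str] = event
    proc = multiprocessing.Process(target=target, args=(event,))
    run_process_map[run_id_str] = proc
    proc.start()

def stop_run(run_id):
    # Cooperative stop: the child polls event.is_set() and cleans up itself.
    event = run_process_event_map.get(str(run_id))
    if event is not None:
        event.set()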
- client_runner.sync_run_stop_status(run_status=run_status) - - def cleanup_containers_and_release_gpus(self, run_id, edge_id): - job_type = JobRunnerUtils.get_job_type_from_run_id(run_id) - - if not job_type: - logging.info(f"Failed to get job type from run id {run_id}. This is not an error as it would usually " - f"happen when the job is not found in the database because job is already finished and " - f"cleaned up. Exiting cleanup_containers_and_release_gpus.") - return - - # Check if the job type is not "serve" or "deploy" - if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or - job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY): - - # Terminate the run docker container if exists - container_name = JobRunnerUtils.get_run_container_name(run_id) - docker_client = JobRunnerUtils.get_docker_client(DockerArgs()) - logging.info(f"Terminating the run docker container {container_name} if exists...") - try: - JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client) - except Exception as e: - logging.error(f"Exception {e} occurred when terminating docker container. " - f"Traceback: {traceback.format_exc()}") - - # Release the GPU ids and update the GPU availability in the persistent store - JobRunnerUtils.get_instance().release_gpu_ids(run_id, edge_id) - - # Send mqtt message reporting the new gpu availability to the backend - MLOpsDevicePerfStats.report_gpu_device_info(self.edge_id, mqtt_mgr=self.mqtt_mgr) - - def cleanup_client_with_status(self): - if self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED: - # logging.info("received to finished status.") - self.cleanup_run_when_finished() - elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED: - # logging.error("received to failed status from the server agent") - self.cleanup_run_when_starting_failed(should_send_client_id_status=False) - elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED: - # logging.error("received to failed status from the server agent") - self.cleanup_run_when_starting_failed(status=self.device_status, should_send_client_id_status=False) - - def callback_runner_id_status(self, topic, payload): - # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload)) - # logging.info(f"FedMLDebug - Receive: topic ({topic}), payload ({payload})") - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json["run_id"] - edge_id = str(topic).split("/")[-2].split('_')[-1] - status = request_json["status"] - run_id_str = str(run_id) - - self.save_training_status( - edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED - if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else status) - - if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED: - completed_event = self.run_process_completed_event_map.get(run_id_str, None) - if completed_event is not None: - completed_event.set() - - # Stop client with multiprocessing mode - client_runner = FedMLClientRunner( - self.args, - edge_id=edge_id, - request_json=request_json, - agent_config=self.agent_config, - run_id=run_id, - ) - client_runner.device_status = status - client_runner.message_center = self.message_center - client_runner.mlops_metrics = self.mlops_metrics - client_runner.cleanup_client_with_status() - - running_json = 
self.running_request_json.get(run_id_str) - if running_json is None: - try: - current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id) - running_json = json.loads(current_job.running_json) - except Exception as e: - logging.error(f"Failed to get running json with Exception {e}. Traceback: {traceback.format_exc()}") - - if running_json is not None: - job_type = JobRunnerUtils.parse_job_type(running_json) - if not SchedulerConstants.is_deploy_job(job_type): - logging.info(f"[run/device][{run_id}/{edge_id}] Release gpu resource when run ended.") - self.cleanup_containers_and_release_gpus(run_id, edge_id) - - run_process = self.run_process_map.get(run_id_str, None) - if run_process is not None: - if run_process.pid is not None: - RunProcessUtils.kill_process(run_process.pid) - - # Terminate the run docker container if exists - try: - container_name = JobRunnerUtils.get_run_container_name(run_id) - docker_client = JobRunnerUtils.get_docker_client(DockerArgs()) - logging.info(f"Terminating the run docker container {container_name} if exists...") - JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client) - except Exception as e: - logging.error(f"Error occurred when terminating docker container." - f"Exception: {e}, Traceback: {traceback.format_exc()}.") - - self.run_process_map.pop(run_id_str) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id) - - def callback_report_current_status(self, topic, payload): - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - self.send_agent_active_msg() - if self.general_edge_id is not None: - self.send_agent_active_msg(self.general_edge_id) - - @staticmethod - def process_ota_upgrade_msg(): - os.system("pip install -U fedml") - - @staticmethod - def callback_client_ota_msg(topic, payload): - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - request_json = json.loads(payload) - cmd = request_json["cmd"] - - if cmd == ClientConstants.FEDML_OTA_CMD_UPGRADE: - FedMLClientRunner.process_ota_upgrade_msg() - # Process(target=FedMLClientRunner.process_ota_upgrade_msg).start() - raise Exception("After upgraded, restart runner...") - elif cmd == ClientConstants.FEDML_OTA_CMD_RESTART: - raise Exception("Restart runner...") - - def get_all_run_process_list_map(self): - run_process_dict = dict() - for run_id_str, process in self.run_process_map.items(): - cur_run_process_list = ClientConstants.get_learning_process_list(run_id_str) - run_process_dict[run_id_str] = cur_run_process_list - - return run_process_dict - - def callback_report_device_info(self, topic, payload): - payload_json = json.loads(payload) - server_id = payload_json.get("server_id", 0) - run_id = payload_json.get("run_id", 0) - listen_edge_id = str(topic).split("/")[-1] - context = payload_json.get("context", None) - need_gpu_info = payload_json.get("need_gpu_info", False) - need_running_process_list = payload_json.get("need_running_process_list", False) - response_topic = f"client/server/response_device_info/{server_id}" - if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \ - self.model_device_server_id is not None: - if not need_gpu_info: - device_info_json = { - "edge_id": listen_edge_id, - "fedml_version": fedml.__version__, - "user_id": self.args.user - } - else: - total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, gpu_cores_total, \ - 
gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = sys_utils.get_sys_realtime_stats() - host_ip = sys_utils.get_host_ip() - host_port = sys_utils.get_available_port() - gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(self.edge_id) - gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids) - gpu_cores_available = len(gpu_available_ids) - gpu_list = sys_utils.get_gpu_list() - device_info_json = { - "edge_id": listen_edge_id, - "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2), - "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "cpuUtilization": round(cup_utilization, 2), - "cpuCores": cpu_cores, - "gpuCoresTotal": gpu_cores_total, - "gpuCoresAvailable": gpu_cores_available, - "gpu_available_ids": gpu_available_ids, - "gpu_list": gpu_list, - "node_ip": host_ip, - "node_port": host_port, - "networkTraffic": sent_bytes + recv_bytes, - "updateTime": int(MLOpsUtils.get_ntp_time()), - "fedml_version": fedml.__version__, - "user_id": self.args.user - } - if need_running_process_list: - device_info_json["run_process_list_map"] = self.get_all_run_process_list_map() - salve_device_ids = list() - for model_client_edge_id in self.model_device_client_edge_id_list: - salve_device_ids.append(model_client_edge_id) - response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0], - "slave_device_id_list": salve_device_ids, - "master_device_id": self.model_device_server_id, - "run_id": run_id, "edge_id": listen_edge_id, - "edge_info": device_info_json} - if context is not None: - response_payload["context"] = context - self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id) - - def callback_client_logout(self, topic, payload): - payload_json = json.loads(payload) - secret = payload_json.get("auth", None) - if secret is None or str(secret) != "246b1be6-0eeb-4b17-b118-7d74de1975d4": - return - logging.info("Received the logout request.") - if self.run_process_event is not None: - self.run_process_event.set() - if self.run_process_completed_event is not None: - self.run_process_completed_event.set() - self.disable_client_login = True - time.sleep(3) - os.system("fedml logout") - - def save_training_status(self, edge_id, training_status): - self.current_training_status = training_status - ClientConstants.save_training_infos(edge_id, training_status) - - @staticmethod - def get_gpu_machine_id(): - gpu_list = sys_utils.get_gpu_list() - gpu_uuids = "" - if len(gpu_list) > 0: - for gpu in gpu_list: - gpu_uuids += gpu.get("uuid", "") - else: - gpu_uuids = str(uuid.uuid4()) - device_id_combination = \ - f"{FedMLClientRunner.get_machine_id()}-{hex(uuid.getnode())}-{gpu_uuids}" - device_id = security_utils.get_content_hash(device_id_combination) - return device_id - - @staticmethod - def get_device_id(use_machine_id=False): - device_file_path = os.path.join(ClientConstants.get_data_dir(), - ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME) - file_for_device_id = os.path.join(device_file_path, "devices.id") - if not os.path.exists(device_file_path): - os.makedirs(device_file_path, exist_ok=True) - elif os.path.exists(file_for_device_id): - with open(file_for_device_id, 'r', encoding='utf-8') as f: - device_id_from_file = f.readline() - if device_id_from_file is not None and device_id_from_file != "": - return device_id_from_file - - if platform.system() 
== "Darwin": - cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \ - "|awk -F':' '{print $2}' " - device_id = os.popen(cmd_get_serial_num).read() - device_id = device_id.replace('\n', '').replace(' ', '') - if device_id is None or device_id == "": - if not use_machine_id: - device_id = hex(uuid.getnode()) - else: - device_id = FedMLClientRunner.get_gpu_machine_id() - else: - device_id = "0x" + device_id - else: - if "nt" in os.name: - - def get_uuid(): - guid = "" - try: - cmd = "wmic csproduct get uuid" - guid = str(subprocess.check_output(cmd)) - pos1 = guid.find("\\n") + 2 - guid = guid[pos1:-15] - except Exception as ex: - logging.error(f"Failed to get uuid with Exception {ex}. Traceback: {traceback.format_exc()}") - pass - return str(guid) - - device_id = str(get_uuid()) - logging.info(device_id) - elif "posix" in os.name: - device_id = sys_utils.get_device_id_in_docker() - if device_id is None: - if not use_machine_id: - device_id = hex(uuid.getnode()) - else: - device_id = device_id = FedMLClientRunner.get_gpu_machine_id() - else: - device_id = sys_utils.run_subprocess_open( - "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split() - ) - device_id = hex(device_id) - - if device_id is not None and device_id != "": - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - else: - device_id = hex(uuid.uuid4()) - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - - return device_id - - @staticmethod - def get_machine_id(): - try: - import machineid - return machineid.id().replace('\n', '').replace('\r\n', '').strip() - except Exception as e: - logging.error(f"Failed to get machine id with Exception {e}. 
Traceback: {traceback.format_exc()}") - return hex(uuid.getnode()) - - @staticmethod - def bind_account_and_device_id(url, account_id, device_id, os_name, api_key="", role="client"): - ip = requests.get('https://checkip.amazonaws.com').text.strip() - fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \ - cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \ - gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info() - host_name = sys_utils.get_host_name() - json_params = { - "accountid": account_id, - "deviceid": device_id, - "type": os_name, - "state": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - "processor": cpu_info, - "core_type": cpu_info, - "network": "", - "role": role, - "os_ver": os_ver, - "memory": total_mem, - "ip": ip, - "api_key": api_key, - "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver, - "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver, - "mpi_installed": mpi_installed, "cpu_usage": cpu_usage, - "available_mem": available_mem, "total_mem": total_mem, - "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name} - } - if gpu_count > 0: - if gpu_total_mem is not None: - json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem - else: - json_params["gpu"] = gpu_info if gpu_info is not None else "" - json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else "" - if gpu_available_mem is not None: - json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem - if gpu_total_mem is not None: - json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem - - json_params["extra_infos"]["gpu_count"] = gpu_count - json_params["extra_infos"]["gpu_vendor"] = gpu_vendor - json_params["extra_infos"]["gpu_device_name"] = gpu_device_name - - gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count) - gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0 - gpu_list = sys_utils.get_gpu_list() - json_params["extra_infos"]["gpu_available_count"] = gpu_available_count - json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list - json_params["extra_infos"]["gpu_list"] = gpu_list - else: - json_params["gpu"] = "None" - json_params["extra_infos"]["gpu_available_count"] = 0 - json_params["extra_infos"]["gpu_available_id_list"] = [] - json_params["extra_infos"]["gpu_list"] = [] - - _, cert_path = MLOpsConfigs.get_request_params() - if cert_path is not None: - try: - requests.session().verify = cert_path - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - except requests.exceptions.SSLError as err: - logging.error( - f"Failed to bind account and device id with error: {err}, traceback: {traceback.format_exc()}") - MLOpsConfigs.install_root_ca_file() - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - else: - response = requests.post(url, json=json_params, headers={"Connection": "close"}) - edge_id, user_name, extra_url, general_edge_id = -1, None, None, None - if response.status_code != 200: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - pass - else: - # print("url = {}, response = {}".format(url, response)) - 
status_code = response.json().get("code") - if status_code == "SUCCESS": - edge_id = response.json().get("data").get("id") - user_name = response.json().get("data").get("userName", None) - extra_url = response.json().get("data").get("url", None) - general_edge_id = response.json().get("data").get("general_edge_id", None) - if edge_id is None or edge_id <= 0: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - else: - if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR: - raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR) - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - return -1, None, None, None - return edge_id, user_name, extra_url, general_edge_id - - def fetch_configs(self): - return MLOpsConfigs.fetch_all_configs() - - def send_agent_active_msg(self, edge_id): - active_topic = "flclient_agent/active" - status = MLOpsStatus.get_instance().get_client_agent_status(edge_id) - if ( - status is not None - and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE - and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE - ): - return - - try: - current_job = FedMLClientDataInterface.get_instance().get_job_by_id(self.run_id) - except Exception as e: - logging.error(f"Failed to get current job with Exception {e}. Traceback: {traceback.format_exc()}") - current_job = None - if current_job is None: - if status is not None and status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE: - status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE - else: - return - else: - status = ClientConstants.get_device_state_from_run_edge_state(current_job.status) - active_msg = {"ID": edge_id, "status": status} - MLOpsStatus.get_instance().set_client_agent_status(edge_id, status) - self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg)) - logging.info(f"Send agent active msg {active_msg}") - - def recover_start_train_msg_after_upgrading(self): - try: - current_job = FedMLClientDataInterface.get_instance().get_current_job() - if current_job is not None and \ - current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING: - logging.info("start training after upgrading.") - topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train" - self.callback_start_train(topic_start_train, current_job.running_json) - except Exception as e: - logging.error(f"recover starting train message after upgrading failed with exception {e}, " - f"Traceback {traceback.format_exc()}") - - def on_agent_mqtt_connected(self, mqtt_client_object): - # The MQTT message topic format is as follows: // - - # Setup MQTT message listener for starting training - topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train" - self.add_message_listener(topic_start_train, self.callback_start_train) - self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center) - - # Setup MQTT message listener for stopping training - topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train" - self.add_message_listener(topic_stop_train, self.callback_stop_train) - self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center) - - - # Setup MQTT message listener for client status switching - topic_client_status = "fl_client/flclient_agent_" + str(self.edge_id) + "/status" - self.add_message_listener(topic_client_status, self.callback_runner_id_status) - 
self.mqtt_mgr.add_message_listener(topic_client_status, self.listener_message_dispatch_center) - - # Setup MQTT message listener to report current device status. - topic_report_status = "mlops/report_device_status" - self.add_message_listener(topic_report_status, self.callback_report_current_status) - self.mqtt_mgr.add_message_listener(topic_report_status, self.listener_message_dispatch_center) - - # Setup MQTT message listener to OTA messages from the MLOps. - topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota" - self.add_message_listener(topic_ota_msg, self.callback_client_ota_msg) - self.mqtt_mgr.add_message_listener(topic_ota_msg, self.listener_message_dispatch_center) - - # Setup MQTT message listener to OTA messages from the MLOps. - topic_request_device_info = "server/client/request_device_info/" + str(self.edge_id) - self.add_message_listener(topic_request_device_info, self.callback_report_device_info) - self.mqtt_mgr.add_message_listener(topic_request_device_info, self.listener_message_dispatch_center) - - # Setup MQTT message listener to logout from MLOps. - topic_client_logout = "mlops/client/logout/" + str(self.edge_id) - self.add_message_listener(topic_client_logout, self.callback_client_logout) - self.mqtt_mgr.add_message_listener(topic_client_logout, self.listener_message_dispatch_center) - - # Subscribe topics for starting train, stopping train and fetching client status. - mqtt_client_object.subscribe(topic_start_train, qos=2) - mqtt_client_object.subscribe(topic_stop_train, qos=2) - mqtt_client_object.subscribe(topic_client_status, qos=2) - mqtt_client_object.subscribe(topic_report_status, qos=2) - mqtt_client_object.subscribe(topic_ota_msg, qos=2) - mqtt_client_object.subscribe(topic_request_device_info, qos=2) - mqtt_client_object.subscribe(topic_client_logout, qos=2) - - self.subscribed_topics.clear() - self.subscribed_topics.append(topic_start_train) - self.subscribed_topics.append(topic_stop_train) - self.subscribed_topics.append(topic_client_status) - self.subscribed_topics.append(topic_report_status) - self.subscribed_topics.append(topic_ota_msg) - self.subscribed_topics.append(topic_request_device_info) - self.subscribed_topics.append(topic_client_logout) - - # Subscribe the messages for federated learning. - self.subscribe_fl_msgs() - - # Broadcast the first active message. 
- self.send_agent_active_msg(self.edge_id) - if self.general_edge_id is not None: - self.send_agent_active_msg(self.general_edge_id) - - # Echo results - MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout() - worker_deploy_id_list = [modeld_device_clint.edge_id for index, modeld_device_clint in - enumerate(self.model_device_client_list)] - print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!") - print(f"Your FedML Edge ID is {str(self.edge_id)}, unique device ID is {str(self.unique_device_id)}, " - f"master deploy ID is {str(self.model_device_server.edge_id)}, " - f"worker deploy ID is {worker_deploy_id_list}" - ) - if self.edge_extra_url is not None and self.edge_extra_url != "": - print(f"You may visit the following url to fill in more information with your device.\n" - f"{self.edge_extra_url}") - MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=False) - - from fedml.core.mlops import sync_deploy_id - sync_deploy_id( - self.edge_id, self.model_device_server.edge_id, worker_deploy_id_list) - - # Start the message center for listener - self.start_listener(sender_message_queue=self.message_center.get_message_queue(), - agent_config=self.agent_config) - - def subscribe_fl_msgs(self): - if self.general_edge_id is None: - return - - # Setup MQTT message listener for starting training - topic_start_train = "flserver_agent/" + str(self.general_edge_id) + "/start_train" - self.add_message_listener(topic_start_train, self.callback_start_train) - self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center) - - # Setup MQTT message listener for stopping training - topic_stop_train = "flserver_agent/" + str(self.general_edge_id) + "/stop_train" - self.add_message_listener(topic_stop_train, self.callback_stop_train) - self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center) - - # Setup MQTT message listener for client status switching - topic_client_status = "fl_client/flclient_agent_" + str(self.general_edge_id) + "/status" - self.add_message_listener(topic_client_status, self.callback_runner_id_status) - self.mqtt_mgr.add_message_listener(topic_client_status, self.listener_message_dispatch_center) - - # Setup MQTT message listener to OTA messages from the MLOps. - topic_request_device_info = "server/client/request_device_info/" + str(self.general_edge_id) - self.add_message_listener(topic_request_device_info, self.callback_report_device_info) - self.mqtt_mgr.add_message_listener(topic_request_device_info, self.listener_message_dispatch_center) - - # Subscribe topics for starting train, stopping train and fetching client status. 
- self.mqtt_mgr.subscribe_msg(topic_start_train) - self.mqtt_mgr.subscribe_msg(topic_stop_train) - self.mqtt_mgr.subscribe_msg(topic_client_status) - self.mqtt_mgr.subscribe_msg(topic_request_device_info) - - self.subscribed_topics.append(topic_start_train) - self.subscribed_topics.append(topic_stop_train) - self.subscribed_topics.append(topic_client_status) - self.subscribed_topics.append(topic_request_device_info) - - def on_agent_mqtt_disconnected(self, mqtt_client_object): - MLOpsStatus.get_instance().set_client_agent_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE - ) - pass - - def setup_agent_mqtt_connection(self, service_config): - # Setup MQTT connection - self.mqtt_mgr = MqttManager( - service_config["mqtt_config"]["BROKER_HOST"], - service_config["mqtt_config"]["BROKER_PORT"], - service_config["mqtt_config"]["MQTT_USER"], - service_config["mqtt_config"]["MQTT_PWD"], - service_config["mqtt_config"]["MQTT_KEEPALIVE"], - f"FedML_ClientAgent_Daemon_@{self.user_name}@_@{self.args.current_device_id}@_@{str(uuid.uuid4())}@", - "flclient_agent/last_will_msg", - json.dumps({"ID": self.edge_id, "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE}) - ) - self.agent_config = service_config - - # Init local database - FedMLClientDataInterface.get_instance().create_job_table() - - # Start the message center to process edge related messages. - self.setup_message_center() - - # Start local API services - client_api_cmd = "fedml.computing.scheduler.slave.client_api:api" - client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd) - if client_api_pids is None or len(client_api_pids) <= 0: - python_program = get_python_program() - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - self.local_api_process = ClientConstants.exec_console_with_script( - "{} -m uvicorn {} --host 0.0.0.0 --port {} " - "--reload --reload-delay 3 --reload-dir {} --log-level critical".format( - python_program, client_api_cmd, ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir), - should_capture_stdout=False, - should_capture_stderr=False - ) - # if self.local_api_process is not None and self.local_api_process.pid is not None: - # print(f"Client local API process id {self.local_api_process.pid}") - - # Setup MQTT connected listener - self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected) - self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected) - self.mqtt_mgr.connect() - - # Report the IDLE status to MLOps - self.mlops_metrics.report_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE) - MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE) - - # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor() - self.recover_start_train_msg_after_upgrading() - - infer_host = os.getenv("FEDML_INFER_HOST", None) - infer_redis_addr = os.getenv("FEDML_INFER_REDIS_ADDR", None) - infer_redis_port = os.getenv("FEDML_INFER_REDIS_PORT", None) - infer_redis_password = os.getenv("FEDML_INFER_REDIS_PASSWORD", None) - model_client_num = os.getenv("FEDML_MODEL_WORKER_NUM", None) - os.environ["FEDML_CURRENT_EDGE_ID"] = str(self.edge_id) - - if not ComputeCacheManager.get_instance().set_redis_params(): - os.environ["FEDML_DISABLE_REDIS_CONNECTION"] = "1" - - if self.model_device_client_edge_id_list is None: - self.model_device_client_edge_id_list = list() - if self.model_device_client_list is None: - 
model_client_num = 1 if model_client_num is None else int(model_client_num) - self.model_device_client_list = list() - for client_index in range(model_client_num): - model_device_client = FedMLModelDeviceClientRunner( - self.args, f"{self.args.current_device_id}_{client_index + 1}", self.args.os_name, - self.args.is_from_docker, self.agent_config) - if infer_host is not None: - model_device_client.infer_host = infer_host - if infer_redis_addr is not None: - model_device_client.redis_addr = infer_redis_addr - if infer_redis_port is not None: - model_device_client.redis_port = infer_redis_port - if infer_redis_password is not None: - model_device_client.redis_password = infer_redis_password - model_device_client.start() - self.model_device_client_list.append(model_device_client) - self.model_device_client_edge_id_list.append(model_device_client.get_edge_id()) - - if self.model_device_server is None: - self.model_device_server = FedMLModelDeviceServerRunner(self.args, self.args.current_device_id, - self.args.os_name, self.args.is_from_docker, - self.agent_config) - if infer_host is not None: - self.model_device_server.infer_host = infer_host - if infer_redis_addr is not None: - self.model_device_server.redis_addr = infer_redis_addr - if infer_redis_port is not None: - self.model_device_server.redis_port = infer_redis_port - if infer_redis_password is not None: - self.model_device_server.redis_password = infer_redis_password - - self.model_device_server.start() - self.model_device_server_id = self.model_device_server.get_edge_id() - - JobCleanup.get_instance().sync_data_on_startup(self.edge_id) - - os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server.get_edge_id()) - os.environ["FEDML_DEPLOY_WORKER_IDS"] = str([client.get_edge_id() for client in self.model_device_client_list]) - self.mlops_metrics.stop_device_realtime_perf() - self.mlops_metrics.report_device_realtime_perf(self.args, service_config["mqtt_config"]) - - def start_agent_mqtt_loop(self): - # Start MQTT message loop - try: - self.mqtt_mgr.loop_forever() - except Exception as e: - logging.error(f"Errors in the MQTT loop: Exception {e}, Traceback: {traceback.format_exc()}") - if str(e) == "Restarting after upgraded...": - logging.info("Restarting after upgraded...") - else: - logging.info("Client tracing: {}".format(traceback.format_exc())) - finally: - print("finally") - login_exit_file = os.path.join(ClientConstants.get_log_file_dir(), "exited.log") - with open(login_exit_file, "w") as f: - f.writelines(f"{os.getpid()}.") - - self.stop_agent() - - time.sleep(5) - sys_utils.cleanup_all_fedml_client_login_processes( - ClientConstants.CLIENT_LOGIN_PROGRAM, clean_process_group=False) - sys.exit(1) - - def stop_agent(self): - if self.run_process_event is not None: - self.run_process_event.set() - - if self.model_device_server is not None: - self.model_device_server.stop() - self.model_device_server = None - - if self.model_device_client_list is not None: - for model_client in self.model_device_client_list: - model_client.stop() - self.model_device_client_list.clear() - self.model_device_client_list = None - - if self.mqtt_mgr is not None: - try: - for topic in self.subscribed_topics: - self.mqtt_mgr.unsubscribe_msg(topic) - except Exception as e: - logging.error(f"Unsubscribe topics error: {e}, Traceback: {traceback.format_exc()}") - pass - - self.mqtt_mgr.loop_stop() - self.mqtt_mgr.disconnect() - - self.release_message_center() - - def get_runner(self): - runner = FedMLClientRunner( - self.args, edge_id=self.edge_id, 
request_json=self.request_json, - agent_config=self.agent_config, run_id=self.run_id, - cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str - ) - runner.edge_user_name = self.user_name - runner.edge_extra_url = self.edge_extra_url - runner.unique_device_id = self.unique_device_id - runner.user_name = self.user_name - runner.general_edge_id = self.general_edge_id - runner.model_device_client_edge_id_list = self.model_device_client_edge_id_list - runner.model_device_server_id = self.model_device_server_id - return runner diff --git a/python/fedml/computing/scheduler/slave/launch_job_runner.py b/python/fedml/computing/scheduler/slave/launch_job_runner.py new file mode 100755 index 0000000000..07533af399 --- /dev/null +++ b/python/fedml/computing/scheduler/slave/launch_job_runner.py @@ -0,0 +1,41 @@ +from abc import ABC + +from .base_slave_job_runner import FedMLBaseSlaveJobRunner +from .client_constants import ClientConstants + + +class FedMLLaunchSlaveJobRunner(FedMLBaseSlaveJobRunner, ABC): + + def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0, + cuda_visible_gpu_ids_str=None): + FedMLBaseSlaveJobRunner.__init__( + self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id, + cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ClientConstants.get_data_dir(), + agent_package_download_dir=ClientConstants.get_package_download_dir(), + agent_package_unzip_dir=ClientConstants.get_package_unzip_dir(), + agent_log_file_dir=ClientConstants.get_log_file_dir() + ) + + # Override + def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None): + return FedMLLaunchSlaveJobRunner( + args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id + ) + + # Override + def _generate_extend_queue_list(self): + return None + + # Override + def get_download_package_info(self, packages_config=None): + return super().get_download_package_info(packages_config) + + # Override + def run_impl( + self, run_extend_queue_list, sender_message_center, + listener_message_queue, status_center_queue + ): + super().run_impl( + run_extend_queue_list, sender_message_center, + listener_message_queue, status_center_queue) + diff --git a/python/fedml/computing/scheduler/slave/launch_job_runner_manager.py b/python/fedml/computing/scheduler/slave/launch_job_runner_manager.py new file mode 100755 index 0000000000..3f65438f9e --- /dev/null +++ b/python/fedml/computing/scheduler/slave/launch_job_runner_manager.py @@ -0,0 +1,22 @@ + +from fedml.core.common.singleton import Singleton +from .base_slave_job_runner_manager import FedMLBaseSlaveJobRunnerManager +from .launch_job_runner import FedMLLaunchSlaveJobRunner + + +class FedMLLaunchJobRunnerManager(FedMLBaseSlaveJobRunnerManager, Singleton): + def __init__(self): + FedMLBaseSlaveJobRunnerManager.__init__(self) + + @staticmethod + def get_instance(): + return FedMLLaunchJobRunnerManager() + + # Override + def _generate_job_runner_instance( + self, args, run_id=None, request_json=None, agent_config=None, edge_id=None + ): + return FedMLLaunchSlaveJobRunner( + args, run_id=run_id, request_json=request_json, agent_config=agent_config, edge_id=edge_id) + + diff --git a/python/fedml/computing/scheduler/slave/slave_agent.py b/python/fedml/computing/scheduler/slave/slave_agent.py new file mode 100755 index 0000000000..e9c8b2fc93 --- /dev/null +++ b/python/fedml/computing/scheduler/slave/slave_agent.py @@ -0,0 +1,26 @@ + 
+from .base_slave_agent import FedMLBaseSlaveAgent +from .client_constants import ClientConstants +from .client_data_interface import FedMLClientDataInterface +from .slave_protocol_manager import FedMLLaunchSlaveProtocolManager + + +class FedMLLaunchSlaveAgent(FedMLBaseSlaveAgent): + def __init__(self): + FedMLBaseSlaveAgent.__init__(self) + + # Override + def _get_log_file_dir(self): + return ClientConstants.get_log_file_dir() + + # Override + def _save_agent_info(self, unique_device_id, edge_id): + ClientConstants.save_runner_infos(unique_device_id, edge_id) + + # Override + def _init_database(self): + FedMLClientDataInterface.get_instance().create_job_table() + + # Override + def _generate_protocol_manager_instance(self, args, agent_config=None): + return FedMLLaunchSlaveProtocolManager(args, agent_config=agent_config) diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py new file mode 100755 index 0000000000..cd8e40d7e8 --- /dev/null +++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py @@ -0,0 +1,104 @@ +import copy +import os +from ..comm_utils.job_cleanup import JobCleanup +from .base_slave_protocol_manager import FedMLBaseSlaveProtocolManager +from .launch_job_runner_manager import FedMLLaunchJobRunnerManager +from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner +from ..model_scheduler.model_device_client import FedMLModelDeviceClientRunner + + +class FedMLLaunchSlaveProtocolManager(FedMLBaseSlaveProtocolManager): + + def __init__(self, args, agent_config=None): + FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config) + + # Override + def generate_topics(self): + super().generate_topics() + + # Override + def add_protocol_handler(self): + super().add_protocol_handler() + + # Override + def _generate_protocol_manager_instance(self, args, agent_config=None): + return FedMLLaunchSlaveProtocolManager(args, agent_config=agent_config) + + # Override + def _get_job_runner_manager(self): + return FedMLLaunchJobRunnerManager.get_instance() + + # Override + def _process_connection_ready(self): + from fedml.core.mlops import sync_deploy_id + sync_deploy_id( + self.edge_id, self.model_device_server.edge_id, self.model_device_client_edge_id_list) + + # Override + def _process_connection_lost(self): + pass + + # Override + def _init_extra_items(self): + super()._init_extra_items() + + # Sync the data when startup + JobCleanup.get_instance().sync_data_on_startup(self.args.edge_id) + + # Get the environment variables + infer_host = os.getenv("FEDML_INFER_HOST", None) + infer_redis_addr = os.getenv("FEDML_INFER_REDIS_ADDR", None) + infer_redis_port = os.getenv("FEDML_INFER_REDIS_PORT", None) + infer_redis_password = os.getenv("FEDML_INFER_REDIS_PASSWORD", None) + model_client_num = os.getenv("FEDML_MODEL_WORKER_NUM", None) + + # Start deploy master agent and slave agent + in_args = copy.deepcopy(self.args) + if self.model_device_client_edge_id_list is None: + self.model_device_client_edge_id_list = list() + if self.model_device_client_list is None: + model_client_num = 1 if model_client_num is None else int(model_client_num) + self.model_device_client_list = list() + for client_index in range(model_client_num): + model_device_client = FedMLModelDeviceClientRunner( + in_args, f"{in_args.current_device_id}_{client_index + 1}", in_args.os_name, + in_args.is_from_docker, self.agent_config) + if infer_host is not None: + 
model_device_client.infer_host = infer_host + if infer_redis_addr is not None: + model_device_client.redis_addr = infer_redis_addr + if infer_redis_port is not None: + model_device_client.redis_port = infer_redis_port + if infer_redis_password is not None: + model_device_client.redis_password = infer_redis_password + model_device_client.start() + self.model_device_client_list.append(model_device_client) + self.model_device_client_edge_id_list.append(model_device_client.get_edge_id()) + + self.args = copy.deepcopy(in_args) + if self.model_device_server is None: + self.model_device_server = FedMLModelDeviceServerRunner(in_args, in_args.current_device_id, + in_args.os_name, in_args.is_from_docker, + self.agent_config) + if infer_host is not None: + self.model_device_server.infer_host = infer_host + if infer_redis_addr is not None: + self.model_device_server.redis_addr = infer_redis_addr + if infer_redis_port is not None: + self.model_device_server.redis_port = infer_redis_port + if infer_redis_password is not None: + self.model_device_server.redis_password = infer_redis_password + + self.model_device_server.start() + self.model_device_server_id = self.model_device_server.get_edge_id() + + # Save the deployed master and worker id list to the environment variable. + os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server_id) + os.environ["FEDML_DEPLOY_WORKER_IDS"] = str(self.model_device_client_edge_id_list) + + # Start the monitor process + self.args = copy.deepcopy(in_args) + self.mlops_metrics.stop_device_realtime_perf() + self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"]) + pass + diff --git a/python/fedml/core/mlops/__init__.py b/python/fedml/core/mlops/__init__.py index 9ccd4d2265..a2e9fcc241 100644 --- a/python/fedml/core/mlops/__init__.py +++ b/python/fedml/core/mlops/__init__.py @@ -17,8 +17,6 @@ from fedml.core.mlops.mlops_configs import MLOpsConfigs from ...computing.scheduler.slave.client_constants import ClientConstants -from ...computing.scheduler.slave.client_runner import FedMLClientRunner -from ...computing.scheduler.master.server_runner import FedMLServerRunner from ...constants import FEDML_TRAINING_PLATFORM_SIMULATION, FEDML_TRAINING_PLATFORM_SIMULATION_TYPE from ...computing.scheduler.master.server_constants import ServerConstants @@ -35,6 +33,8 @@ from ...computing.scheduler.slave.client_data_interface import FedMLClientDataInterface from .mlops_utils import MLOpsUtils from .mlops_constants import MLOpsConstants +from ...computing.scheduler.master.master_protocol_manager import FedMLLaunchMasterProtocolManager +from ...computing.scheduler.scheduler_core.account_manager import FedMLAccountManager FEDML_MLOPS_API_RESPONSE_SUCCESS_CODE = "SUCCESS" @@ -50,6 +50,8 @@ "log_aggregation_failed_status", "log_training_failed_status", "log_endpoint_status", + "MLOpsConfigs", + "sync_deploy_id" ] @@ -1244,12 +1246,13 @@ def bind_simulation_device(args, userid): setattr(args, "version", version) if args.rank == 0: setattr(args, "log_file_dir", ServerConstants.get_log_file_dir()) - setattr(args, "device_id", FedMLServerRunner.get_device_id()) - runner = FedMLServerRunner(args) + setattr(args, "device_id", + FedMLAccountManager.get_device_id(ServerConstants.get_data_dir())) + runner = FedMLLaunchMasterProtocolManager(args) else: setattr(args, "log_file_dir", ClientConstants.get_log_file_dir()) - setattr(args, "device_id", FedMLClientRunner.get_device_id()) - runner = FedMLClientRunner(args) + setattr(args, "device_id", 
diff --git a/python/fedml/core/mlops/__init__.py b/python/fedml/core/mlops/__init__.py
index 9ccd4d2265..a2e9fcc241 100644
--- a/python/fedml/core/mlops/__init__.py
+++ b/python/fedml/core/mlops/__init__.py
@@ -17,8 +17,6 @@
 from fedml.core.mlops.mlops_configs import MLOpsConfigs
 from ...computing.scheduler.slave.client_constants import ClientConstants
-from ...computing.scheduler.slave.client_runner import FedMLClientRunner
-from ...computing.scheduler.master.server_runner import FedMLServerRunner
 from ...constants import FEDML_TRAINING_PLATFORM_SIMULATION, FEDML_TRAINING_PLATFORM_SIMULATION_TYPE
 from ...computing.scheduler.master.server_constants import ServerConstants
@@ -35,6 +33,9 @@
 from ...computing.scheduler.slave.client_data_interface import FedMLClientDataInterface
 from .mlops_utils import MLOpsUtils
 from .mlops_constants import MLOpsConstants
+from ...computing.scheduler.master.master_protocol_manager import FedMLLaunchMasterProtocolManager
+from ...computing.scheduler.slave.slave_protocol_manager import FedMLLaunchSlaveProtocolManager
+from ...computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
 
 FEDML_MLOPS_API_RESPONSE_SUCCESS_CODE = "SUCCESS"
@@ -50,6 +51,8 @@
     "log_aggregation_failed_status",
     "log_training_failed_status",
     "log_endpoint_status",
+    "MLOpsConfigs",
+    "sync_deploy_id"
 ]
@@ -1244,12 +1247,13 @@ def bind_simulation_device(args, userid):
     setattr(args, "version", version)
     if args.rank == 0:
         setattr(args, "log_file_dir", ServerConstants.get_log_file_dir())
-        setattr(args, "device_id", FedMLServerRunner.get_device_id())
-        runner = FedMLServerRunner(args)
+        setattr(args, "device_id",
+                FedMLAccountManager.get_device_id(ServerConstants.get_data_dir()))
+        runner = FedMLLaunchMasterProtocolManager(args)
     else:
         setattr(args, "log_file_dir", ClientConstants.get_log_file_dir())
-        setattr(args, "device_id", FedMLClientRunner.get_device_id())
-        runner = FedMLClientRunner(args)
+        setattr(args, "device_id",
+                FedMLAccountManager.get_device_id(ClientConstants.get_data_dir()))
+        runner = FedMLLaunchSlaveProtocolManager(args)
     setattr(args, "config_version", version)
     setattr(args, "cloud_region", "")
@@ -1326,10 +1330,10 @@ def fetch_config(args, version="release"):
     setattr(args, "version", version)
     if args.rank == 0:
         setattr(args, "log_file_dir", ServerConstants.get_log_file_dir())
-        setattr(args, "device_id", FedMLServerRunner.get_device_id())
+        setattr(args, "device_id", FedMLAccountManager.get_device_id(ServerConstants.get_data_dir()))
     else:
         setattr(args, "log_file_dir", ClientConstants.get_log_file_dir())
-        setattr(args, "device_id", FedMLClientRunner.get_device_id())
+        setattr(args, "device_id", FedMLAccountManager.get_device_id(ClientConstants.get_data_dir()))
     setattr(args, "config_version", version)
     setattr(args, "cloud_region", "")
diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py
index b83e80a4dd..e0410a880d 100644
--- a/python/fedml/core/mlops/mlops_configs.py
+++ b/python/fedml/core/mlops/mlops_configs.py
@@ -2,8 +2,6 @@
 import time
 from enum import Enum
-
-
 import certifi
 import requests
 import fedml
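[Reviewer note] With the runner classes deleted, every device-id lookup now funnels through the shared account manager (the original hunk referenced an undefined FedMLSlaveProtocolManager and dropped the data-dir argument in one call site; both are corrected above). A minimal sketch of the new call pattern, assuming FedMLAccountManager.get_device_id only needs the role-specific data directory:

    from fedml.computing.scheduler.scheduler_core.account_manager import FedMLAccountManager
    from fedml.computing.scheduler.master.server_constants import ServerConstants
    from fedml.computing.scheduler.slave.client_constants import ClientConstants

    server_device_id = FedMLAccountManager.get_device_id(ServerConstants.get_data_dir())  # rank 0
    client_device_id = FedMLAccountManager.get_device_id(ClientConstants.get_data_dir())  # rank > 0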
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py
index d488ef27a4..4ed3cd1b6f 100644
--- a/python/fedml/core/mlops/mlops_device_perfs.py
+++ b/python/fedml/core/mlops/mlops_device_perfs.py
@@ -6,7 +6,7 @@
 import uuid
 from os.path import expanduser
 
-import multiprocess as multiprocessing
+import multiprocessing
 import psutil
 
 from fedml.computing.scheduler.comm_utils import sys_utils
@@ -19,22 +19,14 @@
 from .device_info_report_protocol import FedMLDeviceInfoReportProtocol
 
 ROLE_DEVICE_INFO_REPORTER = 1
-ROLE_ENDPOINT_MASTER = 2
-ROLE_ENDPOINT_SLAVE = 3
-ROLE_RUN_MASTER = 4
-ROLE_RUN_SLAVE = 5
-ROLE_ENDPOINT_LOGS = 6
+ROLE_DEVICE_JOB_MONITOR = 2
 
 
 class MLOpsDevicePerfStats(object):
     def __init__(self):
         self.device_realtime_stats_process = None
         self.device_realtime_stats_event = None
-        self.monitor_run_slave_process = None
-        self.monitor_run_master_process = None
-        self.monitor_endpoint_master_process = None
-        self.monitor_endpoint_slave_process = None
-        self.monitor_endpoint_logs_process = None
+        self.device_monitor_process = None
         self.args = None
         self.device_id = None
         self.run_id = None
@@ -70,36 +62,15 @@ def setup_realtime_stats_process(self, sys_args):
 
         self.device_realtime_stats_process = multiprocessing.Process(
             target=perf_stats.report_device_realtime_stats_entry,
-            args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER))
+            args=(self.device_realtime_stats_event, ROLE_DEVICE_INFO_REPORTER, self.is_client))
         self.device_realtime_stats_process.start()
 
-        if self.is_client:
-            self.monitor_endpoint_slave_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_ENDPOINT_SLAVE))
-            self.monitor_endpoint_slave_process.start()
-
-            self.monitor_endpoint_master_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_ENDPOINT_MASTER))
-            self.monitor_endpoint_master_process.start()
-
-            self.monitor_run_slave_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_RUN_SLAVE))
-            self.monitor_run_slave_process.start()
-
-            self.monitor_endpoint_logs_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_ENDPOINT_LOGS))
-            self.monitor_endpoint_logs_process.start()
-        else:
-            self.monitor_run_master_process = multiprocessing.Process(
-                target=perf_stats.report_device_realtime_stats_entry,
-                args=(self.device_realtime_stats_event, ROLE_RUN_MASTER))
-            self.monitor_run_master_process.start()
-
-    def report_device_realtime_stats_entry(self, sys_event, role):
+        self.device_monitor_process = multiprocessing.Process(
+            target=perf_stats.report_device_realtime_stats_entry,
+            args=(self.device_realtime_stats_event, ROLE_DEVICE_JOB_MONITOR, self.is_client))
+        self.device_monitor_process.start()
+
+    def report_device_realtime_stats_entry(self, sys_event, role, is_client):
         # print(f"Report device realtime stats, process id {os.getpid()}")
 
         self.device_realtime_stats_event = sys_event
@@ -117,40 +88,38 @@ def report_device_realtime_stats_entry(self, sys_event, role):
         parent_pid = psutil.Process(os.getpid()).ppid()
         sys_stats_obj = SysStats(process_id=parent_pid)
 
-        if role == ROLE_RUN_MASTER:
-            device_info_reporter = FedMLDeviceInfoReportProtocol(run_id=self.run_id, mqtt_mgr=mqtt_mgr)
+        device_info_reporter = FedMLDeviceInfoReportProtocol(run_id=self.run_id, mqtt_mgr=mqtt_mgr)
 
         JobMonitor.get_instance().mqtt_config = self.args.mqtt_config_path
 
         # Notify MLOps with system information.
-        sleep_time_interval = 10
-        time_interval_map = {
-            ROLE_DEVICE_INFO_REPORTER: 10, ROLE_RUN_SLAVE: 60, ROLE_RUN_MASTER: 70,
-            ROLE_ENDPOINT_SLAVE: 80, ROLE_ENDPOINT_MASTER: 90, ROLE_ENDPOINT_LOGS: 30}
+        sleep_time_interval_for_device_info = 60
+        sleep_time_interval_for_client_monitor = 30
+        sleep_time_interval_for_server_monitor = 60
 
         while not self.should_stop_device_realtime_stats():
-            try:
-                time.sleep(time_interval_map[role])
+            if role == ROLE_DEVICE_INFO_REPORTER:
+                time.sleep(sleep_time_interval_for_device_info)
+            elif role == ROLE_DEVICE_JOB_MONITOR:
+                time.sleep(sleep_time_interval_for_client_monitor if is_client
+                           else sleep_time_interval_for_server_monitor)
 
+            try:
                 if role == ROLE_DEVICE_INFO_REPORTER:
                     MLOpsDevicePerfStats.report_gpu_device_info(self.edge_id, mqtt_mgr=mqtt_mgr)
-                elif role == ROLE_RUN_SLAVE:
-                    JobMonitor.get_instance().monitor_slave_run_process_status()
-                elif role == ROLE_RUN_MASTER:
-                    JobMonitor.get_instance().monitor_master_run_process_status(
-                        self.edge_id, device_info_reporter=device_info_reporter)
-                elif role == ROLE_ENDPOINT_SLAVE:
-                    JobMonitor.get_instance().monitor_slave_endpoint_status()
-                elif role == ROLE_ENDPOINT_MASTER:
-                    JobMonitor.get_instance().monitor_master_endpoint_status()
-                elif role == ROLE_ENDPOINT_LOGS:
-                    JobMonitor.get_instance().monitor_endpoint_logs()
+                elif role == ROLE_DEVICE_JOB_MONITOR:
+                    if is_client:
+                        JobMonitor.get_instance().monitor_slave_run_process_status()
+                        JobMonitor.get_instance().monitor_slave_endpoint_status()
+                        JobMonitor.get_instance().monitor_master_endpoint_status()
+                        JobMonitor.get_instance().monitor_endpoint_logs()
+                    else:
+                        JobMonitor.get_instance().monitor_master_run_process_status(
+                            self.edge_id, device_info_reporter=device_info_reporter)
             except Exception as e:
                 logging.error(f"exception {e} when reporting device pref: {traceback.format_exc()}.")
                 pass
 
-            time.sleep(sleep_time_interval)
-
             if role == ROLE_DEVICE_INFO_REPORTER:
                 self.check_fedml_client_parent_process()
 
@@ -201,6 +170,7 @@ def check_fedml_client_parent_process(self):
         if not self.is_client:
             return
 
+        # noinspection PyBroadException
        try:
             home_dir = expanduser("~")
             fedml_ppids_dir = os.path.join(home_dir, ".fedml", "fedml-client", "fedml", "data", "ppids")
@@ -222,13 +192,14 @@ def check_fedml_client_parent_process(self):
                 print(f"Parent client process {file_list} has been killed, so fedml will exit.")
                 logging.info(f"Parent client process {file_list} has been killed, so fedml will exit.")
                 os.system("fedml logout")
-        except Exception as e:
+        except Exception:
             pass
 
     def check_fedml_server_parent_process(self):
         if self.is_client:
             return
 
+        # noinspection PyBroadException
        try:
             home_dir = expanduser("~")
             fedml_ppids_dir = os.path.join(home_dir, ".fedml", "fedml-server", "fedml", "data", "ppids")
@@ -250,5 +221,5 @@ def check_fedml_server_parent_process(self):
             print(f"Parent server process {file_list} has been killed, so fedml will exit.")
             logging.info(f"Parent server process {file_list} has been killed, so fedml will exit.")
             os.system("fedml logout -s")
-        except Exception as e:
+        except Exception:
             pass
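[Reviewer note] The parent-process checks above assume each agent dropped a file named after its parent pid into the ppids directory when it logged in; once every recorded parent has exited, the monitor runs fedml logout (or fedml logout -s on servers). A sketch of the writer side under that assumption (the helper name is hypothetical; the path mirrors the reader above):

    import os
    from os.path import expanduser

    def record_parent_pid(is_client=True):
        # Mirrors the directory scanned by check_fedml_client_parent_process /
        # check_fedml_server_parent_process; the file name is assumed to be the parent pid.
        role_dir = "fedml-client" if is_client else "fedml-server"
        ppids_dir = os.path.join(expanduser("~"), ".fedml", role_dir, "fedml", "data", "ppids")
        os.makedirs(ppids_dir, exist_ok=True)
        with open(os.path.join(ppids_dir, str(os.getppid())), "w") as f:
            f.write("")  # assumption: the monitor only cares that the file exists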
diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py
index 57860ab7cd..ca41df09f2 100644
--- a/python/fedml/core/mlops/mlops_metrics.py
+++ b/python/fedml/core/mlops/mlops_metrics.py
@@ -16,18 +16,12 @@
 
 
 class MLOpsMetrics(object):
-    def __new__(cls, *args, **kw):
-        if not hasattr(cls, "_instance"):
-            orig = super(MLOpsMetrics, cls)
-            cls._instance = orig.__new__(cls, *args, **kw)
-            cls._instance.init()
-        return cls._instance
-
     def __init__(self):
-        pass
+        self.init()
 
     def init(self):
         self.messenger = None
+        self.send_message_func = None
         self.args = None
         self.run_id = None
         self.edge_id = None
@@ -38,8 +32,9 @@ def init(self):
         self.job_perfs = MLOpsJobPerfStats()
         self.device_perfs = MLOpsDevicePerfStats()
 
-    def set_messenger(self, msg_messenger, args=None):
+    def set_messenger(self, msg_messenger, args=None, send_message_func=None):
         self.messenger = msg_messenger
+        self.send_message_func = send_message_func
         if args is not None:
             self.args = args
             self.run_id = args.run_id
@@ -94,7 +89,7 @@ def report_client_device_status_to_web_ui(self, edge_id, status, run_id=0):
         message_json = json.dumps(msg)
         logging.info("report_client_device_status. message_json = %s" % message_json)
         MLOpsStatus.get_instance().set_client_status(edge_id, status)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def common_report_client_training_status(self, edge_id, status, run_id=0):
         # if not self.comm_sanity_check():
@@ -109,7 +104,7 @@ def common_report_client_training_status(self, edge_id, status, run_id=0):
         message_json = json.dumps(msg)
         logging.info("report_client_training_status. message_json = %s" % message_json)
         MLOpsStatus.get_instance().set_client_status(edge_id, status)
-        self.messenger.send_message_json(topic_name, message_json)
+        self.send_message(topic_name, message_json)
 
     def broadcast_client_training_status(self, edge_id, status, is_from_model=False, run_id=0):
         # if not self.comm_sanity_check():
@@ -137,14 +132,14 @@ def common_broadcast_client_training_status(self, edge_id, status, run_id=0):
         msg = {"edge_id": edge_id, "run_id": run_id, "status": status}
         message_json = json.dumps(msg)
         logging.info("broadcast_client_training_status. 
message_json = %s" % message_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def client_send_exit_train_msg(self, run_id, edge_id, status, msg=None): topic_exit_train_with_exception = "flserver_agent/" + str(run_id) + "/client_exit_train_with_exception" msg = {"run_id": run_id, "edge_id": edge_id, "status": status, "msg": msg if msg is not None else ""} message_json = json.dumps(msg) logging.info("client_send_exit_train_msg.") - self.messenger.send_message_json(topic_exit_train_with_exception, message_json) + self.send_message(topic_exit_train_with_exception, message_json) def report_client_id_status(self, edge_id, status, running_json=None, is_from_model=False, server_id="0", run_id=0, msg=""): @@ -172,7 +167,7 @@ def common_report_client_id_status(self, run_id, edge_id, status, server_id="0", msg = {"run_id": run_id, "edge_id": edge_id, "status": status, "server_id": server_id, "msg": msg} message_json = json.dumps(msg) # logging.info("report_client_id_status. message_json = %s" % message_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_server_training_status(self, run_id, status, edge_id=0, role=None, running_json=None, is_from_model=False): # if not self.comm_sanity_check(): @@ -186,6 +181,13 @@ def report_server_training_status(self, run_id, status, edge_id=0, role=None, ru from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json) + def report_job_status(self, run_id, status): + topic_name = "master_agent/slave_agent/job_status" + payload = {"run_id": run_id, "status": status} + + message_json = json.dumps(payload) + self.send_message(topic_name, message_json) + def report_server_device_status_to_web_ui(self, run_id, status, edge_id=0, role=None): """ this is used for notifying the server device status to MLOps Frontend @@ -206,7 +208,7 @@ def report_server_device_status_to_web_ui(self, run_id, status, edge_id=0, role= # logging.info("report_server_device_status. msg = %s" % msg) message_json = json.dumps(msg) MLOpsStatus.get_instance().set_server_status(self.edge_id, status) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def common_report_server_training_status(self, run_id, status, role=None, edge_id=0): # if not self.comm_sanity_check(): @@ -223,7 +225,7 @@ def common_report_server_training_status(self, run_id, status, role=None, edge_i # logging.info("report_server_training_status. msg = %s" % msg) message_json = json.dumps(msg) MLOpsStatus.get_instance().set_server_status(self.edge_id, status) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def broadcast_server_training_status(self, run_id, status, role=None, is_from_model=False, edge_id=None): if self.messenger is None: @@ -239,7 +241,7 @@ def broadcast_server_training_status(self, run_id, status, role=None, is_from_mo } logging.info("broadcast_server_training_status. 
msg = %s" % msg) message_json = json.dumps(msg) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) if is_from_model: from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface @@ -248,19 +250,29 @@ def broadcast_server_training_status(self, run_id, status, role=None, is_from_mo from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status) - def report_server_id_status(self, run_id, status, edge_id=None, server_id=None, server_agent_id=None): + def report_server_id_status(self, run_id, status, edge_id=None, server_id=None, server_agent_id=None, + is_from_model=False, running_json=None): # if not self.comm_sanity_check(): # return topic_name = "fl_server/flserver_agent_" + str(server_agent_id if server_agent_id is not None else self.server_agent_id) + "/status" - msg = {"run_id": run_id, "edge_id": edge_id if edge_id is not None else self.edge_id, "status": status} + in_edge_id = edge_id if edge_id is not None else self.edge_id + msg = {"run_id": run_id, "edge_id": in_edge_id, + "status": status, "is_from_model": is_from_model} if server_id is not None: msg["server_id"] = server_id message_json = json.dumps(msg) logging.info(f"report_server_id_status; topic_name: {topic_name}, msg: {msg}") # logging.info("report_server_id_status server id {}".format(server_agent_id)) # logging.info("report_server_id_status. message_json = %s" % message_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) + + if is_from_model: + from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface + FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json) + else: + from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface + FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json) def report_client_training_metric(self, metric_json): # if not self.comm_sanity_check(): @@ -268,7 +280,7 @@ def report_client_training_metric(self, metric_json): topic_name = "fl_client/mlops/training_metrics" logging.info("report_client_training_metric. message_json = %s" % metric_json) message_json = json.dumps(metric_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_server_training_metric(self, metric_json, payload=None): # if not self.comm_sanity_check(): @@ -279,7 +291,7 @@ def report_server_training_metric(self, metric_json, payload=None): else: message_json = json.dumps(metric_json) # logging.info("report_server_training_metric. message_json = %s" % metric_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_endpoint_metric(self, metric_json, payload=None): # if not self.comm_sanity_check(): @@ -290,7 +302,7 @@ def report_endpoint_metric(self, metric_json, payload=None): else: message_json = json.dumps(metric_json) # logging.info("report_endpoint_metric. 
message_json = %s" % metric_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_fedml_train_metric(self, metric_json, run_id=0, is_endpoint=False): # if not self.comm_sanity_check(): @@ -299,42 +311,42 @@ def report_fedml_train_metric(self, metric_json, run_id=0, is_endpoint=False): logging.info("report_fedml_train_metric. message_json = %s" % metric_json) metric_json["is_endpoint"] = is_endpoint message_json = json.dumps(metric_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_fedml_run_logs(self, logs_json, run_id=0): # if not self.comm_sanity_check(): # return topic_name = f"fedml_slave/fedml_master/logs/{run_id}" message_json = json.dumps(logs_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_server_training_round_info(self, round_info): # if not self.comm_sanity_check(): # return topic_name = "fl_server/mlops/training_roundx" message_json = json.dumps(round_info) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_client_model_info(self, model_info_json): # if not self.comm_sanity_check(): # return topic_name = "fl_server/mlops/client_model" message_json = json.dumps(model_info_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_aggregated_model_info(self, model_info_json): # if not self.comm_sanity_check(): # return topic_name = "fl_server/mlops/global_aggregated_model" message_json = json.dumps(model_info_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_training_model_net_info(self, model_net_info_json): # if not self.comm_sanity_check(): # return topic_name = "fl_server/mlops/training_model_net" message_json = json.dumps(model_net_info_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_llm_record(self, metric_json): # if not self.comm_sanity_check(): @@ -342,7 +354,7 @@ def report_llm_record(self, metric_json): topic_name = "model_serving/mlops/llm_input_output_record" logging.info("report_llm_record. message_json = %s" % metric_json) message_json = json.dumps(metric_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_edge_job_computing_cost(self, job_id, edge_id, computing_started_time, computing_ended_time, @@ -359,7 +371,7 @@ def report_edge_job_computing_cost(self, job_id, edge_id, "computing_ended_time": computing_ended_time, "duration": duration, "user_id": user_id, "api_key": api_key} message_json = json.dumps(msg) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) # logging.info("report_job_computing_cost. message_json = %s" % message_json) def report_logs_updated(self, run_id): @@ -369,7 +381,7 @@ def report_logs_updated(self, run_id): msg = {"time": time.time()} message_json = json.dumps(msg) logging.info("report_logs_updated. 
message_json = %s" % message_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_artifact_info(self, job_id, edge_id, artifact_name, artifact_type, artifact_local_path, artifact_url, @@ -388,7 +400,7 @@ def report_artifact_info(self, job_id, edge_id, artifact_name, artifact_type, "timestamp": timestamp } message_json = json.dumps(artifact_info_json) - self.messenger.send_message_json(topic_name, message_json) + self.send_message(topic_name, message_json) def report_endpoint_status(self, end_point_id, model_status, timestamp=None, end_point_name="", model_name="", model_inference_url=""): @@ -401,8 +413,8 @@ def report_endpoint_status(self, end_point_id, model_status, timestamp=None, "model_status": model_status, "timestamp": int(format(time_param, '.0f'))} - self.messenger.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload)) - self.messenger.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload)) + self.send_message(deployment_status_topic, json.dumps(deployment_status_payload)) + self.send_message(deployment_status_topic_prefix, json.dumps(deployment_status_payload)) def report_run_log( self, run_id, device_id, log_list, log_source=None, use_mqtt=False @@ -480,4 +492,10 @@ def stop_device_realtime_perf(self): self.device_perfs.stop_device_realtime_stats() def report_json_message(self, topic, payload): - self.messenger.send_message_json(topic, payload) \ No newline at end of file + self.send_message(topic, payload) + + def send_message(self, topic, payload): + if self.send_message_func is not None: + self.send_message_func(topic, payload) + elif self.messenger is not None: + self.messenger.send_message_json(topic, payload) \ No newline at end of file diff --git a/python/fedml/workflow/driver_example/customized_job_example/customized_workflow.py b/python/fedml/workflow/driver_example/customized_job_example/customized_workflow.py index 2a8f2008eb..b948231c96 100644 --- a/python/fedml/workflow/driver_example/customized_job_example/customized_workflow.py +++ b/python/fedml/workflow/driver_example/customized_job_example/customized_workflow.py @@ -109,7 +109,7 @@ def create_deploy_workflow(job_api_key=None): # DeployImageJob.generate_yaml_doc(deploy_image_job_yaml_obj, deploy_image_job_yaml) # Generate the job object - endpoint_id = 100 # Here you need to set your own endpoint id + endpoint_id = None # Here you need to set your own endpoint id deploy_image_job = DeployImageJob( name="deploy_image_job", endpoint_id=endpoint_id, job_yaml_absolute_path=deploy_image_job_yaml, job_api_key=job_api_key) @@ -168,7 +168,795 @@ def create_inference_train_workflow( # workflow.add_job(train_job, dependencies=[inference_jobs[-1]]) # Set the input to the workflow - input_json = {"text": "What is a good cure for hiccups?"} if input_json is None else input_json + # input_json = {"text": "What is a good cure for hiccups?"} if input_json is None else input_json + input_json = { + "arr": [ + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + -0.0100005, + -0.0100005, + -0.0100005, + -0.013973799, + -0.0189315247, + -0.023184301, + -0.0360728861, + -0.0392619154, + -0.0380269994, + -0.0390143887, + -0.0346046778, + -0.0257765396, + -0.0209733754, + -0.0217809993, + -0.0144984527, + -0.0118807892, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + -0.0178081425, 
+ -0.0232058779, + -0.0298662898, + -0.0414395151, + -0.0586512813, + -0.0812643979, + -0.105997038, + -0.121704878, + -0.134457288, + -0.139756261, + -0.141562422, + -0.135229133, + -0.120246727, + -0.104490087, + -0.0870044931, + -0.0716699334, + -0.0485892545, + -0.0324260775, + -0.0216926329, + -0.0100005, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + -0.0132956624, + -0.0225936238, + -0.0383702224, + -0.0598206019, + -0.0842014426, + -0.118390816, + -0.154266827, + -0.188282524, + -0.219803054, + -0.242936317, + -0.255020324, + -0.259481423, + -0.249404582, + -0.226727106, + -0.200418885, + -0.16716117, + -0.134317009, + -0.0958717755, + -0.0736565245, + -0.0503983075, + -0.0269783475, + -0.0168919, + -0.0100005, + 0, + 0, + 0, + 0, + -0.0147795885, + -0.025122101, + -0.0381226487, + -0.0786317321, + -0.119593671, + -0.165704529, + -0.228814281, + -0.288620224, + -0.354491034, + -0.421140618, + -0.480243669, + -0.527064646, + -0.540807419, + -0.521388017, + -0.474446021, + -0.403948632, + -0.336571539, + -0.271580657, + -0.20666741, + -0.154539645, + -0.108856709, + -0.0677589146, + -0.0340327281, + -0.0215091205, + 0, + 0, + -0.0100005, + -0.0107381289, + -0.0260253876, + -0.0570600482, + -0.0914378767, + -0.143000013, + -0.199005834, + -0.266034404, + -0.353401549, + -0.450251488, + -0.551598332, + -0.647939202, + -0.743171364, + -0.818162561, + -0.851073275, + -0.83112168, + -0.763764496, + -0.659992784, + -0.547527626, + -0.439376979, + -0.33557659, + -0.254856553, + -0.183933732, + -0.126755715, + -0.0706477667, + -0.0388818206, + 0, + 0, + 0, + -0.0134176155, + -0.0390612132, + -0.0873974922, + -0.133107017, + -0.194532142, + -0.27478633, + -0.369886454, + -0.482920333, + -0.605294063, + -0.735621386, + -0.869509827, + -0.989564738, + -1.09132506, + -1.13182948, + -1.09408349, + -0.996373436, + -0.868781173, + -0.717778845, + -0.570649327, + -0.439021868, + -0.326889344, + -0.235934504, + -0.167697996, + -0.0995100269, + -0.0479392976, + -0.0187851186, + 0, + -0.0117322667, + -0.0288274493, + -0.0646532861, + -0.118956716, + -0.17783758, + 1.53795878, + 2.57176245, + 1.53212043, + 1.00392168, + -0.179355647, + -0.591732991, + -1.05273662, + -1.15378689, + -1.22142979, + -1.2388156, + -1.21321586, + -1.14302847, + -1.02018313, + -0.857098743, + -0.676706697, + -0.516203262, + -0.379287244, + -0.271402545, + -0.189934521, + -0.119940614, + -0.0556340911, + -0.0145752163, + 0, + -0.0206611389, + -0.0437166621, + -0.0808756237, + -0.140488164, + -0.207699245, + 3.7747726, + 3.14033146, + 2.28939169, + 1.76127332, + 1.4318542, + 1.1313135, + 0.679164893, + 0.665484747, + 0.666043389, + 0.680680095, + 0.677305174, + 0.665508286, + 0.721340316, + 0.883661589, + 0.91751869, + 0.0282541074, + -0.401002939, + -0.283099723, + -0.194831338, + -0.123075256, + -0.066612686, + -0.0161462821, + -0.0112546885, + -0.0293918605, + -0.0484646663, + -0.093178326, + -0.146682925, + -0.218121209, + 0.830460131, + 1.04725853, + 0.147086928, + 0.259684517, + 0.495679969, + 0.998953721, + 1.29535061, + 1.12204782, + 1.41528197, + 1.4259952, + 1.36416372, + 1.22805443, + 1.03395727, + 1.40874227, + 1.73166837, + 1.00260058, + -0.401823716, + -0.275049233, + -0.181713744, + -0.107567122, + -0.0566041118, + -0.0189159236, + -0.0121427928, + -0.0243168731, + -0.050270377, + -0.0887358114, + -0.138806025, + -0.212706019, + -0.321729999, + -0.462313723, + -0.652442841, + -0.845524923, + -0.961258323, + -0.793125052, + -0.226359955, + -0.640468216, + -0.12372009, + -0.167157468, + -0.255843161, + -0.441448335, + 
-0.792766628, + 1.30597044, + 1.81460411, + 0.691054579, + -0.383665051, + -0.26310513, + -0.166473946, + -0.0799663431, + -0.0455007946, + -0.0195541446, + -0.0100005, + -0.0186206584, + -0.0414986832, + -0.0722615997, + -0.123238725, + -0.212256343, + -0.331309824, + -0.491126078, + -0.687704902, + -0.86260267, + -0.939124713, + -0.869991467, + -0.758168797, + -0.722198511, + -0.739826964, + -0.809980626, + -0.911188613, + -1.00032001, + -0.221550751, + 1.53134484, + 1.47605194, + -0.273150738, + -0.363157263, + -0.252975575, + -0.157152039, + -0.0652009258, + -0.0335283586, + -0.0124209728, + 0, + -0.014849279, + -0.0329699917, + -0.0601451792, + -0.118353377, + -0.219271688, + -0.354392407, + -0.523006773, + -0.71568287, + -0.862626101, + -0.90524289, + -0.831592288, + -0.751312636, + -0.762948163, + -0.825877849, + -0.930232292, + -1.04727288, + -0.879016953, + 1.11455708, + 1.61660969, + 0.264000765, + -0.464282235, + -0.354907482, + -0.256014147, + -0.158427696, + -0.0620647188, + -0.0242921899, + 0, + 0, + -0.0117874599, + -0.0252632841, + -0.0502423656, + -0.115068847, + -0.235195531, + -0.377531303, + -0.547311188, + -0.723069536, + -0.848981953, + -0.878897369, + -0.826469482, + -0.795496372, + -0.883536617, + -0.994814123, + -1.13364619, + -1.20871511, + 0.0000560198157, + 1.28700658, + 1.50082995, + -0.122561277, + -0.462110102, + -0.360151562, + -0.263898374, + -0.166295096, + -0.0568635009, + -0.0105441394, + 0, + 0, + 0, + -0.016636779, + -0.0423254862, + -0.119931644, + -0.252550583, + -0.39191634, + -0.556171069, + -0.717849905, + -0.829516019, + -0.854549188, + -0.84598967, + -0.889246054, + -1.03761315, + -1.16457617, + -1.30025654, + -0.740699086, + 1.05188993, + 1.3036988, + -0.163440609, + -0.59058464, + -0.474233049, + -0.368789557, + -0.274082099, + -0.174264813, + -0.0696188843, + -0.018003151, + 0, + 0, + 0, + -0.0168610568, + -0.0451688568, + -0.131668459, + -0.267838929, + -0.398906806, + -0.548202377, + -0.690077015, + -0.789823563, + -0.831599129, + -0.861314493, + -0.95681566, + -1.11036634, + -1.22743073, + -1.31006468, + -0.02573686, + 1.14239899, + 0.761423491, + -0.706825874, + -0.608999426, + -0.492457882, + -0.380502867, + -0.279282191, + -0.173984018, + -0.0767235054, + -0.0195871373, + -0.0100005, + 0, + -0.0100005, + -0.024817808, + -0.0552275065, + -0.148243512, + -0.283202341, + -0.4022125, + -0.534598048, + -0.656007943, + -0.738083794, + -0.781657503, + -0.824620535, + -0.918824463, + -1.04078449, + -1.13391454, + -1.09212795, + 0.70592031, + 1.17679031, + -0.37378182, + -0.758547572, + -0.62868064, + -0.501492113, + -0.381043892, + -0.270505206, + -0.168251255, + -0.0784168728, + -0.022799968, + -0.0157856413, + 0, + 0, + -0.0269850288, + -0.0676999793, + -0.167498207, + -0.298089736, + -0.411096027, + -0.522810883, + -0.625838621, + -0.693423683, + -0.731704263, + -0.767086709, + -0.82998003, + -0.921590434, + -1.00562716, + 0.0779492952, + 1.22959017, + 0.636500653, + -0.901400043, + -0.769630793, + -0.635363773, + -0.494618472, + -0.369117095, + -0.255794246, + -0.156732083, + -0.0783809414, + -0.0267109338, + -0.0148726634, + 0, + -0.0100005, + -0.0348385687, + -0.0869311199, + -0.185622432, + -0.311777198, + -0.427690033, + -0.530457702, + -0.612837575, + -0.669073252, + -0.706628103, + -0.737178903, + -0.779583917, + -0.866698428, + -0.288157768, + 1.2193059, + 1.10500698, + -0.50413989, + -0.909137779, + -0.774520432, + -0.619405771, + -0.472096102, + -0.344822207, + -0.235626373, + -0.144455008, + -0.0769092863, + -0.0286146987, + 
-0.0100005, + 0, + -0.0100005, + -0.0342628198, + -0.101174053, + -0.195711272, + -0.324606261, + -0.442716711, + -0.545960978, + -0.637281741, + -0.703742928, + -0.753441795, + -0.788772419, + -0.829773267, + -0.745526297, + 0.949893727, + 1.18293215, + 0.385795002, + -1.023299, + -0.89872884, + -0.736858006, + -0.575258663, + -0.430322485, + -0.30912025, + -0.209889823, + -0.13189517, + -0.0731506415, + -0.0276674735, + -0.0100005, + 0, + -0.0100005, + -0.0400234981, + -0.10709374, + -0.194645695, + -0.316981297, + -0.440895564, + -0.560086039, + -0.667605659, + -0.763806998, + -0.843535003, + -0.903604039, + -0.938010529, + 0.763887624, + 1.12176928, + 0.784111, + -0.818046093, + -0.991046672, + -0.828340182, + -0.652780006, + -0.495325185, + -0.364891317, + -0.261772085, + -0.17529887, + -0.112966586, + -0.0617374486, + -0.0270715466, + 0, + 0, + 0, + -0.0406825662, + -0.0978606438, + -0.177848987, + -0.287783481, + -0.412614752, + -0.543271605, + -0.671018812, + -0.798159188, + -0.916686263, + -1.02499517, + -0.773682132, + 1.09355574, + 1.05041156, + -0.498209852, + -1.05256459, + -0.870980804, + -0.688431167, + -0.523166414, + -0.391308572, + -0.282035183, + -0.199071147, + -0.13652517, + -0.0893688913, + -0.041317086, + -0.016850831, + 0, + 0, + 0, + -0.0283386899, + -0.0765120563, + -0.141969555, + -0.232658498, + -0.341261378, + -0.469723228, + -0.606194512, + -0.747366354, + -0.880786554, + -0.729389144, + 0.895224865, + 1.11943124, + -0.105438374, + -1.00783177, + -0.859696548, + -0.683890026, + -0.531181637, + -0.395889778, + -0.289956123, + -0.203267966, + -0.14295145, + -0.0963532989, + -0.0643914026, + -0.0337070214, + -0.0111853003, + 0, + 0, + -0.0100005, + -0.0151722732, + -0.0480051146, + -0.0951161616, + -0.160643556, + -0.245453283, + -0.353245922, + -0.474265429, + -0.598667391, + -0.729305101, + 0.389322873, + 1.38694264, + 1.37486731, + -0.403963644, + -0.77444593, + -0.638730244, + -0.502999283, + -0.387339921, + -0.279971294, + -0.198381814, + -0.135822721, + -0.0965383286, + -0.0633365644, + -0.0427549534, + -0.0257581657, + -0.0100005, + 0, + 0, + 0, + 0, + -0.0237543896, + -0.0522032466, + -0.0858749627, + -0.140703979, + -0.208515621, + -0.290149335, + -0.368567087, + 0.334201602, + 2.33307288, + 2.27286258, + 2.23777229, + 0.0412218057, + -0.494890333, + -0.422342015, + -0.339048837, + -0.257069088, + -0.185534152, + -0.136577185, + -0.0860242391, + -0.0578259874, + -0.033636416, + -0.0181122384, + -0.0100005, + 0, + 0, + 0, + 0, + 0, + -0.0136274661, + -0.0285803164, + -0.0474793553, + -0.0779785591, + -0.118532172, + -0.167201555, + -0.214787719, + 2.22171299, + 4.30500754, + 4.03125111, + 3.36505818, + 0.379953648, + -0.284269948, + -0.247694588, + -0.205869945, + -0.155925102, + -0.116435448, + -0.0857647974, + -0.0546508166, + -0.0401800073, + -0.023758997, + -0.0165780693, + -0.0100005, + 0, + 0, + 0, + 0, + 0, + 0, + -0.0115748833, + -0.0284271584, + -0.0506655656, + -0.0740332846, + -0.100455604, + -0.124744578, + 4.17363552, + 7.81243004, + 5.7896979, + 0.322149281, + -0.181506609, + -0.160333393, + -0.139182079, + -0.118875455, + -0.0873316648, + -0.0700227708, + -0.0540690537, + -0.0384297037, + -0.0265616274, + -0.0161844507, + -0.0119683967, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + -0.0132918601, + -0.0159980455, + -0.0207236291, + -0.0266997366, + -0.0284703819, + -0.0343035092, + -0.0410336906, + -0.0488886427, + -0.0548357917, + -0.0551988782, + -0.0469971082, + -0.0388769026, + -0.0316010302, + -0.0285226846, + -0.021736589, + 
-0.0100005, + 0, + 0, + 0, + 0, + 0, + 0 + ] + } workflow.set_workflow_input(input_json) # Run workflow @@ -213,6 +1001,6 @@ def create_inference_train_workflow( if is_inference and deployed_endpoint_id is not None: create_inference_train_workflow( - job_api_key=args.api_key, endpoint_id_list=[deployed_endpoint_id, deployed_endpoint_id], + job_api_key=args.api_key, endpoint_id_list=[deployed_endpoint_id], input_json=args.infer_json) exit(0) diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job.yaml b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job.yaml index 6ec64b0404..52ac79344e 100755 --- a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job.yaml +++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job.yaml @@ -6,12 +6,12 @@ workspace: deploy_image_job # Running entry commands which will be executed as the job entry point. # Support multiple lines, which can not be empty. job: | - echo "current job id: $FEDML_CURRENT_RUN_ID" - echo "current edge id: $FEDML_CURRENT_EDGE_ID" - echo "Hello, Here is the FedML Nexus AI platform." - echo "Current directory is as follows." - pwd - sleep 3 + echo "current job id: $FEDML_CURRENT_RUN_ID" + echo "current edge id: $FEDML_CURRENT_EDGE_ID" + echo "Hello, Here is the FedML Nexus AI platform." + echo "Current directory is as follows." + pwd + sleep 3 job_type: deploy # options: train, deploy, federate diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/fedml_model_config.yaml b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/fedml_model_config.yaml index ab8dbc4747..6992bb37df 100644 --- a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/fedml_model_config.yaml +++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/fedml_model_config.yaml @@ -1,12 +1,13 @@ -workspace: "." -entry_point: "main_entry.py" +workspace: "./" +entry_point: "mnist_serve_main.py" -auto_detect_public_ip: true -server_external_port: 20215 +data_cache_dir: "" +bootstrap: "" + +server_external_port: 20203 server_internal_port: 2203 -bootstrap: | - echo "Bootstrap start..." 
- pip install -U fedml - sh ./config/bootstrap.sh - echo "Bootstrap finished" +auto_detect_public_ip: true + +request_input_example: {"arr":[0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.00005000e-02,-1.00005000e-02,-1.00005000e-02,-1.39737990e-02,-1.89315247e-02,-2.31843010e-02,-3.60728861e-02,-3.92619154e-02,-3.80269994e-02,-3.90143887e-02,-3.46046778e-02,-2.57765396e-02,-2.09733754e-02,-2.17809993e-02,-1.44984527e-02,-1.18807892e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.78081425e-02,-2.32058779e-02,-2.98662898e-02,-4.14395151e-02,-5.86512813e-02,-8.12643979e-02,-1.05997038e-01,-1.21704878e-01,-1.34457288e-01,-1.39756261e-01,-1.41562422e-01,-1.35229133e-01,-1.20246727e-01,-1.04490087e-01,-8.70044931e-02,-7.16699334e-02,-4.85892545e-02,-3.24260775e-02,-2.16926329e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.32956624e-02,-2.25936238e-02,-3.83702224e-02,-5.98206019e-02,-8.42014426e-02,-1.18390816e-01,-1.54266827e-01,-1.88282524e-01,-2.19803054e-01,-2.42936317e-01,-2.55020324e-01,-2.59481423e-01,-2.49404582e-01,-2.26727106e-01,-2.00418885e-01,-1.67161170e-01,-1.34317009e-01,-9.58717755e-02,-7.36565245e-02,-5.03983075e-02,-2.69783475e-02,-1.68919000e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.47795885e-02,-2.51221010e-02,-3.81226487e-02,-7.86317321e-02,-1.19593671e-01,-1.65704529e-01,-2.28814281e-01,-2.88620224e-01,-3.54491034e-01,-4.21140618e-01,-4.80243669e-01,-5.27064646e-01,-5.40807419e-01,-5.21388017e-01,-4.74446021e-01,-4.03948632e-01,-3.36571539e-01,-2.71580657e-01,-2.06667410e-01,-1.54539645e-01,-1.08856709e-01,-6.77589146e-02,-3.40327281e-02,-2.15091205e-02, 0.00000000e+00, 0.00000000e+00,-1.00005000e-02,-1.07381289e-02,-2.60253876e-02,-5.70600482e-02,-9.14378767e-02,-1.43000013e-01,-1.99005834e-01,-2.66034404e-01,-3.53401549e-01,-4.50251488e-01,-5.51598332e-01,-6.47939202e-01,-7.43171364e-01,-8.18162561e-01,-8.51073275e-01,-8.31121680e-01,-7.63764496e-01,-6.59992784e-01,-5.47527626e-01,-4.39376979e-01,-3.35576590e-01,-2.54856553e-01,-1.83933732e-01,-1.26755715e-01,-7.06477667e-02,-3.88818206e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.34176155e-02,-3.90612132e-02,-8.73974922e-02,-1.33107017e-01,-1.94532142e-01,-2.74786330e-01,-3.69886454e-01,-4.82920333e-01,-6.05294063e-01,-7.35621386e-01,-8.69509827e-01,-9.89564738e-01,-1.09132506e+00,-1.13182948e+00,-1.09408349e+00,-9.96373436e-01,-8.68781173e-01,-7.17778845e-01,-5.70649327e-01,-4.39021868e-01,-3.26889344e-01,-2.35934504e-01,-1.67697996e-01,-9.95100269e-02,-4.79392976e-02,-1.87851186e-02, 0.00000000e+00,-1.17322667e-02,-2.88274493e-02,-6.46532861e-02,-1.18956716e-01,-1.77837580e-01, 1.53795878e+00, 2.57176245e+00, 1.53212043e+00, 
1.00392168e+00,-1.79355647e-01,-5.91732991e-01,-1.05273662e+00,-1.15378689e+00,-1.22142979e+00,-1.23881560e+00,-1.21321586e+00,-1.14302847e+00,-1.02018313e+00,-8.57098743e-01,-6.76706697e-01,-5.16203262e-01,-3.79287244e-01,-2.71402545e-01,-1.89934521e-01,-1.19940614e-01,-5.56340911e-02,-1.45752163e-02, 0.00000000e+00,-2.06611389e-02,-4.37166621e-02,-8.08756237e-02,-1.40488164e-01,-2.07699245e-01, 3.77477260e+00, 3.14033146e+00, 2.28939169e+00, 1.76127332e+00, 1.43185420e+00, 1.13131350e+00, 6.79164893e-01, 6.65484747e-01, 6.66043389e-01, 6.80680095e-01, 6.77305174e-01, 6.65508286e-01, 7.21340316e-01, 8.83661589e-01, 9.17518690e-01, 2.82541074e-02,-4.01002939e-01,-2.83099723e-01,-1.94831338e-01,-1.23075256e-01,-6.66126860e-02,-1.61462821e-02,-1.12546885e-02,-2.93918605e-02,-4.84646663e-02,-9.31783260e-02,-1.46682925e-01,-2.18121209e-01, 8.30460131e-01, 1.04725853e+00, 1.47086928e-01, 2.59684517e-01, 4.95679969e-01, 9.98953721e-01, 1.29535061e+00, 1.12204782e+00, 1.41528197e+00, 1.42599520e+00, 1.36416372e+00, 1.22805443e+00, 1.03395727e+00, 1.40874227e+00, 1.73166837e+00, 1.00260058e+00,-4.01823716e-01,-2.75049233e-01,-1.81713744e-01,-1.07567122e-01,-5.66041118e-02,-1.89159236e-02,-1.21427928e-02,-2.43168731e-02,-5.02703770e-02,-8.87358114e-02,-1.38806025e-01,-2.12706019e-01,-3.21729999e-01,-4.62313723e-01,-6.52442841e-01,-8.45524923e-01,-9.61258323e-01,-7.93125052e-01,-2.26359955e-01,-6.40468216e-01,-1.23720090e-01,-1.67157468e-01,-2.55843161e-01,-4.41448335e-01,-7.92766628e-01, 1.30597044e+00, 1.81460411e+00, 6.91054579e-01,-3.83665051e-01,-2.63105130e-01,-1.66473946e-01,-7.99663431e-02,-4.55007946e-02,-1.95541446e-02,-1.00005000e-02,-1.86206584e-02,-4.14986832e-02,-7.22615997e-02,-1.23238725e-01,-2.12256343e-01,-3.31309824e-01,-4.91126078e-01,-6.87704902e-01,-8.62602670e-01,-9.39124713e-01,-8.69991467e-01,-7.58168797e-01,-7.22198511e-01,-7.39826964e-01,-8.09980626e-01,-9.11188613e-01,-1.00032001e+00,-2.21550751e-01, 1.53134484e+00, 1.47605194e+00,-2.73150738e-01,-3.63157263e-01,-2.52975575e-01,-1.57152039e-01,-6.52009258e-02,-3.35283586e-02,-1.24209728e-02, 0.00000000e+00,-1.48492790e-02,-3.29699917e-02,-6.01451792e-02,-1.18353377e-01,-2.19271688e-01,-3.54392407e-01,-5.23006773e-01,-7.15682870e-01,-8.62626101e-01,-9.05242890e-01,-8.31592288e-01,-7.51312636e-01,-7.62948163e-01,-8.25877849e-01,-9.30232292e-01,-1.04727288e+00,-8.79016953e-01, 1.11455708e+00, 1.61660969e+00, 2.64000765e-01,-4.64282235e-01,-3.54907482e-01,-2.56014147e-01,-1.58427696e-01,-6.20647188e-02,-2.42921899e-02, 0.00000000e+00, 0.00000000e+00,-1.17874599e-02,-2.52632841e-02,-5.02423656e-02,-1.15068847e-01,-2.35195531e-01,-3.77531303e-01,-5.47311188e-01,-7.23069536e-01,-8.48981953e-01,-8.78897369e-01,-8.26469482e-01,-7.95496372e-01,-8.83536617e-01,-9.94814123e-01,-1.13364619e+00,-1.20871511e+00, 5.60198157e-05, 1.28700658e+00, 1.50082995e+00,-1.22561277e-01,-4.62110102e-01,-3.60151562e-01,-2.63898374e-01,-1.66295096e-01,-5.68635009e-02,-1.05441394e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.66367790e-02,-4.23254862e-02,-1.19931644e-01,-2.52550583e-01,-3.91916340e-01,-5.56171069e-01,-7.17849905e-01,-8.29516019e-01,-8.54549188e-01,-8.45989670e-01,-8.89246054e-01,-1.03761315e+00,-1.16457617e+00,-1.30025654e+00,-7.40699086e-01, 1.05188993e+00, 1.30369880e+00,-1.63440609e-01,-5.90584640e-01,-4.74233049e-01,-3.68789557e-01,-2.74082099e-01,-1.74264813e-01,-6.96188843e-02,-1.80031510e-02, 0.00000000e+00, 0.00000000e+00, 
0.00000000e+00,-1.68610568e-02,-4.51688568e-02,-1.31668459e-01,-2.67838929e-01,-3.98906806e-01,-5.48202377e-01,-6.90077015e-01,-7.89823563e-01,-8.31599129e-01,-8.61314493e-01,-9.56815660e-01,-1.11036634e+00,-1.22743073e+00,-1.31006468e+00,-2.57368600e-02, 1.14239899e+00, 7.61423491e-01,-7.06825874e-01,-6.08999426e-01,-4.92457882e-01,-3.80502867e-01,-2.79282191e-01,-1.73984018e-01,-7.67235054e-02,-1.95871373e-02,-1.00005000e-02, 0.00000000e+00,-1.00005000e-02,-2.48178080e-02,-5.52275065e-02,-1.48243512e-01,-2.83202341e-01,-4.02212500e-01,-5.34598048e-01,-6.56007943e-01,-7.38083794e-01,-7.81657503e-01,-8.24620535e-01,-9.18824463e-01,-1.04078449e+00,-1.13391454e+00,-1.09212795e+00, 7.05920310e-01, 1.17679031e+00,-3.73781820e-01,-7.58547572e-01,-6.28680640e-01,-5.01492113e-01,-3.81043892e-01,-2.70505206e-01,-1.68251255e-01,-7.84168728e-02,-2.27999680e-02,-1.57856413e-02, 0.00000000e+00, 0.00000000e+00,-2.69850288e-02,-6.76999793e-02,-1.67498207e-01,-2.98089736e-01,-4.11096027e-01,-5.22810883e-01,-6.25838621e-01,-6.93423683e-01,-7.31704263e-01,-7.67086709e-01,-8.29980030e-01,-9.21590434e-01,-1.00562716e+00, 7.79492952e-02, 1.22959017e+00, 6.36500653e-01,-9.01400043e-01,-7.69630793e-01,-6.35363773e-01,-4.94618472e-01,-3.69117095e-01,-2.55794246e-01,-1.56732083e-01,-7.83809414e-02,-2.67109338e-02,-1.48726634e-02, 0.00000000e+00,-1.00005000e-02,-3.48385687e-02,-8.69311199e-02,-1.85622432e-01,-3.11777198e-01,-4.27690033e-01,-5.30457702e-01,-6.12837575e-01,-6.69073252e-01,-7.06628103e-01,-7.37178903e-01,-7.79583917e-01,-8.66698428e-01,-2.88157768e-01, 1.21930590e+00, 1.10500698e+00,-5.04139890e-01,-9.09137779e-01,-7.74520432e-01,-6.19405771e-01,-4.72096102e-01,-3.44822207e-01,-2.35626373e-01,-1.44455008e-01,-7.69092863e-02,-2.86146987e-02,-1.00005000e-02, 0.00000000e+00,-1.00005000e-02,-3.42628198e-02,-1.01174053e-01,-1.95711272e-01,-3.24606261e-01,-4.42716711e-01,-5.45960978e-01,-6.37281741e-01,-7.03742928e-01,-7.53441795e-01,-7.88772419e-01,-8.29773267e-01,-7.45526297e-01, 9.49893727e-01, 1.18293215e+00, 3.85795002e-01,-1.02329900e+00,-8.98728840e-01,-7.36858006e-01,-5.75258663e-01,-4.30322485e-01,-3.09120250e-01,-2.09889823e-01,-1.31895170e-01,-7.31506415e-02,-2.76674735e-02,-1.00005000e-02, 0.00000000e+00,-1.00005000e-02,-4.00234981e-02,-1.07093740e-01,-1.94645695e-01,-3.16981297e-01,-4.40895564e-01,-5.60086039e-01,-6.67605659e-01,-7.63806998e-01,-8.43535003e-01,-9.03604039e-01,-9.38010529e-01, 7.63887624e-01, 1.12176928e+00, 7.84111000e-01,-8.18046093e-01,-9.91046672e-01,-8.28340182e-01,-6.52780006e-01,-4.95325185e-01,-3.64891317e-01,-2.61772085e-01,-1.75298870e-01,-1.12966586e-01,-6.17374486e-02,-2.70715466e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-4.06825662e-02,-9.78606438e-02,-1.77848987e-01,-2.87783481e-01,-4.12614752e-01,-5.43271605e-01,-6.71018812e-01,-7.98159188e-01,-9.16686263e-01,-1.02499517e+00,-7.73682132e-01, 1.09355574e+00, 1.05041156e+00,-4.98209852e-01,-1.05256459e+00,-8.70980804e-01,-6.88431167e-01,-5.23166414e-01,-3.91308572e-01,-2.82035183e-01,-1.99071147e-01,-1.36525170e-01,-8.93688913e-02,-4.13170860e-02,-1.68508310e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-2.83386899e-02,-7.65120563e-02,-1.41969555e-01,-2.32658498e-01,-3.41261378e-01,-4.69723228e-01,-6.06194512e-01,-7.47366354e-01,-8.80786554e-01,-7.29389144e-01, 8.95224865e-01, 1.11943124e+00,-1.05438374e-01,-1.00783177e+00,-8.59696548e-01,-6.83890026e-01,-5.31181637e-01,-3.95889778e-01,-2.89956123e-01,-2.03267966e-01,-1.42951450e-01,-9.63532989e-02,-6.43914026e-02,-3.37070214e-02,-1.11853003e-02, 
0.00000000e+00, 0.00000000e+00,-1.00005000e-02,-1.51722732e-02,-4.80051146e-02,-9.51161616e-02,-1.60643556e-01,-2.45453283e-01,-3.53245922e-01,-4.74265429e-01,-5.98667391e-01,-7.29305101e-01, 3.89322873e-01, 1.38694264e+00, 1.37486731e+00,-4.03963644e-01,-7.74445930e-01,-6.38730244e-01,-5.02999283e-01,-3.87339921e-01,-2.79971294e-01,-1.98381814e-01,-1.35822721e-01,-9.65383286e-02,-6.33365644e-02,-4.27549534e-02,-2.57581657e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-2.37543896e-02,-5.22032466e-02,-8.58749627e-02,-1.40703979e-01,-2.08515621e-01,-2.90149335e-01,-3.68567087e-01, 3.34201602e-01, 2.33307288e+00, 2.27286258e+00, 2.23777229e+00, 4.12218057e-02,-4.94890333e-01,-4.22342015e-01,-3.39048837e-01,-2.57069088e-01,-1.85534152e-01,-1.36577185e-01,-8.60242391e-02,-5.78259874e-02,-3.36364160e-02,-1.81122384e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.36274661e-02,-2.85803164e-02,-4.74793553e-02,-7.79785591e-02,-1.18532172e-01,-1.67201555e-01,-2.14787719e-01, 2.22171299e+00, 4.30500754e+00, 4.03125111e+00, 3.36505818e+00, 3.79953648e-01,-2.84269948e-01,-2.47694588e-01,-2.05869945e-01,-1.55925102e-01,-1.16435448e-01,-8.57647974e-02,-5.46508166e-02,-4.01800073e-02,-2.37589970e-02,-1.65780693e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.15748833e-02,-2.84271584e-02,-5.06655656e-02,-7.40332846e-02,-1.00455604e-01,-1.24744578e-01, 4.17363552e+00, 7.81243004e+00, 5.78969790e+00, 3.22149281e-01,-1.81506609e-01,-1.60333393e-01,-1.39182079e-01,-1.18875455e-01,-8.73316648e-02,-7.00227708e-02,-5.40690537e-02,-3.84297037e-02,-2.65616274e-02,-1.61844507e-02,-1.19683967e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00,-1.32918601e-02,-1.59980455e-02,-2.07236291e-02,-2.66997366e-02,-2.84703819e-02,-3.43035092e-02,-4.10336906e-02,-4.88886427e-02,-5.48357917e-02,-5.51988782e-02,-4.69971082e-02,-3.88769026e-02,-3.16010302e-02,-2.85226846e-02,-2.17365890e-02,-1.00005000e-02, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00]} + diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/mnist_serve_main.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/mnist_serve_main.py new file mode 100644 index 0000000000..6367ea487f --- /dev/null +++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/mnist_serve_main.py @@ -0,0 +1,37 @@ +from fedml.serving import FedMLPredictor +from fedml.serving import FedMLInferenceRunner +from model.minist_model import LogisticRegression + +# This is the model file that will upload to MLOps +MODEL_PARMS_DIR = "./model/model_parms_from_mlops" +# If you do not want to upload the model file to MLOps, +# (i.e., you want to use the model file in the lcoal DATA_CACHE_DIR) +# Please use the DATA_CACHE_DIR and specify DATA_CACHE_DIR +# in the fedml_model_config.yaml +# DATA_CACHE_DIR = "" + +class MnistPredictor(FedMLPredictor): + def __init__(self): + import pickle + import torch + + with open(MODEL_PARMS_DIR, 'rb') as model_file_obj: + model_params = pickle.load(model_file_obj) + + output_dim = 10 + + self.model = LogisticRegression(28 * 28, output_dim) + + self.model.load_state_dict(model_params) + + self.list_to_tensor_func = torch.tensor + + def predict(self, request): + arr = 
request["arr"] + input_tensor = self.list_to_tensor_func(arr) + return self.model(input_tensor) + +if __name__ == "__main__": + predictor = MnistPredictor() + fedml_inference_runner = FedMLInferenceRunner(predictor) + fedml_inference_runner.run() \ No newline at end of file diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/minist_model.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/minist_model.py new file mode 100644 index 0000000000..25789d4e1c --- /dev/null +++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/minist_model.py @@ -0,0 +1,11 @@ +import torch +class LogisticRegression(torch.nn.Module): + def __init__(self, input_dim, output_dim): + super(LogisticRegression, self).__init__() + self.linear = torch.nn.Linear(input_dim, output_dim) + + def forward(self, x): + import torch + outputs = torch.sigmoid(self.linear(x)) + return outputs + diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/model_parms_from_mlops b/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/model/model_parms_from_mlops new file mode 100644 index 0000000000000000000000000000000000000000..8c31b9f85b8d6964c585c43189ab5cf7a0a7d3c3 GIT binary patch literal 32188 zcmZsCc|285_`iMMQzRh?sU)em=Q*WPsZ^4rK;OU+_6Bg9>tT3~kJNdl5IoH&*6#SpU|JP-AuH*lE3nE)N z9rxPze~30 z|8HsTW7?@ooL2tl(1|E79Z!P%hEa?+eT&Uj$5864BB)<7V9%NU0{W$Uh*X;boNv>? zEa84K^F)+MeNlxahVGavoyY3h=K%Q;58$q4Ew(pZ#QYF*$Q&OB^Y&3<{JjI*?jZRu z=QL&N%}4tb3o>Ra%W%hHSuCAri#l~Pz?02jTxl>yc{bn^VPU4AI|ub`{(zWM0K7`Q z3blqmSQoY9(Hx#q)wx29Pt0q~oQcQtK4k!>?n7a_KRUh5W*5p3@Nt%=Z&8lWFP}^q zYt?{>Q7+og>7XRu^rBxwJqBvsz~23V@L2Z*tZ>T19bP)Xwo8T4Z?R;)buOzvP5`(D ziy=)}jJ8?Zi%W$pF~`~#Ze@>w@z7sTs^_9#Dt)02;w+Aa?x$ecBfMxO#QgffO=o#` zL+r3L<16}}I=kd4i7u=pTc(0At;-s;PUIoK@g6v@5C&eip22_Lx5L)?vP|8}YP8FX zgo8JSsK*TrINN<4FHA|$)#M4RKjDw_TjVf)K$M~0QEZVXov8EUBet9K5P>5Ps1HS_ za56Cu!bA%3Y&joo=v@cPntoy1fHc|*J!dJT22zRqT@*WHE4Z!DDl_|e2dz6!fY4(x zCh&G7?p-cNvH}<22`nHMcb=la65bPUGNW5cI#5kfm;BCBu;8!P0PL^(l5f1x*P zZ^Um{yXH68UwecqyTs^IQ&$jDezW(;>EI^Go1|L4mF#R3Wa@5kGwpp($cz}2*KY~K z-}{e%OAaryL`R9FC34Y&@~1Grim(ar(}Y0i2y*MPfqv>Dc?G5UhHc zRka<3q zW%jQIC+dgs@3POZb!#5ABl#Pc@^3~5S4B{gcO~EZ94WDj(P%T|1D|5I!lkYtmIsqa z>I0wSE{ui4U0h7>VhR@MDPtn*E*W`}4VtswMDcAUG)L})Lxt~fy-hx)A$tb4hB`oH zT?`ph97UtXT2kY#2WHBf@x0y<_+{D)xn{cPBfEk!KM(@u!)cVs%e&MDM=rQ=Hq5R2Yk)EIZGTvL|Vs&Qx|zaSap%rDKY(6q6}4glC8QVeyt^Fj^-} zYtw@u)X2j`__koI>Jp-0y#mA5NYGY2MQE=Ojfv?pOtC^KF5-O(d}klRymfxi{>>ZI zbZ){ajSTEBOQw>f+raE%Bt$tMr3ZRf&~DG==$ZRkDEK%WA}52Wr7fc9Ii8LoYtqqz z`iRZSH4ysm1%!PRq!LLrv5cKh#}9r0cHcWzSWrF=&3_8bQbE+b*~N^KTPkSI(6D9I z8$1vx0IGt2&}>8wgY)F+-$l`|$zcxZ`_4sK4`&emMSW!R7c1((7D0LiYns)cQ;bsU zM_5ub;gB@H93;Ae@w~|-^(#3DpWI!5ZN>IbS}}zw(?AJ0#sh1P9`;fTn2}2z=xyvp z>MIo)V_OMGOypv=oo~PoCvKob^d1lymY}CoKjY|;afq{XM(3@|fM?MgR?|0LW+Y%7 zJaukE=I~w6kOm;LsV^k?2T{TZiK=>y!9mUOL(D!C6yg zC$J)`57eTnSv(i6qHLEFafw()?{Ac*CjzGsf7HUUlhL5QFb{6t{S9Shw=v?lK5%by zf&&-iSe6bgFjE*s<^88X8`+;>t^d+T8cwKzR>lUnR~UjyMcb&%y*Ht;Z<=_Yn5LF5 zQ^z;MwlHWJhQqFcDDqw$^Zg`fpFa~sb-_ggk5|{xXk3~$X_ufYb35@Ne8VSyyD*wh zip?WxOnn~MjDs2)tPi;aP&j!7qUf4N4Mx%}TPtlJd=7#WHlKfCoJ~>3}X#KeTX>Yo0CO+BO6I;8Vy?BcF=dW2kv-OliuAo zNVj$G zKfS^3-#EOCTfjPTavqaTH^R@EcGUD10DY+(O6_|H23G6@!%iNiU7a5`sn?-a@+&f} zq(Rw>e+8a9H?hnu6EyxT#nvY+*r~jNF}=KksoVJ-QUeVzW$s({k3~1BF^h00ur7o{ zQu&}RJ%Ke3dWpi5a@=cp5B1q1&}1S^n;jd(I;n@SwcrMdjeR1~YX{)GKOb5b?V}d8 zG~m3OlTc~)lJve9z=$|*c<3~XYe!_5ReEZ~d~-L$oa7Pzz74bQ(}@ 
[GIT binary patch: base85-encoded payload for deploy_image_job/model/model_parms_from_mlops (Bin 0 -> 32188 bytes) omitted]

literal 0
HcmV?d00001

diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job.yaml b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job.yaml
new file mode 100755
index 0000000000..8ac9300165
--- /dev/null
+++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job.yaml
@@ -0,0 +1,29 @@
+# Local directory where your source code resides.
+# It should be the relative path to this job yaml file or the absolute path.
+# If your job doesn't contain any source code, it can be empty.
+workspace: deploy_llm_job
+
+# Running entry commands which will be executed as the job entry point.
+# Support multiple lines, which cannot be empty.
+job: |
+  echo "current job id: $FEDML_CURRENT_RUN_ID"
+  echo "current edge id: $FEDML_CURRENT_EDGE_ID"
+  echo "Hello, Here is the FedML Nexus AI platform."
+  echo "Current directory is as follows."
+  pwd
+  sleep 3
+
+job_type: deploy              # options: train, deploy, federate
+
+# Bootstrap shell commands which will be executed before running entry commands.
+# Support multiple lines, which can be empty.
+bootstrap: |
+  pip install -r requirements.txt
+  echo "Bootstrap finished."
+ +computing: + #resource_type: RTX-3090 # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type + resource_type: A100-80GB-SXM + minimum_num_gpus: 1 # minimum # of GPUs to provision + maximum_cost_per_hour: $10 # max cost per hour of all machines for your job + # device_type: GPU # GPU or CPU diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/.gitignore b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/.gitignore new file mode 100644 index 0000000000..0d20b6487c --- /dev/null +++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/.gitignore @@ -0,0 +1 @@ +*.pyc diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/__init__.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/__init__.py similarity index 100% rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/__init__.py rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/__init__.py diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/__init__.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/__init__.py similarity index 100% rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/__init__.py rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/__init__.py diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/__init__.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/__init__.py similarity index 100% rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/__init__.py rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/__init__.py diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/constants.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/constants.py similarity index 100% rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/constants.py rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/constants.py diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/instruct_pipeline.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/instruct_pipeline.py similarity index 100% rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/app/pipe/instruct_pipeline.py rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/app/pipe/instruct_pipeline.py diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/config/__init__.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/config/__init__.py similarity index 100% rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/config/__init__.py rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/config/__init__.py diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/fedml_model_config.yaml 
b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/fedml_model_config.yaml new file mode 100644 index 0000000000..bff517ef6d --- /dev/null +++ b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/fedml_model_config.yaml @@ -0,0 +1,12 @@ +workspace: "." +entry_point: "main_entry.py" + +auto_detect_public_ip: true +server_external_port: 20203 +server_internal_port: 2203 + +bootstrap: | + echo "Bootstrap start..." + pip install -U fedml + sh ./config/bootstrap.sh + echo "Bootstrap finished" diff --git a/python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/main_entry.py b/python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/main_entry.py similarity index 100% rename from python/fedml/workflow/driver_example/customized_job_example/deploy_image_job/main_entry.py rename to python/fedml/workflow/driver_example/customized_job_example/deploy_llm_job/main_entry.py diff --git a/python/setup.py b/python/setup.py index a9d61b352a..c531f722e2 100644 --- a/python/setup.py +++ b/python/setup.py @@ -41,7 +41,7 @@ def finalize_options(self): "wandb==0.13.2", "httpx", "attrs", - "fastapi>=0.92.0", + "fastapi", "uvicorn", "geventhttpclient>=1.4.4,<=2.0.9", "aiohttp>=3.8.1", @@ -62,7 +62,7 @@ def finalize_options(self): "py-machineid", "cachetools", "toposort", - "pydantic>=2.0", + "pydantic", "pydantic-settings", ] @@ -116,7 +116,7 @@ def finalize_options(self): setup( name="fedml", - version="0.8.27.dev2", + version="0.8.29.dev4", author="FedML Team", author_email="ch@fedml.ai", description="A research and production integrated edge-cloud library for " From 4ea1c7cc76ab0c1ff55ebf38a5d9b7130c9b09eb Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 28 Mar 2024 17:27:14 +0800 Subject: [PATCH 002/251] [CoreEngine] download packages without the ssl certification. --- .../scheduler/scheduler_core/scheduler_base_job_runner.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py index e2e090596d..46f1e7ff8f 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -20,6 +20,7 @@ from ..scheduler_core.message_center import FedMLMessageCenter from ..scheduler_core.status_center import FedMLStatusCenter from abc import ABC, abstractmethod +import ssl class RunnerError(Exception): @@ -160,8 +161,8 @@ def retrieve_and_unzip_package(self, package_name, package_url): local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}") if os.path.exists(local_package_file): os.remove(local_package_file) - package_url_without_query_path = urljoin(package_url, urlparse(package_url).path) - urllib.request.urlretrieve(package_url_without_query_path, local_package_file, + ssl._create_default_https_context = ssl._create_unverified_context + urllib.request.urlretrieve(package_url, local_package_file, reporthook=self.package_download_progress) unzip_package_path = os.path.join(self.agent_package_unzip_dir, f"unzip_fedml_run_{self.run_id}_{filename_without_extension}") From 46cbab24068b348c148a7c7e39c7f1c458240b04 Mon Sep 17 00:00:00 2001 From: Alex Date: Sat, 6 Apr 2024 21:00:38 +0800 Subject: [PATCH 003/251] [CoreEngine] sync the deployment and launch modules. 
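
This commit reconciles each deployment request on the master in two passes:
a replica-number diff (scale out / in per worker) and a replica-version diff
(rolling update, released one chunk at a time, with rollback on failure).
A single run carries at most one of the two diffs at a time. The snippet
below is a minimal sketch of the payload shapes attached to request_json:
the replica_num_diff entry mirrors the example quoted in the code comments
further down, while the replica_version_diff layout is only an assumption
for illustration.

    # Minimal sketch (not FedML code): per-device diffs the master attaches
    # to request_json before starting the job runner.
    request_json = {"end_point_id": 123, "device_ids": [1, 2]}

    # Pass 1 -- scale out / in: one entry per worker whose replica count
    # changes, e.g. removing the last replica on worker 2.
    request_json["replica_num_diff"] = {
        "2": {"op": "remove", "curr_num": 1, "target_num": 0},
    }

    # Pass 2 -- rolling update: only the first chunk of replicas to roll
    # forward; later chunks follow as earlier ones report success. A real
    # run carries either replica_num_diff or replica_version_diff, never
    # both at once; the key layout below is assumed.
    request_json["replica_version_diff"] = {
        "1": {"1": {"old_version": "v1", "new_version": "v2"}},
    }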
--- .../master/base_master_protocol_manager.py | 39 +-- .../model_scheduler/job_runner_msg_sender.py | 87 +---- .../model_scheduler/master_job_runner.py | 299 +++++++++++++----- .../master_job_runner_manager.py | 8 +- .../master_protocol_manager.py | 146 +++------ .../model_scheduler/worker_job_runner.py | 287 +++++++++-------- .../slave/base_slave_protocol_manager.py | 13 +- .../scheduler/slave/slave_protocol_manager.py | 51 +++ 8 files changed, 520 insertions(+), 410 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index bf720515d9..25cab5a17c 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -76,8 +76,8 @@ def generate_topics(self): # The topic for requesting device info from the client. self.topic_response_device_info = "client/server/response_device_info/" + str(self.edge_id) - # The topic for requesting device info from MLOps. - self.topic_request_device_info_from_mlops = f"mlops/master_agent/request_device_info/{self.edge_id}" + # The topic for requesting device info from mlops. + self.topic_request_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.edge_id}" # The topic for getting job status from the status center. self.topic_requesst_job_status = f"anywhere/master_agent/request_job_status/{self.edge_id}" @@ -115,6 +115,7 @@ def add_protocol_handler(self): self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg) self.add_message_listener(self.topic_report_status, self.callback_report_current_status) self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info) + self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info) self.add_message_listener(self.topic_request_device_info_from_mlops, self.callback_request_device_info_from_mlops) self.add_message_listener(self.topic_requesst_job_status, self.callback_request_job_status) @@ -436,38 +437,10 @@ def response_device_status_in_job(self, topic, payload): self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload)) def response_device_info_to_mlops(self, topic, payload): - response_topic = f"master_agent/mlops/response_device_info" - payload_json = json.loads(payload) - need_gpu_info = payload_json.get("need_gpu_info", False) + response_topic = f"deploy/master_agent/mlops/response_device_info" if self.mlops_metrics is not None: - if not need_gpu_info: - response_payload = { - "run_id": self.run_id, - "master_agent_device_id": self.edge_id, - "fedml_version": fedml.__version__ - } - else: - total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, \ - gpu_cores_total, gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = \ - sys_utils.get_sys_realtime_stats() - gpu_available_ids = JobRunnerUtils.get_instance().get_available_gpu_id_list(self.edge_id) - gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids) - gpu_cores_available = len(gpu_available_ids) - response_payload = { - "run_id": self.run_id, - "master_agent_device_id": self.edge_id, - "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2), - "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceAvailable": round(free_disk_size 
* MLOpsUtils.BYTES_TO_GB, 2), - "cpuUtilization": round(cup_utilization, 2), - "cpuCores": cpu_cores, - "gpuCoresTotal": gpu_cores_total, - "gpuCoresAvailable": gpu_cores_available, - "networkTraffic": sent_bytes + recv_bytes, - "timestamp": int(MLOpsUtils.get_ntp_time()), - "fedml_version": fedml.__version__ - } + response_payload = {"run_id": self.run_id, "master_agent_device_id": self.edge_id, + "fedml_version": fedml.__version__} self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload)) def init_job_task(self, request_json): diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py index 3fe45401ac..acce17d20b 100755 --- a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py +++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py @@ -18,7 +18,7 @@ def __init__(self): self.request_json = None self.edge_id = None - def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload): + def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload, replica_id_list=None): self.send_deployment_results(end_point_id, end_point_name, payload["model_name"], payload["model_url"], payload["model_version"], payload["port"], @@ -26,12 +26,13 @@ def send_deployment_results_with_payload(self, end_point_id, end_point_name, pay payload["model_metadata"], payload["model_config"], payload["input_json"], - payload["output_json"]) + payload["output_json"], + replica_id_list=replica_id_list) def send_deployment_results(self, end_point_id, end_point_name, model_name, model_inference_url, model_version, inference_port, inference_engine, - model_metadata, model_config, input_json, output_json): + model_metadata, model_config, input_json, output_json, replica_id_list=None): deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result" deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id) deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name, @@ -42,7 +43,8 @@ def send_deployment_results(self, end_point_id, end_point_name, "model_config": model_config, "input_json": input_json, "output_json": output_json, - "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} + "timestamp": int(format(time.time_ns() / 1000.0, '.0f')), + "replica_ids": replica_id_list} logging.info(f"[Master] deployment_results_payload is sent to mlops: {deployment_results_payload}") self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload)) @@ -104,85 +106,16 @@ def send_deployment_start_request_to_edges(self): continue should_added_devices.append(edge_id) # send start deployment request to each device - self.send_deployment_start_request_to_edge(edge_id) + self.send_deployment_start_request_to_edge(edge_id, self.request_json) return should_added_devices - def send_deployment_start_request_to_edge(self, edge_id): + def send_deployment_start_request_to_edge(self, edge_id, request_json): topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(edge_id)) logging.info("start_deployment: send topic " + topic_start_deployment + " to client...") - self.message_center.send_message_json(topic_start_deployment, json.dumps(self.request_json)) + self.message_center.send_message_json(topic_start_deployment, json.dumps(request_json)) def 
send_deployment_delete_request_to_edges(self, payload, model_msg_object): - if model_msg_object is None: # Called after the diff operation - if "diff_devices" not in self.request_json or self.request_json["diff_devices"] is None: - return - else: - edge_id_list_to_delete = [] - for device_id in self.request_json["diff_devices"]: - if self.request_json["diff_devices"][device_id] == ServerConstants.DEVICE_DIFF_DELETE_OPERATION: - edge_id_list_to_delete.append(device_id) - if len(edge_id_list_to_delete) == 0: - return - - try: - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, - self.redis_password) - - # 1. Get & Delete the endpoint device info in Redis / SQLite - device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_end_point_device_info(self.request_json["run_id"]) - - if device_objs is None: - raise Exception("The device list in local redis is None") - else: - total_device_objs_list = json.loads(device_objs) - for device_obj in total_device_objs_list: - if device_obj["id"] in edge_id_list_to_delete: - total_device_objs_list.remove(device_obj) - - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_device_info( - self.request_json["end_point_id"], self.request_json["end_point_name"], - json.dumps(total_device_objs_list)) - - # 2 Delete the result in deployment result list in Redis / SQLite - device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_deployment_result_list(self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"]) - delete_device_result_list = [] - for device_result in device_result_list: - device_result_dict = json.loads(device_result) - if int(device_result_dict["cache_device_id"]) in edge_id_list_to_delete: - delete_device_result_list.append(device_result) - - for delete_item in delete_device_result_list: - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result( - delete_item, self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"] - ) - - except Exception as e: - run_id = self.request_json["run_id"] - error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/error_delete_{run_id}.txt" - if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))): - os.makedirs(os.path.dirname(os.path.expanduser(error_log_path))) - with open(os.path.expanduser(error_log_path), "w") as f: - f.write(str(self.request_json)) - f.write(str(e)) - f.write('\n') - raise e - - else: # Delete the whole endpoint - edge_id_list_to_delete = model_msg_object.device_ids - - # For Debug - if payload is not None: - debug_log_path = f"~/.fedml/fedml-model-server/fedml/logs/tmp_debug_delete_payload.txt" - if not os.path.exists(os.path.dirname(os.path.expanduser(debug_log_path))): - os.makedirs(os.path.dirname(os.path.expanduser(debug_log_path))) - with open(os.path.expanduser(debug_log_path), "w") as f: - f.write(str(payload)) + edge_id_list_to_delete = model_msg_object.device_ids # Remove the model master node id from the list using index 0 edge_id_list_to_delete = edge_id_list_to_delete[1:] diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index f3d68c1f6a..867f299ccc 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ 
b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -125,7 +125,7 @@ def run_impl( # Changed the status to "IDLE" self.status_reporter.report_server_id_status( run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, - is_from_model=True, server_agent_id=self.edge_id, server_id=self.edge_id, edge_id=self.edge_id,) + is_from_model=True, server_agent_id=self.edge_id, server_id=self.edge_id, edge_id=self.edge_id) # Check if we should stop the runner logging.info("send the model inference request to slave devices...") @@ -136,28 +136,32 @@ def run_impl( devices_sent_add_or_remove_msg = self.send_deployment_start_request_to_edges() # Handle "op:update" - devices_sent_update_remove_msg = self.send_first_scroll_update_msg() - - if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0: - # No device is added or removed, and no device is updated or removed - ip = GeneralConstants.get_ip_address(self.request_json) - master_port = os.getenv("FEDML_MASTER_PORT", None) - if master_port is not None: - inference_port = int(master_port) - model_inference_port = inference_port - if ip.startswith("http://") or ip.startswith("https://"): - model_inference_url = "{}/api/v1/predict".format(ip) - else: - model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port) + try: + devices_sent_update_remove_msg = self.send_first_scroll_update_msg() + + if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0: + # No device is added or removed, and no device is updated or removed + logging.info("No device is added, updated or removed. No action needed for reconciliation.") + ip = GeneralConstants.get_ip_address(self.request_json) + master_port = os.getenv("FEDML_MASTER_PORT", None) + if master_port is not None: + inference_port = int(master_port) + model_inference_port = inference_port + if ip.startswith("http://") or ip.startswith("https://"): + model_inference_url = "{}/api/v1/predict".format(ip) + else: + model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port) - self.send_deployment_status( - run_id, end_point_name, model_name, model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, - message_center=self.message_center - ) + self.send_deployment_status( + run_id, end_point_name, model_name, model_inference_url, + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, + message_center=self.message_center + ) - self.trigger_completed_event() - return + self.trigger_completed_event() + return + except Exception as e: + logging.info(f"Exception at run impl {traceback.format_exc()}") self.deployment_result_queue = run_extend_queue_list[0] while True: @@ -187,19 +191,78 @@ def process_deployment_result_message(self, topic=None, payload=None): model_name = payload_json["model_name"] model_version = payload_json["model_version"] model_status = payload_json["model_status"] - replica_no = payload_json.get("replica_no", None) # Idx start from 1 + replica_no = payload_json.get("replica_no", None) # "no" Idx start from 1 run_id_str = str(end_point_id) + # HotFix(Raphael): logging service cross talk + # Change the handler since each handler need to write to different log files + try: + # Remove the existing file handler + root_logger = logging.getLogger() + for handler in root_logger.handlers: + if isinstance(handler, logging.FileHandler): + root_logger.removeHandler(handler) + + # Correct log path: ~/.fedml/fedml-model-server/fedml/logs/fedml-run-$rid-edge-$eid.log 
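+            # One log file per (run_id, edge_id) pair keeps concurrent
+            # endpoint deployments from interleaving their output.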
+ log_file = os.path.join(ServerConstants.get_log_file_dir(), + f"fedml-run-{run_id_str}-edge-{self.edge_id}.log") + + filehandler = logging.FileHandler(log_file, "a") + + program_prefix = "FedML-Server @device-id-{}".format(self.edge_id) + formatter = logging.Formatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] " + "[%(filename)s:%(lineno)d:%(funcName)s] %(" + "message)s", + datefmt="%a, %d %b %Y %H:%M:%S") + + filehandler.setFormatter(formatter) + root_logger.addHandler(filehandler) + except Exception as e: + logging.warning(f"Failed to change the logging handler due to {e}.") + + logging.info("========== callback_deployment_result_message ==========\n") + # Identify the operation for this run (add, remove, update) + if run_id_str not in self.running_request_json: + logging.error(f"Run id {run_id_str} is not in the running request json.") + return + + # The rolling update and scale out / in operation should not happen at the same time + assert not ("replica_num_diff" in self.running_request_json[run_id_str] and + len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0 and + "replica_version_diff" in self.running_request_json[run_id_str]) + + if "replica_version_diff" in self.running_request_json[run_id_str]: + run_operation = "UPDATE" + elif "replica_num_diff" in self.running_request_json[run_id_str] and \ + len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0: + run_operation = "ADD_OR_REMOVE" + else: + logging.error(f"Unsupported operation for run id {run_id_str}. and request json " + f"{self.running_request_json[run_id_str]}") + return + + logging.info(f"End point {end_point_id}; Device {device_id}; replica {replica_no}; " + f"run_operation {run_operation} model status {model_status}.") + + # OPTIONAL DEBUG PARAMS + # this_run_controller = self.model_runner_mapping[run_id_str].replica_controller + # logging.info(f"The current replica controller state is " + # f"Total version diff num {this_run_controller.total_replica_version_diff_num}") + # logging.info(f"self.request_json now {self.request_json}") # request_json will be deprecated + # this_run_request_json = self.running_request_json.get(run_id_str, None) + # logging.info(f"self.running_request_json now {this_run_request_json}") + # Set redis + sqlite deployment result FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - # Save deployment result to local cache + # Deal with different model status if model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED: + # remove FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ delete_deployment_result_with_device_id_and_replica_no( end_point_id, end_point_name, model_name, device_id, replica_no) elif model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED: - # add or update + # add or update or update-failed-rollback FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ set_deployment_result(end_point_id, end_point_name, model_name, model_version, @@ -210,38 +273,66 @@ def process_deployment_result_message(self, topic=None, payload=None): else: if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED: logging.error(f"Unsupported model status {model_status}.") - self.send_deployment_status( - end_point_id, end_point_name, payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, - message_center=self.message_center - ) + # Failure handler + if run_operation == "ADD_OR_REMOVE": + # TODO(Raphael): Also support rollback for scale out / in operation + self.send_deployment_status( + end_point_id, end_point_name, payload_json["model_name"], "", + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, + message_center=self.message_center) + return + else: + # Overwrite the json with the rollback version diff + rollback_version_diff = self.replica_controller.rollback_get_replica_version_diff( + device_id_trigger=device_id, replica_no_trigger=replica_no) + + # Change the target version to the start version + self.replica_controller.rollback_setback_target_replica_version() + + self.running_request_json[run_id_str]["replica_version_diff"] = copy.deepcopy(rollback_version_diff) + + # Send the rollback message to the worker devices + self.send_rollback_msg(run_id_str) + + # Set the deployment status to ABORTING + self.send_deployment_status( + end_point_id, end_point_name, payload_json["model_name"], "", + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING, + message_center=self.message_center) + + # TODO(Raphael): Check if resource left not cleaned up + return + + # Move to the next state (rolling update, finish the deployment, etc.) # Notify the replica number controller - self.callback_update_curr_replica_num_state(device_id, replica_no, model_status) + (self.replica_controller.callback_update_curr_replica_num_state(device_id, replica_no, model_status)) # Notify the replica version controller, which might trigger the next rolling update - self.send_next_scroll_update_msg(device_id, replica_no) + self.send_next_scroll_update_msg(run_id_str, device_id, replica_no) # Update the global deployment result mapping self.slave_deployment_results_map[str(device_id)] = model_status - # Check if the endpoint is running - request_json = self.request_json + logging.info("callback_deployment_result_message: topic {}, payload {}, result mapping {}.".format( + topic, payload, self.slave_deployment_results_map)) + + request_json = self.running_request_json.get(run_id_str, None) if request_json is None: - logging.error(f"The endpoint {end_point_id} is not running.") + logging.error(f"The endpoint {end_point_id} is no longer running.") self.send_deployment_status( end_point_id, end_point_name, payload_json["model_name"], "", ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, - message_center=self.message_center - ) + message_center=self.message_center) return - # Wait for all replica's result, not device-level - if self.is_all_replica_num_reconciled() and self.is_all_replica_version_reconciled(): + # Wait for all replica-level's result, not device-level + if (self.replica_controller.is_all_replica_num_reconciled() and + self.replica_controller.is_all_replica_version_reconciled()): ''' When all the devices have finished the add / delete / update operation ''' - # 1. 
We should generate one unified inference api + # Generate one unified inference api # Note that here we use the gateway port instead of the inference port that is used by the slave device model_config_parameters = request_json["parameters"] inference_port = model_config_parameters.get("server_internal_port", @@ -255,15 +346,16 @@ def process_deployment_result_message(self, topic=None, payload=None): model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, end_point_id) # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress" - self.send_deployment_stages( - end_point_id, model_name, model_id, model_inference_url, - ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"], ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"], - "inference url: {}".format(model_inference_url), message_center=self.message_center) - - # Prepare the result to MLOps - deployed_replica_payload = self.get_deployed_replica_payload() - if deployed_replica_payload is not None: - payload_json = deployed_replica_payload + self.send_deployment_stages(end_point_id, model_name, model_id, + model_inference_url, + ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"], + ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"], + "inference url: {}".format(model_inference_url), + message_center=self.message_center) + + # Send the result to MLOps + if self.deployed_replica_payload is not None: + payload_json = self.deployed_replica_payload model_slave_url = payload_json["model_url"] payload_json["model_url"] = model_inference_url payload_json["port"] = inference_port_external @@ -274,15 +366,18 @@ def process_deployment_result_message(self, topic=None, payload=None): model_inputs = model_metadata["inputs"] ret_inputs = list() if "type" in model_metadata and model_metadata["type"] == "default": - payload_json["input_json"] = { - "end_point_name": end_point_name, "model_name": model_name, "token": str(token), - "inputs": model_inputs, "outputs": []} + payload_json["input_json"] = {"end_point_name": end_point_name, + "model_name": model_name, + "token": str(token), + "inputs": model_inputs, + "outputs": []} payload_json["output_json"] = model_metadata["outputs"] else: raise Exception(f"Unsupported model metadata type {model_metadata['type']}") self.send_deployment_results_with_payload( - end_point_id, end_point_name, payload_json) + end_point_id, end_point_name, payload_json, + self.replica_controller.target_replica_ids) payload_json_saved = payload_json payload_json_saved["model_slave_url"] = model_slave_url @@ -295,12 +390,20 @@ def process_deployment_result_message(self, topic=None, payload=None): FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ set_end_point_activation(end_point_id, end_point_name, True) - self.send_deployment_status( - end_point_id, end_point_name, payload_json["model_name"], - model_inference_url, ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, - message_center=self.message_center - ) + if self.replica_controller.under_rollback: + self.send_deployment_status(end_point_id, end_point_name, + payload_json["model_name"], + model_inference_url, + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED) + self.replica_controller.under_rollback = False + else: + self.send_deployment_status(end_point_id, end_point_name, + payload_json["model_name"], + model_inference_url, + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, + message_center=self.message_center) + time.sleep(3) self.trigger_completed_event() @staticmethod @@ -429,17 +532,28 @@ def send_first_scroll_update_msg(self): first_chunk_dict = self.request_json["replica_version_diff"] # Delete the record of the replaced device - self.delete_device_replica_info_on_master(first_chunk_dict) + try: + self.delete_device_replica_info_on_master( + self.request_json["end_point_id"], self.request_json["end_point_name"], + self.request_json["model_config"]["model_name"], first_chunk_dict) + except Exception as e: + logging.info(f"Exception at send_first_scroll_update_msg {traceback.format_exc()}") + + logging.info(f"Send the first scroll update msg to the device {first_chunk_dict} ") # Send the deployment msg to the devices, (we reuse the start_deployment msg) for edge_id in first_chunk_dict.keys(): if edge_id == self.edge_id: continue # send start deployment request to each device - self.send_deployment_start_request_to_edge(edge_id) + self.send_deployment_start_request_to_edge(edge_id, self.request_json) return list(first_chunk_dict.keys()) - def send_next_scroll_update_msg(self, device_id, replica_no): + def send_next_scroll_update_msg(self, run_id_str, device_id, replica_no): + """ + Send the next scroll update msg to the devices if needed. + If there is no need for the next scroll update, directly return. 
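+        A "scroll" is one chunk of replicas in the rolling update; chunks are
+        released sequentially, gated by the current replica updating window.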
+ """ if replica_no is None: return @@ -448,33 +562,70 @@ def send_next_scroll_update_msg(self, device_id, replica_no): if replica_controller.total_replica_version_diff_num == 0: return + if replica_controller.under_rollback: + replica_controller.intermediate_replica_version[device_id][replica_no] = replica_controller.start_version + return + + logging.info(f"Curr updating window: {replica_controller.curr_replica_updating_window} " + f"Curr version diff num: {replica_controller.total_replica_version_diff_num}") + replica_controller.callback_update_updating_window(device_id, replica_no) # Decide whether to send the next scroll update next_chunk_dict = replica_controller.get_next_chunk_devices_replica() - replica_controller.curr_replica_updating_window = copy.deepcopy(next_chunk_dict) - if next_chunk_dict: + logging.info(f"The next scroll update for end point {run_id_str} is {next_chunk_dict}") + # Update curr updating window + replica_controller.curr_replica_updating_window = copy.deepcopy(next_chunk_dict) + + # Use global deployment result mapping to decide whether to send the next scroll update self.request_json["replica_version_diff"] = next_chunk_dict - self.delete_device_replica_info_on_master(next_chunk_dict) + + # Avoid using the old request_json + try: + self.delete_device_replica_info_on_master( + self.request_json["end_point_id"], + self.request_json["end_point_name"], + self.request_json["model_config"]["model_name"], + next_chunk_dict) + except Exception as e: + logging.info(f"Exception at send_next_scroll_update_msg {traceback.format_exc()}") # Send the deployment msg to the devices, (we reuse the start_deployment msg) for edge_id in next_chunk_dict.keys(): if edge_id == self.edge_id: continue # send start deployment request to each device - self.send_deployment_start_request_to_edge(edge_id) + self.send_deployment_start_request_to_edge(edge_id, self.request_json) return - def delete_device_replica_info_on_master(self, edge_id_replica_no_dict): + def send_rollback_msg(self, run_id_str): + # Avoid using the old request_json + try: + self.delete_device_replica_info_on_master( + self.request_json["end_point_id"], + self.request_json["end_point_name"], + self.request_json["model_config"]["model_name"], + self.request_json["replica_version_diff"]) + except Exception as e: + logging.info(f"Exception at send_rollback_msg {traceback.format_exc()}") + + # Send the deployment msg to the devices, (we reuse the start_deployment msg) + for edge_id in self.request_json["replica_version_diff"].keys(): + if edge_id == self.edge_id: + continue + # send start deployment request to each device + self.send_deployment_start_request_to_edge(edge_id, self.request_json) + + def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model_name, edge_id_replica_no_dict): FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) # Remove the record of the replaced device # [Deprecated] deployment status & device info # Delete the result in deployment result list in Redis / SQLite device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - get_deployment_result_list(self.request_json["end_point_id"], self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"]) + get_deployment_result_list(endpoint_id, endpoint_name, model_name) + delete_device_result_list = [] for device_result in device_result_list: device_result_dict = json.loads(device_result) @@ -485,9 +636,7 @@ def delete_device_replica_info_on_master(self, edge_id_replica_no_dict): for delete_item in delete_device_result_list: FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result( - delete_item, self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"] + delete_item, endpoint_id, endpoint_name, model_name ) logging.info(f"Deleted the record of the replaced device {delete_device_result_list}") @@ -515,7 +664,7 @@ def is_all_replica_version_reconciled(self): return False @staticmethod - def generate_request_json_with_replica_diff(run_id, edge_id, request_json): + def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json): # Replica Controller is per deployment! replica_controller = FedMLDeviceReplicaController(edge_id, request_json) logging.info(f"Start Diff Replica controller for run {run_id} on edge {edge_id}") @@ -525,6 +674,14 @@ def generate_request_json_with_replica_diff(run_id, edge_id, request_json): new_request_with_num_diff = replica_controller.generate_diff_to_request_json() request_json = new_request_with_num_diff + return request_json + + @staticmethod + def generate_request_json_with_replica_version_diff(run_id, edge_id, request_json): + # Replica Controller is per deployment! + replica_controller = FedMLDeviceReplicaController(edge_id, request_json) + logging.info(f"Start Diff Replica controller for run {run_id} on edge {edge_id}") + # Prepare version diff new_request_with_version_diff = replica_controller.init_first_update_device_replica_mapping() request_json = new_request_with_version_diff diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py index 40896b9ee8..7221a09574 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py @@ -58,5 +58,9 @@ def recover_inference_and_monitor(): FedMLDeployMasterJobRunner.recover_inference_and_monitor() @staticmethod - def generate_request_json_with_replica_diff(run_id, edge_id, request_json): - return FedMLDeployMasterJobRunner.generate_request_json_with_replica_diff(run_id, edge_id, request_json) + def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json): + return FedMLDeployMasterJobRunner.generate_request_json_with_replica_num_diff(run_id, edge_id, request_json) + + @staticmethod + def generate_request_json_with_replica_version_diff(run_id, edge_id, request_json): + return FedMLDeployMasterJobRunner.generate_request_json_with_replica_num_diff(run_id, edge_id, request_json) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index e8be50f77f..8566848ec6 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -1,7 +1,6 @@ import json import logging -import os from fedml.core.mlops import 
MLOpsConfigs, MLOpsRuntimeLog, MLOpsRuntimeLogDaemon from .device_model_cache import FedMLModelCache from .device_model_db import FedMLModelDatabase @@ -102,8 +101,8 @@ def callback_deployment_result_message(self, topic=None, payload=None): FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload) def callback_delete_deployment(self, topic, payload): - # Parse payload as the model message object. logging.info("[Master] callback_delete_deployment") + # Parse payload as the model message object. model_msg_object = FedMLModelMsgObject(topic, payload) # Set end point as deactivated status @@ -115,8 +114,7 @@ def callback_delete_deployment(self, topic, payload): delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version) - FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges( - model_msg_object.inference_end_point_id, payload, model_msg_object) + FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(payload, model_msg_object) FedMLDeployJobRunnerManager.get_instance().stop_job_runner(model_msg_object.run_id) @@ -138,7 +136,7 @@ def callback_start_deployment(self, topic, payload): except Exception as e: pass - # Parse the deployment parameters + # Get deployment params request_json = json.loads(payload) run_id = request_json["end_point_id"] end_point_name = request_json["end_point_name"] @@ -147,6 +145,7 @@ def callback_start_deployment(self, topic, payload): user_name = request_json["user_name"] device_ids = request_json["device_ids"] device_objs = request_json["device_objs"] + model_config = request_json["model_config"] model_name = model_config["model_name"] model_id = model_config["model_id"] @@ -156,62 +155,76 @@ def callback_start_deployment(self, topic, payload): inference_engine = model_config.get("inference_engine", 0) inference_end_point_id = run_id + logging.info("[Master] received start deployment request for end point {}.".format(run_id)) + # Start log processor for current run self.args.run_id = run_id self.args.edge_id = self.edge_id - MLOpsRuntimeLog.get_instance(self.args).init_logs() + MLOpsRuntimeLog(args=self.args).init_logs() MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source( ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT) MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id) - # Generate the deployment new parameters - logging.info("callback_start_deployment {}".format(payload)) + # Add additional parameters to the request_json run_id = inference_end_point_id - run_id_str = str(run_id) + self.args.run_id = run_id + self.run_id = run_id request_json["run_id"] = run_id self.request_json = request_json + run_id_str = str(run_id) self.running_request_json[run_id_str] = request_json - diff_devices, diff_version = self.get_diff_devices(run_id) - self.request_json["diff_devices"] = diff_devices - self.request_json["diff_version"] = diff_version self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json) - # Save the endpoint device info - self.init_device_update_map() + # Target status of the devices FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\
            set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs))
 
-        # Save the endpoint token
-        usr_indicated_token = FedMLDeployMasterProtocolManager.get_usr_indicated_token(request_json)
+        # Setup Token
+        usr_indicated_token = self.get_usr_indicated_token(request_json)
         if usr_indicated_token != "":
             logging.info(f"Change Token from {token} to {usr_indicated_token}")
             token = usr_indicated_token
         FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
             set_end_point_token(run_id, end_point_name, model_name, token)
 
-        # Subscribe deployment result messages from slave devices
         self.subscribe_deployment_messages_from_slave_devices(request_json)
 
-        # Send stage: MODEL_DEPLOYMENT_STAGE1 = "Received"
+        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received"
         FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
             self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"],
             ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for end point {}".format(run_id),
             message_center=self.message_center)
 
-        # Send stage: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
+        # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing"
         FedMLDeployJobRunnerManager.get_instance().send_deployment_stages(
             self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"],
             ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"],
             message_center=self.message_center)
 
-        # Save the runner info
         ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id)
 
-        # Start the job runner to deploy models
-        self.running_request_json[run_id_str] = FedMLDeployJobRunnerManager.generate_request_json_with_replica_diff(
+        # Num diff
+        request_json = FedMLDeployJobRunnerManager.generate_request_json_with_replica_num_diff(
+            run_id, self.edge_id, request_json
+        )
+
+        # Listen to extra worker topics, especially when a worker's replica count is scaled down to zero.
+        # In this case, the Java backend will NOT send those worker ids to the master, but we still need to listen to them.
+ if "replica_num_diff" in request_json and len(request_json["replica_num_diff"]) > 0: + for device_id in request_json["replica_num_diff"].keys(): + # {"op": "remove", "curr_num": 1, "target_num": 0} + if request_json["replica_num_diff"][device_id]["op"] == "remove" and \ + request_json["replica_num_diff"][device_id]["target_num"] == 0: + self.subscribe_spec_device_message(run_id, device_id) + + # Version diff + request_json = FedMLDeployJobRunnerManager.generate_request_json_with_replica_version_diff( run_id, self.edge_id, request_json ) + self.running_request_json[run_id_str] = request_json + + # Start the job runner to deploy models self._get_job_runner_manager().start_job_runner( run_id, request_json, args=self.args, edge_id=self.edge_id, sender_message_queue=self.message_center.get_sender_message_queue(), @@ -262,81 +275,6 @@ def callback_deactivate_deployment(self, topic, payload): FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation( model_msg_object.inference_end_point_id, model_msg_object.model_name, False) - def get_diff_devices(self, run_id) -> (dict, dict): - """ - {device_id(int): "op: add" | "op: delete" | "op: replace"} - "op: add" -> need to add - "op: delete" -> need to delete device - "op: replace" -> need to restart the container of the device on same port with new (same) model pkg - - {device_id(int): "old_version"} - """ - try: - logging.info(f"Get diff devices for run {run_id}") - request_json = self.running_request_json.get(str(run_id)) - - diff_devices = {} - diff_version = {} - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - device_objs = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_end_point_device_info(run_id) - if device_objs is None: - for new_device_id in request_json["device_ids"]: - diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION - else: - device_objs_dict = json.loads(device_objs) - device_ids_frm_db = [d["id"] for d in device_objs_dict] - - for exist_device_id in device_ids_frm_db: - if exist_device_id not in request_json["device_ids"]: - diff_devices[exist_device_id] = ServerConstants.DEVICE_DIFF_DELETE_OPERATION - - for new_device_id in request_json["device_ids"]: - if new_device_id not in device_ids_frm_db: - diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_ADD_OPERATION - else: - if new_device_id == self.edge_id: - continue - - old_version = self.should_update_device(request_json, new_device_id) - if old_version: - diff_devices[new_device_id] = ServerConstants.DEVICE_DIFF_REPLACE_OPERATION - diff_version[new_device_id] = old_version - else: - pass - logging.info(f"Diff devices: {diff_devices}") - except Exception as e: - error_log_path = f"~/.fedml/fedml-model-server/fedml/logs/{run_id}_error.txt" - if not os.path.exists(os.path.dirname(os.path.expanduser(error_log_path))): - os.makedirs(os.path.dirname(os.path.expanduser(error_log_path))) - with open(os.path.expanduser(error_log_path), "w") as f: - f.write(str(e)) - raise e - return diff_devices, diff_version - - def should_update_device(self, payload, new_device_id): - """ - Query the device info in local redis, if the device info is different from the payload, - return the old model version - """ - device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - get_deployment_result_list(self.request_json["end_point_id"], - self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"]) - - for device_result in device_result_list: - if device_result is None: - continue - device_result_dict = json.loads(device_result) - - if int(device_result_dict["cache_device_id"]) == new_device_id: - result_body = json.loads(device_result_dict["result"]) - if result_body["model_version"] != payload["model_config"]["model_version"]: - return result_body["model_version"] - else: - return None - return None - @staticmethod def get_usr_indicated_token(request_json) -> str: usr_indicated_token = "" @@ -358,8 +296,20 @@ def subscribe_deployment_messages_from_slave_devices(self, request_json): if str(edge_id) == str(self.edge_id): continue # subscribe deployment result message for each model device - deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(edge_id) + deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format( + run_id, edge_id) self.add_message_listener(deployment_results_topic, self.callback_deployment_result_message) self.subscribe_msg(deployment_results_topic) logging.info("subscribe device messages {}".format(deployment_results_topic)) + + def subscribe_spec_device_message(self, run_id, device_id): + if device_id == self.edge_id: + return + + # subscribe deployment result message for each model device + deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format( + run_id, device_id) + + self.add_message_listener(deployment_results_topic, self.callback_deployment_result_message) + self.subscribe_msg(deployment_results_topic) diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py index 5d6f1a4d8e..78e2527e0c 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -7,7 +7,6 @@ import traceback import urllib from abc import ABC -from urllib.parse import urljoin, urlparse import yaml from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils from fedml.core.mlops import MLOpsRuntimeLog @@ -27,7 +26,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id FedMLBaseSlaveJobRunner.__init__( self, args, edge_id=edge_id, request_json=request_json, agent_config=agent_config, run_id=run_id, cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str, agent_data_dir=ClientConstants.get_data_dir(), - agent_package_download_dir=ClientConstants.get_package_download_dir(), + agent_package_download_dir=ClientConstants.get_model_package_dir(), agent_package_unzip_dir=GeneralConstants.get_package_unzip_dir(ClientConstants.get_package_download_dir()), agent_log_file_dir=ClientConstants.get_log_file_dir() ) @@ -57,8 +56,7 @@ def retrieve_binary_model_file(self, package_name, package_url): local_package_file = "{}".format(os.path.join(local_package_path, package_name)) if os.path.exists(local_package_file): os.remove(local_package_file) - package_url_without_query_path = urljoin(package_url, urlparse(package_url).path) - urllib.request.urlretrieve(package_url_without_query_path, local_package_file, + urllib.request.urlretrieve(package_url, local_package_file, reporthook=self.package_download_progress) unzip_package_path = os.path.join(unzip_package_path, package_name) @@ -79,10 +77,6 @@ def 
get_model_bin_file(unzip_package_full_path): def update_local_fedml_config(self, run_id, model_config, model_config_parameters=None): model_name = model_config["model_name"] model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - inference_engine = model_config.get("inference_engine", 0) - inference_end_point_id = run_id # Retrieve model package or model binary file. if self.model_is_from_open: @@ -92,7 +86,6 @@ def update_local_fedml_config(self, run_id, model_config, model_config_parameter model_bin_file = FedMLDeployWorkerJobRunner.get_model_bin_file(unzip_package_path) # Load the config to memory - package_conf_object = {} fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml") # Inject the config from UI to pkg yaml @@ -117,92 +110,49 @@ def download_model_package(self, package_name, package_url): # Override def run_impl(self, run_extend_queue_list, sender_message_center, listener_message_queue, status_center_queue): + # Get deployment params run_id = self.request_json["end_point_id"] end_point_name = self.request_json["end_point_name"] - token = self.request_json["token"] - user_id = self.request_json["user_id"] - user_name = self.request_json["user_name"] device_ids = self.request_json["device_ids"] - device_objs = self.request_json["device_objs"] master_ip = self.request_json["master_node_ip"] - model_config = self.request_json["model_config"] model_name = model_config["model_name"] model_id = model_config["model_id"] model_version = model_config["model_version"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) model_config_parameters = self.request_json["parameters"] - - self.replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json) - inference_port = model_config_parameters.get("worker_internal_port", ClientConstants.MODEL_INFERENCE_DEFAULT_PORT) inference_port_external = model_config_parameters.get("worker_external_port", inference_port) - - if "using_triton" in model_config_parameters and model_config_parameters["using_triton"]: - inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_TRITON - else: - inference_engine = ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT - - logging.info("[Critical] The inference_engine is: {}".format(inference_engine)) - - self.model_is_from_open = True if model_config.get("is_from_open", 0) == 1 else False - if self.model_is_from_open: - model_net_url = model_config["model_net_url"] + inference_engine = model_config_parameters.get("inference_engine", + ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT) inference_end_point_id = run_id - use_gpu = "gpu" # TODO: Get GPU from device infos - memory_size = "4096m" # TODO: Get Memory size for each instance self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id) + MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - self.check_runner_stop_event() - - logging.info("model deployment request: {}".format(self.request_json)) + logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.") + self.replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json) + if self.replica_handler is not None: + logging.info(f"=================Worker replica Handler ======================" + f"Reconcile with num diff 
{self.replica_handler.replica_num_diff} " + f"and version diff {self.replica_handler.replica_version_diff}." + f"=============================================================") + else: + logging.error(f"[Worker] Replica handler is None.") + return False - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + self.check_runner_stop_event() + # Report the deployment status to mlops self.status_reporter.report_client_id_status( self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING, is_from_model=True, running_json=json.dumps(self.request_json), run_id=run_id) - self.status_reporter.report_client_id_status( self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, is_from_model=True, run_id=run_id) self.check_runner_stop_event() - # update local config with real time parameters from server and dynamically replace variables value - logging.info("download and unzip model to local...") - unzip_package_path, model_bin_file, fedml_config_object = \ - self.update_local_fedml_config(run_id, model_config, model_config_parameters) - if unzip_package_path is None or fedml_config_object is None: - logging.info("failed to update local fedml config.") - self.check_runner_stop_event() - self.status_reporter.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=run_id) - return False - - logging.info("check downloaded packages...") - if not os.path.exists(unzip_package_path): - logging.info("failed to unzip file.") - self.check_runner_stop_event() - self.status_reporter.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=run_id) - return False - - # download model net and load into the torch model - model_from_open = None - self.model_is_from_open = None - - logging.info("start the model deployment...") - self.check_runner_stop_event() - running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ - "", "", model_version, {}, {} - # Reconcile the replica number (op: add, remove) prev_rank, op, op_num = self.replica_handler.reconcile_num_replica() @@ -212,55 +162,134 @@ def run_impl(self, run_extend_queue_list, sender_message_center, replica_rank_to_update, op = self.replica_handler.reconcile_replica_version() if not op: - logging.info("No need to reconcile.") + logging.info("[Worker] No need to reconcile.") return True + logging.info( + f"================Worker Reconcile Operations ======================\n" + f" op: {op}; op num: {op_num}.\n" + f"==================================================================\n") + + # If not rollback, download package from MLOps; otherwise, use the backup package + if op != "rollback": + logging.info("Download and unzip model to local...") + unzip_package_path, _, _ = \ + self.update_local_fedml_config(run_id, model_config, model_config_parameters) + if unzip_package_path is None: + logging.info("Failed to update local fedml config.") + self.check_runner_stop_event() + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + is_from_model=True, run_id=run_id) + return False + + if not os.path.exists(unzip_package_path): + logging.info("Failed to unzip file.") + self.check_runner_stop_event() + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + is_from_model=True, run_id=run_id) + return False + else: + logging.info("Try to use backup package to 
rollback...")
+            # Find folder under "~/.fedml/fedml-model-client/fedml/model_packages \
+            # /${end_point_id}_${end_point_name}_${model_name}_${model_version}"
+            backup_folder_full_path = None
+            models_root_dir = ClientConstants.get_model_package_dir()
+
+            # Find the version (notified by the master) to roll back to
+            version_diff_dict = self.request_json["replica_version_diff"][str(self.edge_id)]
+            version_rollback_to = None
+            for replica_no, rollback_ops in version_diff_dict.items():
+                version_rollback_to = rollback_ops["new_version"]  # Note that new_version is the version to roll back to
+                break
+            if version_rollback_to is None:
+                logging.error(f"No old version found in request_json for run_id: {self.run_id} "
+                              f"edge_id: {self.edge_id}; rollback failed.")
+                return False
+            model_version = version_rollback_to
+
+            # Format the version to match the folder name
+            model_version_formatted = version_rollback_to.replace(" ", "-")
+            model_version_formatted = model_version_formatted.replace(":", "-")
+
+            last_run_folder_sub_fd = f"{run_id}_{end_point_name}_{model_name}_{model_version_formatted}"
+            for folder in os.listdir(models_root_dir):
+                if last_run_folder_sub_fd in folder:
+                    backup_folder_full_path = os.path.join(models_root_dir, folder)
+                    break
+            if backup_folder_full_path is None:
+                logging.error(f"No backup folder found for run_id: {self.run_id} edge_id: {self.edge_id} "
+                              f"under {models_root_dir} with sub folder {last_run_folder_sub_fd}, rollback failed.")
+                return False
+
+            # Inside the backup folder, find the unzipped package with prefix unzip_fedml_run
+            unzip_package_path_parent = None
+            for folder in os.listdir(backup_folder_full_path):
+                if folder.startswith("unzip_fedml_run"):
+                    unzip_package_path_parent = os.path.join(backup_folder_full_path, folder)
+                    break
+
+            # Inside the unzip folder, find the unzipped package; it should be the only one
+            unzip_package_path = None
+            for folder in os.listdir(unzip_package_path_parent):
+                if os.path.isdir(os.path.join(unzip_package_path_parent, folder)):
+                    unzip_package_path = os.path.join(unzip_package_path_parent, folder)
+                    break
+
+            if unzip_package_path is None:
+                logging.error(f"No unzipped package found for run_id: {self.run_id} edge_id: {self.edge_id} "
+                              f"under {backup_folder_full_path}, rollback failed.")
+                return False
+
+        self.check_runner_stop_event()
+
+        running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
+            "", "", model_version, {}, {}
+
         if op == "add":
             worker_ip = GeneralConstants.get_ip_address(self.request_json)
-            for rank in range(prev_rank+1, prev_rank+1+op_num):
+            for rank in range(prev_rank + 1, prev_rank + 1 + op_num):
                 # TODO: Support Rollback if this for loop failed
                 try:
                     running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                         start_deployment(
-                            inference_end_point_id, end_point_name, model_id, model_version,
-                            unzip_package_path, model_bin_file, model_name, inference_engine,
-                            ClientConstants.INFERENCE_HTTP_PORT,
-                            ClientConstants.INFERENCE_GRPC_PORT,
-                            ClientConstants.INFERENCE_METRIC_PORT,
-                            use_gpu, memory_size,
-                            ClientConstants.INFERENCE_CONVERTOR_IMAGE,
-                            ClientConstants.INFERENCE_SERVER_IMAGE,
-                            worker_ip,
-                            self.model_is_from_open, model_config_parameters,
-                            model_from_open,
-                            token,
-                            master_ip, self.edge_id, master_device_id=device_ids[0], replica_rank=rank,
+                            end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id,
+                            model_version=model_version, model_storage_local_path=unzip_package_path,
+                            inference_model_name=model_name, inference_engine=inference_engine,
+                            infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
+                            master_device_id=device_ids[0], replica_rank=rank,
                             gpu_per_replica=int(self.replica_handler.gpu_per_replica)
                         )
                 except Exception as e:
                     inference_output_url = ""
-                    logging.error(f"Exception at deployment: {traceback.format_exc()}")
+                    logging.error(f"[Worker] Exception at deployment: {traceback.format_exc()}")
 
                 if inference_output_url == "":
-                    logging.error("failed to deploy the model...")
+                    logging.error("[Worker] Failed to deploy the model.")
+
+                    # Send failed result back to master
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED,
                         model_id, model_name, inference_output_url, inference_model_version, inference_port,
                         inference_engine, model_metadata, model_config)
+
+                    self.status_reporter.run_id = self.run_id
                     self.status_reporter.report_client_id_status(
                         self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED,
                         is_from_model=True, run_id=self.run_id)
+
                     return False
                 else:
-                    logging.info("finished deployment, continue to send results to master...")
+                    # Send successful result back to master
+                    logging.info("Finished deployment, continue to send results to master...")
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
                         model_id, model_name, inference_output_url, model_version, inference_port_external,
                         inference_engine, model_metadata, model_config, replica_no=rank + 1)
 
-                    if inference_port_external != inference_port:  # Save internal port to local db
+                    if inference_port_external != inference_port:
+                        # Save internal port to local db
                         logging.info("inference_port_external {} != inference_port {}".format(
                             inference_port_external, inference_port))
                         result_payload = self.construct_deployment_results(
@@ -272,21 +301,22 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                         run_id, end_point_name, model_name, model_version, self.edge_id,
                         json.dumps(result_payload), replica_no=rank + 1)
 
-                logging.info(f"Deploy replica {rank+1} / {prev_rank+1+op_num} successfully.")
+                logging.info(f"Deploy replica {rank + 1} / {prev_rank + 1 + op_num} successfully.")
                 time.sleep(5)
 
             time.sleep(1)
+            self.status_reporter.run_id = self.run_id
             self.status_reporter.report_client_id_status(
                 self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                 is_from_model=True, run_id=self.run_id)
             return True
         elif op == "remove":
-            for rank_to_delete in range(prev_rank, prev_rank-op_num, -1):
+            for rank_to_delete in range(prev_rank, prev_rank - op_num, -1):
                 self.replica_handler.remove_replica(rank_to_delete)
 
                 FedMLModelCache.get_instance().set_redis_params()
                 replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
-                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete+1)
+                    run_id, end_point_name, model_name, self.edge_id, rank_to_delete + 1)
 
                 replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
 
@@ -302,6 +332,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     inference_engine, model_metadata, model_config, replica_no=rank_to_delete + 1)
 
                 time.sleep(1)
+                self.status_reporter.run_id = self.run_id
                 self.status_reporter.report_client_id_status(
                     self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED,
                     is_from_model=True, run_id=self.run_id)
@@ -310,11 +341,11 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                 if rank_to_delete == 0:
                     pass
            return True
 
-        elif op == "update":
+        elif op == "update" or op == "rollback":
             # Update is a combination of delete and add
             worker_ip = GeneralConstants.get_ip_address(self.request_json)
             for rank in replica_rank_to_update:
-                # Delete the container
+                # Delete a replica (container) if it exists
                 self.replica_handler.remove_replica(rank)
 
                 FedMLModelCache.get_instance().set_redis_params()
@@ -322,33 +353,36 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     run_id, end_point_name, model_name, self.edge_id, rank + 1)
 
                 replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
+                logging.info(f"Release gpu ids {replica_occupied_gpu_ids} for update / rollback.")
 
-                JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids)
+                # TODO (Raphael) check if this will allow another job to seize the gpu during high concurrency:
+                try:
+                    JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids)
+                except Exception as e:
+                    if op == "rollback":
+                        pass
+                    else:
+                        logging.error(f"Failed to release gpu ids {replica_occupied_gpu_ids} for update.")
+                        return False
 
                 # Delete the deployment result from local db
                 FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank(
                     run_id, end_point_name, model_name, self.edge_id, rank)
 
+                logging.info(f"Delete replica with no {rank + 1} successfully.")
                 time.sleep(1)
 
-                # Add the container
+                # Add a replica (container)
                 # TODO: Reduce the duplicated code
+                logging.info(f"Start to deploy the model with replica no {rank + 1} ...")
                 try:
                     running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \
                         start_deployment(
-                            inference_end_point_id, end_point_name, model_id, model_version,
-                            unzip_package_path, model_bin_file, model_name, inference_engine,
-                            ClientConstants.INFERENCE_HTTP_PORT,
-                            ClientConstants.INFERENCE_GRPC_PORT,
-                            ClientConstants.INFERENCE_METRIC_PORT,
-                            use_gpu, memory_size,
-                            ClientConstants.INFERENCE_CONVERTOR_IMAGE,
-                            ClientConstants.INFERENCE_SERVER_IMAGE,
-                            worker_ip,
-                            self.model_is_from_open, model_config_parameters,
-                            model_from_open,
-                            token,
-                            master_ip, self.edge_id, master_device_id=device_ids[0], replica_rank=rank,
+                            end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id,
+                            model_version=model_version, model_storage_local_path=unzip_package_path,
+                            inference_model_name=model_name, inference_engine=inference_engine,
+                            infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id,
+                            master_device_id=device_ids[0], replica_rank=rank,
                             gpu_per_replica=int(self.replica_handler.gpu_per_replica)
                         )
                 except Exception as e:
@@ -356,20 +390,30 @@ def run_impl(self, run_extend_queue_list, sender_message_center,
                     logging.error(f"Exception at deployment: {traceback.format_exc()}")
 
                 if inference_output_url == "":
-                    logging.error("failed to deploy the model...")
+                    logging.error("Failed to deploy the model...")
+
+                    # If update failed, release this replica's gpu
+                    FedMLModelCache.get_instance().set_redis_params()
+                    replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids(
+                        run_id, end_point_name, model_name, self.edge_id, rank + 1)
+
+                    replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str)
+
+                    JobRunnerUtils.get_instance().release_partial_job_gpu(
+                        run_id, self.edge_id, replica_occupied_gpu_ids)
 
                     result_payload = self.send_deployment_results(
                         end_point_name, self.edge_id, 
ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, model_id, model_name, inference_output_url, inference_model_version, inference_port, inference_engine, model_metadata, model_config) + self.status_reporter.run_id = self.run_id self.status_reporter.report_client_id_status( self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, is_from_model=True, run_id=self.run_id) - return False else: - logging.info("finished deployment, continue to send results to master...") + logging.info("Finished deployment, continue to send results to master...") result_payload = self.send_deployment_results( end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, model_id, model_name, inference_output_url, model_version, inference_port_external, @@ -390,6 +434,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center, logging.info(f"Update replica with no {rank + 1} successfully. Op num {op_num}") time.sleep(5) time.sleep(1) + self.status_reporter.run_id = self.run_id self.status_reporter.report_client_id_status( self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, is_from_model=True, run_id=self.run_id) @@ -437,7 +482,9 @@ def send_deployment_results(self, end_point_name, device_id, model_status, model_id, model_name, model_inference_url, model_version, inference_port, inference_engine, model_metadata, model_config, replica_no=1): - deployment_results_topic = "model_device/model_device/return_deployment_result/{}".format(device_id) + deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format( + self.run_id, device_id) + deployment_results_payload = self.construct_deployment_results( end_point_name, device_id, model_status, model_id, model_name, model_inference_url, @@ -445,7 +492,7 @@ def send_deployment_results(self, end_point_name, device_id, model_status, model_metadata, model_config, replica_no=replica_no) logging.info("[client] send_deployment_results: topic {}, payload {}.".format(deployment_results_topic, - deployment_results_payload)) + deployment_results_payload)) self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload)) return deployment_results_payload @@ -455,18 +502,8 @@ def send_deployment_status(self, end_point_name, device_id, inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT, replica_no=1, # start from 1 ): - deployment_status_topic = "model_device/model_device/return_deployment_status/{}".format(device_id) - deployment_status_payload = self.construct_deployment_status( - end_point_name, device_id, - model_id, model_name, model_version, - model_inference_url, model_status, - inference_port=inference_port, - replica_no=replica_no) - - logging.info("[client] send_deployment_status: topic {}, payload {}.".format(deployment_status_topic, - deployment_status_payload)) - self.message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload)) - return deployment_status_payload + # Deprecated + pass def reset_devices_status(self, edge_id, status): self.status_reporter.run_id = self.run_id diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index 0543459dd0..856abdac40 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -255,10 +255,15 @@ def callback_start_train(self, topic, payload): run_params = 
run_config.get("parameters", {}) serving_args = run_params.get("serving_args", {}) endpoint_id = serving_args.get("endpoint_id", None) - cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids( - run_id, matched_gpu_num, edge_id, inner_id=endpoint_id, - model_master_device_id=model_master_device_id, - model_slave_device_id=model_slave_device_id) + job_yaml = run_params.get("job_yaml", {}) + job_type = job_yaml.get("job_type", SchedulerConstants.JOB_TASK_TYPE_TRAIN) + cuda_visible_gpu_ids_str = None + if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or + job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY): + cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids( + run_id, matched_gpu_num, edge_id, inner_id=endpoint_id, + model_master_device_id=model_master_device_id, + model_slave_device_id=model_slave_device_id) logging.info( f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(edge_id)}") diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py index cd8e40d7e8..6e2a03a8b4 100755 --- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py @@ -1,5 +1,7 @@ import copy +import json import os +import fedml from ..comm_utils.job_cleanup import JobCleanup from .base_slave_protocol_manager import FedMLBaseSlaveProtocolManager from .launch_job_runner_manager import FedMLLaunchJobRunnerManager @@ -11,15 +13,38 @@ class FedMLLaunchSlaveProtocolManager(FedMLBaseSlaveProtocolManager): def __init__(self, args, agent_config=None): FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config) + self.topic_request_deploy_slave_device_info_from_mlops = None + self.topic_request_deploy_master_device_info_from_mlops = None + self.topic_request_edge_device_info_from_mlops = None # Override def generate_topics(self): super().generate_topics() + # The topic for requesting device info from mlops. + self.topic_request_edge_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}" + + # The topic for requesting deployment master device info from mlops. + self.topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}" + + # The topic for requesting deployment slave device info from mlops. 
+        self.topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}"
+
+        self.add_subscribe_topic(self.topic_request_edge_device_info_from_mlops)
+        self.add_subscribe_topic(self.topic_request_deploy_master_device_info_from_mlops)
+        self.add_subscribe_topic(self.topic_request_deploy_slave_device_info_from_mlops)
+
     # Override
     def add_protocol_handler(self):
         super().add_protocol_handler()
+        self.add_message_listener(
+            self.topic_request_edge_device_info_from_mlops, self.callback_response_device_info_to_mlops)
+        self.add_message_listener(
+            self.topic_request_deploy_master_device_info_from_mlops, self.callback_response_device_info_to_mlops)
+        self.add_message_listener(
+            self.topic_request_deploy_slave_device_info_from_mlops, self.callback_response_device_info_to_mlops)
+
     # Override
     def _generate_protocol_manager_instance(self, args, agent_config=None):
         return FedMLLaunchSlaveProtocolManager(args, agent_config=agent_config)
@@ -102,3 +127,29 @@ def _init_extra_items(self):
         self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"])
 
         pass
+
+    def callback_response_device_info_to_mlops(self, topic, payload):
+        payload_json = json.loads(payload)
+        server_id = payload_json.get("server_id", 0)
+        run_id = payload_json.get("run_id", 0)
+        listen_edge_id = str(topic).split("/")[-1]
+        context = payload_json.get("context", None)
+        response_topic = "deploy/slave_agent/mlops/response_device_info"
+        if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \
+                self.model_device_server_id is not None:
+            device_info_json = {
+                "edge_id": listen_edge_id,
+                "fedml_version": fedml.__version__,
+                "user_id": self.args.user
+            }
+            slave_device_ids = list()
+            for model_client_edge_id in self.model_device_client_edge_id_list:
+                slave_device_ids.append(model_client_edge_id)
+            response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0],
+                                "slave_device_id_list": slave_device_ids,
+                                "master_device_id": self.model_device_server_id,
+                                "run_id": run_id, "edge_id": listen_edge_id,
+                                "edge_info": device_info_json}
+            if context is not None:
+                response_payload["context"] = context
+            self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id)
+

From b3742bb710068ea85f73baf12bd14930633854ba Mon Sep 17 00:00:00 2001
From: Alex
Date: Tue, 9 Apr 2024 21:32:13 +0800
Subject: [PATCH 004/251] [CoreEngine] make the latest deployment module work with the refactored paradigm.
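
The core change is that the protocol manager now hands its own message center
down the deployment delete path, so a per-run job runner can publish delete
requests even before a runner-local sender is attached. A minimal sketch of
the resulting flow, illustrative only: StubMessageCenter and
send_delete_to_edges below are hypothetical stand-ins for the real message
center and the sender in job_runner_msg_sender.py.

    import json

    class StubMessageCenter:
        # Stand-in for the real message center passed down by the protocol manager.
        def send_message_json(self, topic, payload):
            print(f"send {topic}: {payload}")

    def send_delete_to_edges(payload, device_ids, message_center=None):
        # Index 0 holds the model master node id, so only the worker devices
        # receive the delete request, mirroring
        # send_deployment_delete_request_to_edges in the diff below.
        for edge_id in device_ids[1:]:
            topic = "model_ops/model_device/delete_deployment/{}".format(edge_id)
            message_center.send_message_json(topic, payload)

    send_delete_to_edges(json.dumps({"end_point_id": 123}),
                         [8000, 8001, 8002], message_center=StubMessageCenter())
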
--- .../model_scheduler/job_runner_msg_sender.py | 7 ++-- .../model_scheduler/master_job_runner.py | 26 ++++++-------- .../master_job_runner_manager.py | 5 +-- .../master_protocol_manager.py | 3 +- .../scheduler_core/compute_status_cache.py | 2 ++ .../scheduler_core/message_center.py | 14 ++++++-- .../scheduler_base_protocol_manager.py | 3 ++ .../status_manager_protocols.py | 2 +- .../scheduler/slave/slave_protocol_manager.py | 34 +++++++++++-------- 9 files changed, 57 insertions(+), 39 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py index acce17d20b..104dacf716 100755 --- a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py +++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py @@ -114,7 +114,7 @@ def send_deployment_start_request_to_edge(self, edge_id, request_json): logging.info("start_deployment: send topic " + topic_start_deployment + " to client...") self.message_center.send_message_json(topic_start_deployment, json.dumps(request_json)) - def send_deployment_delete_request_to_edges(self, payload, model_msg_object): + def send_deployment_delete_request_to_edges(self, payload, model_msg_object, message_center=None): edge_id_list_to_delete = model_msg_object.device_ids # Remove the model master node id from the list using index 0 @@ -128,7 +128,10 @@ def send_deployment_delete_request_to_edges(self, payload, model_msg_object): # send delete deployment request to each model device topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(edge_id)) logging.info("delete_deployment: send topic " + topic_delete_deployment + " to client...") - self.message_center.send_message_json(topic_delete_deployment, payload) + if message_center is not None: + message_center.send_message_json(topic_delete_deployment, payload) + else: + self.message_center.send_message_json(topic_delete_deployment, payload) def send_deployment_stop_request_to_edges(self, edge_id_list, payload): for edge_id in edge_id_list: diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 867f299ccc..d1cc68dc98 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -221,24 +221,20 @@ def process_deployment_result_message(self, topic=None, payload=None): logging.warning(f"Failed to change the logging handler due to {e}.") logging.info("========== callback_deployment_result_message ==========\n") - # Identify the operation for this run (add, remove, update) - if run_id_str not in self.running_request_json: - logging.error(f"Run id {run_id_str} is not in the running request json.") - return # The rolling update and scale out / in operation should not happen at the same time - assert not ("replica_num_diff" in self.running_request_json[run_id_str] and - len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0 and - "replica_version_diff" in self.running_request_json[run_id_str]) + assert not ("replica_num_diff" in self.request_json and + len(self.request_json["replica_num_diff"]) > 0 and + "replica_version_diff" in self.request_json) - if "replica_version_diff" in self.running_request_json[run_id_str]: + if "replica_version_diff" in self.request_json: run_operation = "UPDATE" - elif "replica_num_diff" in 
self.running_request_json[run_id_str] and \
-                len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0:
+        elif "replica_num_diff" in self.request_json and \
+                len(self.request_json["replica_num_diff"]) > 0:
             run_operation = "ADD_OR_REMOVE"
         else:
             logging.error(f"Unsupported operation for run id {run_id_str} and request json "
-                          f"{self.running_request_json[run_id_str]}")
+                          f"{self.request_json}")
             return
 
         logging.info(f"End point {end_point_id}; Device {device_id}; replica {replica_no}; "
@@ -249,8 +245,8 @@ def process_deployment_result_message(self, topic=None, payload=None):
         # logging.info(f"The current replica controller state is "
         #              f"Total version diff num {this_run_controller.total_replica_version_diff_num}")
         # logging.info(f"self.request_json now {self.request_json}")  # request_json will be deprecated
-        # this_run_request_json = self.running_request_json.get(run_id_str, None)
-        # logging.info(f"self.running_request_json now {this_run_request_json}")
+        # this_run_request_json = self.request_json
+        # logging.info(f"self.request_json now {this_run_request_json}")
 
         # Set redis + sqlite deployment result
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
@@ -290,7 +286,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
             # Change the target version to the start version
             self.replica_controller.rollback_setback_target_replica_version()
 
-            self.running_request_json[run_id_str]["replica_version_diff"] = copy.deepcopy(rollback_version_diff)
+            self.request_json["replica_version_diff"] = copy.deepcopy(rollback_version_diff)
 
             # Send the rollback message to the worker devices
             self.send_rollback_msg(run_id_str)
@@ -317,7 +313,7 @@ def process_deployment_result_message(self, topic=None, payload=None):
         logging.info("callback_deployment_result_message: topic {}, payload {}, result mapping {}.".format(
             topic, payload, self.slave_deployment_results_map))
 
-        request_json = self.running_request_json.get(run_id_str, None)
+        request_json = self.request_json
         if request_json is None:
             logging.error(f"The endpoint {end_point_id} is no longer running.")
             self.send_deployment_status(
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
index 7221a09574..0bfc205b34 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
@@ -42,10 +42,11 @@ def send_deployment_stages(
             message_center=message_center
         )
 
-    def send_deployment_delete_request_to_edges(self, end_point_id, payload, model_msg_object):
+    def send_deployment_delete_request_to_edges(self, end_point_id, payload, model_msg_object, message_center=None):
         run_id_str = str(end_point_id)
         if self.job_runners.get(run_id_str, None) is not None:
-            self.job_runners[run_id_str].send_deployment_delete_request_to_edges(payload, model_msg_object)
+            self.job_runners[run_id_str].send_deployment_delete_request_to_edges(
+                payload, model_msg_object, message_center=message_center)
 
     def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version):
         run_id_str = str(run_id)
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py
index 8566848ec6..962dcbbcb3 100755
--- 
a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -114,7 +114,8 @@ def callback_delete_deployment(self, topic, payload): delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version) - FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges(payload, model_msg_object) + FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges( + model_msg_object.run_id, payload, model_msg_object, message_center=self.message_center) FedMLDeployJobRunnerManager.get_instance().stop_job_runner(model_msg_object.run_id) diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py b/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py index a1929abbef..f224806b8c 100755 --- a/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py +++ b/python/fedml/computing/scheduler/scheduler_core/compute_status_cache.py @@ -42,6 +42,8 @@ def get_job_status(self, run_id): return status def save_device_status_in_job(self, run_id, device_id, status): + if status is None: + return try: self.redis_connection.set(self._get_device_status_in_job_key(run_id, device_id), status) except Exception as e: diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py index 7ae1e4c0b5..dcf21d33b7 100755 --- a/python/fedml/computing/scheduler/scheduler_core/message_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py @@ -200,6 +200,7 @@ def run_sender(self, message_event, message_queue, message_center_name): while True: message_entity = None + message_body = None try: self.check_message_stop_event() except MessageCenterStoppedException as e: @@ -242,7 +243,7 @@ def run_sender(self, message_event, message_queue, message_center_name): f"payload {message_entity.payload}, {traceback.format_exc()}" ) else: - logging.info(f"Failed to send the message: {traceback.format_exc()}") + logging.info(f"Failed to send the message with body {message_body}, {traceback.format_exc()}") self.release_sender_mqtt_mgr() @@ -291,11 +292,18 @@ def get_message_runner(self): def get_listener_message_queue(self): return self.listener_message_queue - def start_listener(self, sender_message_queue=None, agent_config=None, message_center_name=None): + def setup_listener_message_queue(self): + self.listener_message_queue = Queue() + + def start_listener(self, sender_message_queue=None, listener_message_queue=None, agent_config=None, message_center_name=None): if self.listener_message_center_process is not None: return - self.listener_message_queue = Queue() + if listener_message_queue is None: + if self.listener_message_queue is None: + self.listener_message_queue = Queue() + else: + self.listener_message_queue = listener_message_queue self.listener_message_event = multiprocessing.Event() self.listener_message_event.clear() self.listener_agent_config = agent_config diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py index 4a0c950655..8c1756880a 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py @@ 
-82,6 +82,9 @@ def initialize(self): # Start the message center to process edge related messages. self.setup_message_center() + # Setup the message listener queue + self.setup_listener_message_queue() + # Start the status center to process edge related status. self.start_status_listener_center() diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 06b222cfd1..1a43653bd9 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -300,4 +300,4 @@ def status_center_request_job_status_from_master_in_slave_agent(self, topic, pay # Request the job status from master agent. topic_request_job_status = f"{GeneralConstants.MSG_TOPIC_REQUEST_JOB_STATUS_PREFIX}{master_id}" payload_request_job_status = {"run_id": run_id, "edge_id": edge_id} - self.message_center.send_message(topic_request_job_status, payload_request_job_status) + self.message_center.send_message(topic_request_job_status, json.dumps(payload_request_job_status)) diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py index 6e2a03a8b4..ef8dac8730 100755 --- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py @@ -24,27 +24,12 @@ def generate_topics(self): # The topic for requesting device info from mlops. self.topic_request_edge_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}" - # The topic for requesting deployment master device info from mlops. - self.topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}" - - # The topic for requesting deployment slave device info from mlops. - self.topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}" - self.add_subscribe_topic(self.topic_request_edge_device_info_from_mlops) - self.add_subscribe_topic(self.topic_request_deploy_master_device_info_from_mlops) - self.add_subscribe_topic(self.topic_request_deploy_slave_device_info_from_mlops) # Override def add_protocol_handler(self): super().add_protocol_handler() - self.add_message_listener( - self.topic_request_edge_device_info_from_mlops, self.callback_response_device_info_to_mlops) - self.add_message_listener( - self.topic_request_deploy_master_device_info_from_mlops, self.callback_response_device_info_to_mlops) - self.add_message_listener( - self.topic_request_deploy_slave_device_info_from_mlops, self.callback_response_device_info_to_mlops) - # Override def _generate_protocol_manager_instance(self, args, agent_config=None): return FedMLLaunchSlaveProtocolManager(args, agent_config=agent_config) @@ -121,6 +106,9 @@ def _init_extra_items(self): os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server_id) os.environ["FEDML_DEPLOY_WORKER_IDS"] = str(self.model_device_client_edge_id_list) + # Subscribe handshaking messages from MLOps. 
+ self.subscribe_handshaking_messages_from_mlops() + # Start the monitor process self.args = copy.deepcopy(in_args) self.mlops_metrics.stop_device_realtime_perf() @@ -153,3 +141,19 @@ def callback_response_device_info_to_mlops(self, topic, payload): response_payload["context"] = context self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id) + def subscribe_handshaking_messages_from_mlops(self): + # The topic for requesting deployment master device info from mlops. + self.topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}" + + # The topic for requesting deployment slave device info from mlops. + self.topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}" + + self.add_subscribe_topic(self.topic_request_deploy_master_device_info_from_mlops) + self.add_subscribe_topic(self.topic_request_deploy_slave_device_info_from_mlops) + + self.add_message_listener( + self.topic_request_edge_device_info_from_mlops, self.callback_response_device_info_to_mlops) + self.add_message_listener( + self.topic_request_deploy_master_device_info_from_mlops, self.callback_response_device_info_to_mlops) + self.add_message_listener( + self.topic_request_deploy_slave_device_info_from_mlops, self.callback_response_device_info_to_mlops) \ No newline at end of file From d6c5a78e76d18721a634ee66f8ed5a1a2951e62c Mon Sep 17 00:00:00 2001 From: alaydshah Date: Mon, 15 Apr 2024 22:49:00 +0000 Subject: [PATCH 005/251] Minor Fixes --- .../computing/scheduler/scheduler_core/account_manager.py | 2 +- .../scheduler/scheduler_core/status_manager_protocols.py | 8 +++----- .../scheduler/slave/base_slave_protocol_manager.py | 1 - python/fedml/computing/scheduler/slave/client_login.py | 2 +- 4 files changed, 5 insertions(+), 8 deletions(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py index 61ffd20988..da04fc3989 100755 --- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py @@ -184,7 +184,7 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, # Check if it is running in the fedml docker hub is_from_fedml_docker_hub = False dock_loc_file = GeneralConstants.get_deploy_docker_location_file(is_master=is_master) \ - if is_deploy else GeneralConstants.get_deploy_docker_location_file(is_master=is_master) + if is_deploy else GeneralConstants.get_launch_docker_location_file(is_master=is_master) if os.path.exists(dock_loc_file): is_from_fedml_docker_hub = True diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 1a43653bd9..4d2cf3a5ed 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -92,11 +92,9 @@ def status_center_process_master_status(self, topic, payload): run_id_str = str(run_id) # Process the job status - if status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - self.process_job_completed_status(server_id, status) - elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: - self.process_job_completed_status(server_id, status) - elif status == 
ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: + if status in (ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, + ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, + ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED): self.process_job_completed_status(server_id, status) elif status == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: self.process_job_exception_status(server_id, status) diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index 856abdac40..514aa98cd7 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -33,7 +33,6 @@ def __init__(self, args, agent_config=None): self.message_status_runner = None self.message_center = None self.status_center = None - self.message_center_name = "master_agent" self.run_id = None self.edge_id = args.edge_id self.general_edge_id = None diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py index 37a6dc8064..a4df2ccb6a 100755 --- a/python/fedml/computing/scheduler/slave/client_login.py +++ b/python/fedml/computing/scheduler/slave/client_login.py @@ -30,7 +30,7 @@ def logout(): if args.api_key == "": args.api_key = args.user - fedml.set_env_version("test") + # fedml.set_env_version("test") if args.local_on_premise_platform_host != "127.0.0.1": fedml.set_local_on_premise_platform_host(args.local_on_premise_platform_host) From 3e6e7d1847009f3e756baab80fb45102623600ee Mon Sep 17 00:00:00 2001 From: alaydshah Date: Tue, 16 Apr 2024 18:02:50 +0000 Subject: [PATCH 006/251] Fix logging --- .../scheduler_core/scheduler_base_protocol_manager.py | 2 +- python/fedml/core/mlops/mlops_utils.py | 10 ++++++---- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py index 8c1756880a..e3cac7a425 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py @@ -30,7 +30,7 @@ def __init__(self, args, agent_config=None, is_master=False): self.message_status_runner = None self.message_center = None self.status_center = None - self.message_center_name = "master_agent" if not is_master else "slave_agent" + self.message_center_name = "master_agent" if is_master else "slave_agent" self.run_id = None self.edge_id = args.edge_id self.general_edge_id = None diff --git a/python/fedml/core/mlops/mlops_utils.py b/python/fedml/core/mlops/mlops_utils.py index 7313141550..1d6db23d02 100644 --- a/python/fedml/core/mlops/mlops_utils.py +++ b/python/fedml/core/mlops/mlops_utils.py @@ -128,15 +128,17 @@ def get_program_prefix(args, edge_id): @staticmethod def get_edge_id_from_args(args): if args.role == "server": - if hasattr(args, "server_id"): + # Considering that 0 is a valid value, we need to ensure it is not None rather than solely checking + # for truthiness + if getattr(args, "server_id", None) is not None: edge_id = args.server_id else: - if hasattr(args, "edge_id"): + if getattr(args, "edge_id", None) is not None: edge_id = args.edge_id else: edge_id = 0 else: - if hasattr(args, "client_id"): + if getattr(args, "client_id", None) is not None: edge_id = args.client_id elif hasattr(args, "client_id_list"): if 
args.client_id_list is None:
@@ -148,7 +150,7 @@ def get_edge_id_from_args(args):
             else:
                 edge_id = 0
         else:
-            if hasattr(args, "edge_id"):
+            if getattr(args, "edge_id", None) is not None:
                 edge_id = args.edge_id
             else:
                 edge_id = 0

From 8a544172638f2cb3a12dc9865ce0acbef20b8dac Mon Sep 17 00:00:00 2001
From: Alay Dilipbhai Shah
Date: Tue, 16 Apr 2024 11:04:31 -0700
Subject: [PATCH 007/251] Update client_login.py

---
 python/fedml/computing/scheduler/slave/client_login.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py
index a4df2ccb6a..37a6dc8064 100755
--- a/python/fedml/computing/scheduler/slave/client_login.py
+++ b/python/fedml/computing/scheduler/slave/client_login.py
@@ -30,7 +30,7 @@ def logout():
     if args.api_key == "":
         args.api_key = args.user
 
-    # fedml.set_env_version("test")
+    fedml.set_env_version("test")
 
     if args.local_on_premise_platform_host != "127.0.0.1":
         fedml.set_local_on_premise_platform_host(args.local_on_premise_platform_host)

From 852a3bf4b6e4d9f65e6a317579c691b7e6a36f21 Mon Sep 17 00:00:00 2001
From: Alay Shah
Date: Fri, 19 Apr 2024 22:43:52 -0700
Subject: [PATCH 008/251] Update Launch Job Docker Image name

---
 python/examples/launch/hello_job_with_container.yaml      | 2 +-
 python/fedml/computing/scheduler/comm_utils/constants.py  | 2 +-
 .../driver_example/customized_job_example/train_job.yaml  | 2 +-
 python/fedml/workflow/driver_example/hello_world_job.yaml | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/python/examples/launch/hello_job_with_container.yaml b/python/examples/launch/hello_job_with_container.yaml
index 2c520beb24..26202a3d98 100755
--- a/python/examples/launch/hello_job_with_container.yaml
+++ b/python/examples/launch/hello_job_with_container.yaml
@@ -43,7 +43,7 @@ job_type: train              # options: train, deploy, federate
 job_subtype: generate_training
 
 docker:
-  image: fedml/fedml-default-launch:cu12.1-u22.04
+  image: fedml/fedml-launch-job:cu12.1-u22.04
   #registry: docker.io
   #username: my_hub_user
   #password: my_hub_password
diff --git a/python/fedml/computing/scheduler/comm_utils/constants.py b/python/fedml/computing/scheduler/comm_utils/constants.py
index b1294181bb..f89d5640ce 100644
--- a/python/fedml/computing/scheduler/comm_utils/constants.py
+++ b/python/fedml/computing/scheduler/comm_utils/constants.py
@@ -103,7 +103,7 @@ class SchedulerConstants:
     RUN_PROCESS_TYPE_BOOTSTRAP_PROCESS = "bootstrap-process"
 
     FEDML_DEFAULT_LAUNCH_CONTAINER_PREFIX = "fedml_default_launch_container"
-    FEDML_DEFAULT_LAUNCH_IMAGE = "fedml/fedml-default-launch:cu12.1-u22.04"
+    FEDML_DEFAULT_LAUNCH_IMAGE = "fedml/fedml-launch-job:cu12.1-u22.04"
 
     FEDML_DEFAULT_LOG_DIR = ".fedml/fedml-client/fedml/logs"
     FEDML_DEFAULT_DATA_DIR = ".fedml/fedml-client/fedml/data"
diff --git a/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml b/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml
index e057791431..2ccbc897f0 100755
--- a/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml
+++ b/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml
@@ -7,7 +7,7 @@ workspace: train_job
 # It should be the full name of the image with tag.
 # If you want to use the default image, it can be empty.
 docker:
-  image: fedml/fedml-default-launch:cu12.1-u22.04
+  image: fedml/fedml-launch-job:cu12.1-u22.04
 
 # Running entry commands which will be executed as the job entry point.
# Support multiple lines, which can not be empty. diff --git a/python/fedml/workflow/driver_example/hello_world_job.yaml b/python/fedml/workflow/driver_example/hello_world_job.yaml index e1dcb02f7e..e63712f99a 100755 --- a/python/fedml/workflow/driver_example/hello_world_job.yaml +++ b/python/fedml/workflow/driver_example/hello_world_job.yaml @@ -10,7 +10,7 @@ workspace: hello_world # It should be the full name of the image with tag. # If you want to use the default image, it can be empty. #docker: -# image: fedml/fedml-default-launch:cu12.1-u22.04 +# image: fedml/fedml-launch-job:cu12.1-u22.04 # Running entry commands which will be executed as the job entry point. # Support multiple lines, which can not be empty. From 06ed07fa8c4796a7719a9882cc2086996ea88424 Mon Sep 17 00:00:00 2001 From: bhargav191098 Date: Wed, 1 May 2024 19:14:26 -0700 Subject: [PATCH 009/251] Adding tags to data storage object and the corresponding pretty table changes --- python/fedml/api/__init__.py | 5 ++--- python/fedml/api/modules/storage.py | 5 +++-- python/fedml/cli/modules/storage.py | 20 ++++++++++++++------ 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/python/fedml/api/__init__.py b/python/fedml/api/__init__.py index 4e004f07d3..3e75b987d6 100755 --- a/python/fedml/api/__init__.py +++ b/python/fedml/api/__init__.py @@ -179,13 +179,12 @@ def cluster_killall(api_key=None) -> bool: return cluster.kill(cluster_names=(), api_key=api_key) -def upload(data_path, api_key=None, service="R2", name=None, description=None, metadata=None, show_progress=False, +def upload(data_path, api_key=None, tag_list=[], service="R2", name=None, description=None, metadata=None, show_progress=False, out_progress_to_err=True, progress_desc=None) -> FedMLResponse: - return storage.upload(data_path=data_path, api_key=api_key, name=name, description=description, + return storage.upload(data_path=data_path, api_key=api_key, name=name, description=description, tag_list =tag_list, service=service, progress_desc=progress_desc, show_progress=show_progress, out_progress_to_err=out_progress_to_err, metadata=metadata) - def get_storage_user_defined_metadata(data_name, api_key=None) -> FedMLResponse: return storage.get_user_metadata(data_name=data_name, api_key=api_key) diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py index 1582788e3a..51f58539bf 100644 --- a/python/fedml/api/modules/storage.py +++ b/python/fedml/api/modules/storage.py @@ -18,12 +18,13 @@ def __init__(self, data: dict): self.createdAt = data.get("createTime", None) self.updatedAt = data.get("updateTime", None) self.size = _get_size(data.get("fileSize",None)) + self.tag_list = data.get("tags", None) # Todo (alaydshah): Store service name in metadata # Todo (alaydshah): If data already exists, don't upload again. 
Instead suggest to use update command -def upload(data_path, api_key, name, description, service, show_progress, out_progress_to_err, progress_desc, +def upload(data_path, api_key, name, description, tag_list, service, show_progress, out_progress_to_err, progress_desc, metadata) -> FedMLResponse: api_key = authenticate(api_key) @@ -58,7 +59,7 @@ def upload(data_path, api_key, name, description, service, show_progress, out_pr "description": description, "fileSize": file_size, "fileUrl": file_uploaded_url, - "tagNameList": [], + "tagNameList": tag_list, } try: diff --git a/python/fedml/cli/modules/storage.py b/python/fedml/cli/modules/storage.py index 93ce273e92..af75cda85f 100644 --- a/python/fedml/cli/modules/storage.py +++ b/python/fedml/cli/modules/storage.py @@ -53,6 +53,7 @@ def validate_argument(ctx, param, value): @click.option("--user_metadata", "-um", type=str, help="User-defined metadata in the form of a dictionary, for instance, " " {'name':'value'} within double quotes. "" " "Defaults to None.") +@click.option("--tags", "-t", type=str, help="Add tags to your data to store. Give tags in comma separated form like 'cv,unet,segmentation' If not provided, the tags will be empty.") @click.option('--service', "-s", type=click.Choice(['R2']), default="R2", help="Storage service for object storage. " "Only R2 is supported as of now") @click.option( @@ -65,10 +66,11 @@ def validate_argument(ctx, param, value): default="release", help=version_help, ) -def upload(data_path: str, name: str, user_metadata: str, description: str, version: str, api_key: str, service): +def upload(data_path: str, name: str, user_metadata: str, description: str, version: str, api_key: str, tags:str, service): metadata = _parse_metadata(user_metadata) + tag_list = _parse_tags(tags) fedml.set_env_version(version) - response = fedml.api.upload(data_path=data_path, api_key=api_key, name=name, service=service, show_progress=True, + response = fedml.api.upload(data_path=data_path, api_key=api_key, name=name, tag_list = tag_list, service=service, show_progress=True, description=description, metadata=metadata) if response.code == ResponseCode.SUCCESS: click.echo(f"Data uploaded successfully. | url: {response.data}") @@ -96,10 +98,10 @@ def list_data(version, api_key): if not response.data: click.echo(f"No stored objects found for account linked with apikey: {api_key}") return - object_list_table = PrettyTable(["Data Name", "Data Size", "Description", "Created At", "Updated At"]) + object_list_table = PrettyTable(["Data Name", "Data Size", "Description", "Data Tags","Created At", "Updated At"]) for stored_object in response.data: object_list_table.add_row( - [stored_object.dataName, stored_object.size, stored_object.description, stored_object.createdAt, stored_object.updatedAt]) + [stored_object.dataName, stored_object.size, stored_object.description, stored_object.tag_list,stored_object.createdAt, stored_object.updatedAt]) click.echo(object_list_table) else: click.echo(f"Failed to list stored objects for account linked with apikey {api_key}. 
" @@ -157,8 +159,8 @@ def get_metadata(data_name, version, api_key): return click.echo(f"Successfully fetched metadata for object {data_name}:") # Todo (alaydshah): Add file size and tags - metadata_table = PrettyTable(["Data Name","Data Size","Description", "Created At", "Updated At"]) - metadata_table.add_row([metadata.dataName,metadata.size,metadata.description, metadata.createdAt, metadata.updatedAt]) + metadata_table = PrettyTable(["Data Name","Data Size","Description","Data Tags","Created At", "Updated At"]) + metadata_table.add_row([metadata.dataName,metadata.size,metadata.description,metadata.tag_list,metadata.createdAt, metadata.updatedAt]) click.echo(metadata_table) click.echo("") else: @@ -238,3 +240,9 @@ def _parse_metadata(metadata: str): click.echo( f"Input metadata cannot be evaluated. Please make sure metadata is in the correct format. Error: {e}.") exit() + +def _parse_tags(tags:str): + if not tags: + return [] + tag_list = tags.split(",") + return tag_list \ No newline at end of file From 6e09a604b7ee5df016dcb4011a613bd4f1f9e103 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Wed, 1 May 2024 20:04:36 -0700 Subject: [PATCH 010/251] Fix Log Rotation Bug --- python/fedml/core/mlops/__init__.py | 20 ++++++++-------- python/fedml/core/mlops/mlops_runtime_log.py | 24 ++++++++++++++------ 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/python/fedml/core/mlops/__init__.py b/python/fedml/core/mlops/__init__.py index 77ad06165e..2b4dc24c95 100644 --- a/python/fedml/core/mlops/__init__.py +++ b/python/fedml/core/mlops/__init__.py @@ -9,7 +9,6 @@ import uuid from multiprocessing import Process -import click import requests import fedml @@ -95,9 +94,9 @@ def init(args, should_init_logs=True): if not mlops_parrot_enabled(args): if not hasattr(args, "config_version"): args.config_version = "release" - fetch_config(args, args.config_version) if should_init_logs: MLOpsRuntimeLog.get_instance(args).init_logs() + fetch_config(args, args.config_version) return else: if hasattr(args, "simulator_daemon"): @@ -137,7 +136,7 @@ def init(args, should_init_logs=True): MLOpsStore.mlops_project_id = project_id MLOpsStore.mlops_run_id = run_id if result_project is False or result_run is False: - click.echo("Failed to init project and run.") + print("Failed to init project and run.") return # Init runtime logs @@ -973,10 +972,9 @@ def _generate_log_metrics(metrics: dict, step: int = None, customized_step_key: def log_mlops_running_logs(artifact: fedml.mlops.Artifact, version=None, run_id=None, edge_id=None, only_push_artifact=False): - fedml_args = get_fedml_args() artifact_archive_zip_file, artifact_storage_url = push_artifact_to_s3( - artifact, version=version if version is not None else fedml_args.config_version, show_progress=False) + artifact, version=version if version is not None else fedml.get_env_version(), show_progress=False) if only_push_artifact: return artifact_storage_url @@ -1274,8 +1272,8 @@ def bind_simulation_device(args, userid): continue if config_try_count >= 5: - click.echo("\nNote: Internet is not connected. " - "Experimental tracking results will not be synchronized to the MLOps (open.fedml.ai).\n") + logging.info("\nNote: Internet is not connected. 
" + "Experimental tracking results will not be synchronized to the MLOps (open.fedml.ai).\n") return False # Build unique device id @@ -1301,8 +1299,8 @@ def bind_simulation_device(args, userid): continue if edge_id <= 0: - click.echo("Oops, you failed to login the FedML MLOps platform.") - click.echo("Please check whether your network is normal!") + print("Oops, you failed to login the FedML MLOps platform.") + print("Please check whether your network is normal!") return False MLOpsStore.mlops_edge_id = edge_id setattr(MLOpsStore.mlops_args, "client_id", edge_id) @@ -1353,8 +1351,8 @@ def fetch_config(args, version="release"): continue if config_try_count >= 5: - click.echo("\nNote: Internet is not connected. " - "Experimental tracking results will not be synchronized to the MLOps (open.fedml.ai).\n") + logging.info("\nNote: Internet is not connected. " + "Experimental tracking results will not be synchronized to the MLOps (open.fedml.ai).\n") return False diff --git a/python/fedml/core/mlops/mlops_runtime_log.py b/python/fedml/core/mlops/mlops_runtime_log.py index 6992c44555..0bc4dc6b6c 100644 --- a/python/fedml/core/mlops/mlops_runtime_log.py +++ b/python/fedml/core/mlops/mlops_runtime_log.py @@ -5,6 +5,7 @@ import sys import threading import time +import shutil from logging.handlers import TimedRotatingFileHandler from fedml import mlops @@ -12,19 +13,19 @@ LOG_LEVEL = logging.INFO ROTATION_FREQUENCY = 'D' +# when rollover is done, no more than backupCount files are kept - the oldest ones are deleted. BACKUP_COUNT = 100 class MLOpsFileHandler(TimedRotatingFileHandler): def __init__(self, run_id, edge_id, log_config_file, filepath): - super(MLOpsFileHandler, self).__init__(filename=filepath, when=ROTATION_FREQUENCY, backupCount=BACKUP_COUNT, - encoding='utf-8') + super().__init__(filename=filepath, when=ROTATION_FREQUENCY, + backupCount=BACKUP_COUNT,encoding='utf-8') self.run_id = run_id self.edge_id = edge_id self.file_path = filepath self.rotate_count = 0 - self.backupCount = BACKUP_COUNT self.rotator: callable = self.update_config_and_rotate self.log_config_file = log_config_file self.__initialize_config() @@ -32,17 +33,26 @@ def __init__(self, run_id, edge_id, log_config_file, filepath): def update_config_and_rotate(self, source, dest): # source = current log file name # dest = log file name (dated) - if os.path.exists(source): - os.rename(source, dest) MLOpsLoggingUtils.acquire_lock() - config_data = MLOpsLoggingUtils.load_log_config(self.run_id, self.edge_id, self.log_config_file) + + # Check if the source and destination files exist. 
If the source exists, copy its contents into the destination and truncate the source + if os.path.exists(source): + # Copy the contents of the source file to the destination file + shutil.copy(source, dest) + # Clear everything in the source file + with open(source, 'w') as src_file: + src_file.truncate(0) + src_file.close() + + config_data = MLOpsLoggingUtils.load_log_config(self.run_id, self.edge_id, + self.log_config_file) # Update file name of current log file config_data[self.rotate_count].file_path = dest self.rotate_count += 1 # Store the rotate count, and corresponding log file name in the config file - rotated_log_file = LogFile(file_path=source, uploaded_file_index=self.backupCount) + rotated_log_file = LogFile(file_path=source) config_data[self.rotate_count] = rotated_log_file MLOpsLoggingUtils.save_log_config(run_id=self.run_id, device_id=self.edge_id, log_config_file=self.log_config_file, From d150998a2a8cf245aef030abbac7896317cac367 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Wed, 1 May 2024 21:50:44 -0700 Subject: [PATCH 011/251] Bug spotted, added FIXME Comment --- python/fedml/computing/scheduler/comm_utils/job_monitor.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py index 5874adfef7..fc97a8e077 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py +++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py @@ -352,6 +352,8 @@ def monitor_slave_run_process_status(self): continue # Check if all processes of the specific run are exited + # FIXME: Proactively release the gpu ids when the run processes have not even started yet as the docker + # image is being pulled run_process_list = client_constants.ClientConstants.get_learning_process_list(job.job_id) all_run_processes_exited = True if len(run_process_list) <= 0 else False if all_run_processes_exited: From 4ce4c78753ae9ef3aa22b0e0578039d9b28a5140 Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Wed, 1 May 2024 18:09:21 +0000 Subject: [PATCH 012/251] Disabling replica release after idle secs. --- .../model_scheduler/autoscaler/autoscaler.py | 31 ++++++++++--------- .../model_scheduler/autoscaler/policies.py | 2 +- 2 files changed, 17 insertions(+), 16 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py index dd6ca67706..5f0d425505 100644 --- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py +++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py @@ -303,25 +303,26 @@ def scale_operation_endpoint(self, # If no metric exists then no scaling operation. return scale_op - # If we continue here, then it means that there was at least one request. - # The `most_recent_metric` is of type list, hence we need to access index 0. - most_recent_metric = metrics[-1] - latest_request_timestamp_micro_secs = most_recent_metric["timestamp"] - # The time module does not have a micro-second function built-in, so we need to - # divide nanoseconds by 1e3 and convert to micro-seconds. - current_time_micro_seconds = time.time_ns() / 1e3 - # compute elapsed time and convert to seconds - elapsed_time_secs = \ - (current_time_micro_seconds - latest_request_timestamp_micro_secs) / 1e6 - if elapsed_time_secs > autoscaling_policy.release_replica_after_idle_secs: + if autoscaling_policy.release_replica_after_idle_secs: + # At this point it means that there was at least one request. The + # `most_recent_metric` is of type list, hence we need to access index 0. + most_recent_metric = metrics[-1] + latest_request_timestamp_micro_secs = most_recent_metric["timestamp"] + # The time module does not have a micro-second function built-in, + # so we need to divide nanoseconds by 1e3 and convert to micro-seconds. + current_time_micro_seconds = time.time_ns() / 1e3 + # Compute the elapsed time and convert to seconds. + elapsed_time_secs = \ + (current_time_micro_seconds - latest_request_timestamp_micro_secs) / 1e6 # If the elapsed time is greater than the requested idle time, # in other words there was no incoming request then scale down. - scale_op = ScaleOp.DOWN_IN_OP + if elapsed_time_secs > autoscaling_policy.release_replica_after_idle_secs: + scale_op = ScaleOp.DOWN_IN_OP else: - # Otherwise, it means there was a request within the elapsed time, then: + # Otherwise, it means there was a request within the elapsed time, then, + # Check if the current number of running replicas is 0 it means + # we need more resources, hence we need to scale up: ScaleOp.UP_OUT_OP. if autoscaling_policy.current_replicas == 0: - # Check if the current number of running replicas is 0, - # then we need more resources, hence ScaleOp.UP_OUT_OP. scale_op = ScaleOp.UP_OUT_OP else: # Else, trigger the autoscaling policy with all existing values. diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py index 546817ec82..fd49549812 100644 --- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py +++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py @@ -22,7 +22,7 @@ class AutoscalingPolicy(BaseModel): min_replicas: NonNegativeInt max_replicas: NonNegativeInt previous_triggering_value: float = None - release_replica_after_idle_secs: NonNegativeInt = 300 # default is after 5 minutes + release_replica_after_idle_secs: NonNegativeInt = None scaledown_delay_secs: NonNegativeInt = 60 # default is 1 minute scaleup_cost_secs: NonNegativeInt = 300 # default is 5 minutes From 975e53de8185777f146215f552bbfb7d7f0740b5 Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Wed, 1 May 2024 22:34:13 +0000 Subject: [PATCH 013/251] Return scale down operation when no incoming request within the policy's time frame window. --- .../model_scheduler/autoscaler/autoscaler.py | 41 ++++++++++++------- 1 file changed, 26 insertions(+), 15 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py index 5f0d425505..03666a5919 100644 --- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py +++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py @@ -55,13 +55,17 @@ def scale_operation_ewm(cls, with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) period_data = metrics.last("{}min".format(ewm_policy.ewm_mins)) - # If the data frame window is empty then do nothing more, just return. - if period_data.empty: - return ScaleOp.NO_OP - metric_name = "current_latency" \ if "ewm_latency" == ewm_policy.metric else "current_qps" - ewm_period = period_data[metric_name] \ .ewm(alpha=ewm_policy.ewm_alpha).mean() + + # If the data frame window is empty then it means we + # did not have any incoming request, so we need to scale down. 
+ if period_data.empty: + return ScaleOp.DOWN_IN_OP + + # Otherwise, we proceed as normal. + metric_name = "current_latency" \ + if "ewm_latency" == ewm_policy.metric else "current_qps" + ewm_period = period_data[metric_name] \ + .ewm(alpha=ewm_policy.ewm_alpha).mean() scale_op = ScaleOp.NO_OP # If there is no exponential moving average within this @@ -115,10 +119,14 @@ def scale_operation_query_concurrency(cls, warnings.simplefilter(action='ignore', category=FutureWarning) # Here, the number of queries is the number of rows in the short period data frame. period_data = metrics.last("{}s".format(concurrent_query_policy.window_size_secs)) - # If the data frame window is empty then do nothing more, just return. - if period_data.empty: - return ScaleOp.NO_OP - queries_num = period_data.shape[0] + + # If the data frame window is empty then it means we + # did not have any incoming request, so we need to scale down. + if period_data.empty: + return ScaleOp.DOWN_IN_OP + + # Otherwise, we proceed as normal. + queries_num = period_data.shape[0] try: # QSR: Queries per Second per Replica: (Number of Queries / Number of Current Replicas) / Window Size @@ -159,10 +167,13 @@ def scale_operation_meet_traffic_demand(cls, warnings.simplefilter(action='ignore', category=FutureWarning) # Here, the number of queries is the number of rows in the short period data frame. period_data = metrics.last("{}s".format(meet_traffic_demand_policy.window_size_secs)) - # If the data frame window is empty then do nothing more, just return. - if period_data.empty: - return ScaleOp.NO_OP + # If the data frame window is empty then it means we + # did not have any incoming request, so we need to scale down. + if period_data.empty: + return ScaleOp.DOWN_IN_OP + + # Otherwise, we proceed as normal. period_requests_num = period_data.shape[0] all_latencies = metrics["current_latency"] # Original value is milliseconds, convert to seconds. @@ -293,7 +304,7 @@ def scale_operation_endpoint(self, 0: do nothing """ - # Fetch most recent metric record from the database. + # Fetch all metrics record from the database. metrics = self.fedml_model_cache.get_endpoint_metrics( endpoint_id=endpoint_id) From 5fb500f04762f17d797ac6717380ce2a555382d1 Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Wed, 1 May 2024 22:58:43 +0000 Subject: [PATCH 014/251] Adding logging to figure out scaling down delay. --- .../model_scheduler/autoscaler/autoscaler.py | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py index 03666a5919..d81e0148eb 100644 --- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py +++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py @@ -51,6 +51,7 @@ def scale_operation_ewm(cls, ewm_policy: EWMPolicy, metrics: pd.DataFrame) -> ScaleOp: + logging.info("Executing the ExponentialWeightMoving average autoscaling policy.") # Adding the context below to avoid having a series of warning messages. with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) @@ -114,6 +115,7 @@ def scale_operation_query_concurrency(cls, concurrent_query_policy: ConcurrentQueryPolicy, metrics: pd.DataFrame) -> ScaleOp: + logging.info("Executing the QueryConcurrency autoscaling policy.") # Adding the context below to avoid having a series of warning messages. 
with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) @@ -162,6 +164,7 @@ def scale_operation_meet_traffic_demand(cls, meet_traffic_demand_policy: MeetTrafficDemandPolicy, metrics: pd.DataFrame) -> ScaleOp: + logging.info("Executing the MeetTrafficDemand autoscaling policy.") # Adding the context below to avoid having a series of warning messages. with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) @@ -227,6 +230,7 @@ def run_autoscaling_policy(self, def validate_scaling_bounds(cls, scale_op: ScaleOp, autoscaling_policy: AutoscalingPolicy) -> ScaleOp: + logging.info("Validating scaling bounds.") # We cannot be lower than the minimum number of replicas, # nor exceed the maximum number of requested replicas. new_running_replicas = autoscaling_policy.current_replicas + scale_op.value @@ -266,6 +270,7 @@ def enforce_scaling_down_delay_interval(self, previous_timestamp = \ self.fedml_model_cache.get_endpoint_scaling_down_decision_time(endpoint_id) diff_secs = (current_timestamp - previous_timestamp) / 1e6 + logging.info("Difference in seconds between scaling down operations: {}".format(diff_secs)) if diff_secs > autoscaling_policy.scaledown_delay_secs: # At this point, we will perform the scaling down operation, hence # we need to delete the previously stored scaling down timestamp (if any). @@ -279,7 +284,8 @@ def enforce_scaling_down_delay_interval(self, return scale_op def clean_up_scaling_down_operation_state(self, endpoint_id) -> bool: - # We return True if the clean up operation succeeded, else False. + # We return True if the cleaning up operation succeeded, else False. + logging.info("Not a scaling down operation, cleaning up scale down state from Redis.") to_clean_up = \ self.fedml_model_cache.exists_endpoint_scaling_down_decision_time(endpoint_id) if to_clean_up: @@ -312,6 +318,7 @@ def scale_operation_endpoint(self, scale_op = ScaleOp.NO_OP if not metrics: # If no metric exists then no scaling operation. + logging.info("No existing metric, so no scaling operation.") return scale_op if autoscaling_policy.release_replica_after_idle_secs: @@ -328,12 +335,15 @@ def scale_operation_endpoint(self, # If the elapsed time is greater than the requested idle time, # in other words there was no incoming request then scale down. if elapsed_time_secs > autoscaling_policy.release_replica_after_idle_secs: + logging.info("Endpoint remained idle for {} seconds, need to scale down.".format( + elapsed_time_secs)) scale_op = ScaleOp.DOWN_IN_OP else: # Otherwise, it means there was a request within the elapsed time, then, # Check if the current number of running replicas is 0 it means # we need more resources, hence we need to scale up: ScaleOp.UP_OUT_OP. if autoscaling_policy.current_replicas == 0: + logging.info("Incoming requests but with 0 replicas, scaling up.") scale_op = ScaleOp.UP_OUT_OP else: # Else, trigger the autoscaling policy with all existing values. From 14a75a93e86b90feb000b014c9ff8592bd47a606 Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Thu, 2 May 2024 01:48:28 +0000 Subject: [PATCH 015/251] Fixed indefinite no scale down. Problem was the data frame period parsing. 
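For context: pandas DataFrame.last() selects trailing periods based on the frame's index, so with these metrics frames, whose timestamps live in a plain column, the window selection could silently come back wrong and the empty-window scale-down branch never fired. The replacement filters the timestamp column explicitly. A minimal sketch of the idea, assuming a plain "timestamp" column of pandas Timestamps (the real code builds the cutoff as a string and applies it through DataFrame.query; the helper name here is illustrative):

    import pandas as pd

    def trailing_window(metrics: pd.DataFrame, seconds: int) -> pd.DataFrame:
        # Keep only the rows whose timestamp falls inside the trailing window.
        cutoff = pd.Timestamp.now() - pd.Timedelta(seconds=seconds)
        return metrics[metrics["timestamp"] >= cutoff]

    df = pd.DataFrame({"timestamp": [pd.Timestamp.now() - pd.Timedelta(seconds=s)
                                     for s in (5, 30, 600)]})
    print(trailing_window(df, 60))  # keeps only the 5s- and 30s-old rows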
--- .../model_scheduler/autoscaler/autoscaler.py | 44 +++++++++++++++---- 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py index d81e0148eb..009345863a 100644 --- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py +++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py @@ -8,7 +8,7 @@ from enum import Enum from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache from fedml.computing.scheduler.model_scheduler.autoscaler.policies import * -from utils.singleton import Singleton +from fedml.computing.scheduler.model_scheduler.autoscaler.utils.singleton import Singleton class ScaleOp(Enum): @@ -38,6 +38,26 @@ def get_current_timestamp_micro_seconds(cls): # in REDIS we record/operate in micro-seconds, hence the division by 1e3! return int(format(time.time_ns() / 1000.0, '.0f')) + @classmethod + def filter_by_timestamp(cls, + metrics, + before_now_minutes=None, + before_now_seconds=None) -> pd.DataFrame: + + # We subtract the number of seconds/minutes from the current timestamp, and then we query + # the data frame to fetch all the records whose timestamp is within the given range. + # By default, we return all records. + filtered = metrics + if before_now_minutes: + less_than_ts = \ + str(pd.Timestamp.now() - pd.Timedelta(minutes=before_now_minutes)) + filtered = metrics.query("'{}' <= {}".format(less_than_ts, "timestamp")) + if before_now_seconds: + less_than_ts = \ + str(pd.Timestamp.now() - pd.Timedelta(seconds=before_now_seconds)) + filtered = metrics.query("'{}' <= {}".format(less_than_ts, "timestamp")) + return filtered + @classmethod def scale_operation_predictive(cls, predictive_policy: PredictivePolicy, @@ -55,7 +75,8 @@ def scale_operation_ewm(cls, # Adding the context below to avoid having a series of warning messages. with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) - period_data = metrics.last("{}min".format(ewm_policy.ewm_mins)) + period_data = cls.filter_by_timestamp(metrics, + before_now_minutes=ewm_policy.ewm_mins) # If the data frame window is empty then it means we # did not have any incoming request, so we need to scale down. @@ -119,8 +140,9 @@ def scale_operation_query_concurrency(cls, # Adding the context below to avoid having a series of warning messages. with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) - # Here, the number of queries is the number of rows in the short period data frame. - period_data = metrics.last("{}s".format(concurrent_query_policy.window_size_secs)) + period_data = cls.filter_by_timestamp( + metrics, + before_now_seconds=concurrent_query_policy.window_size_secs) # If the data frame window is empty then it means we # did not have any incoming request, so we need to scale down. @@ -168,8 +190,9 @@ def scale_operation_meet_traffic_demand(cls, # Adding the context below to avoid having a series of warning messages. with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) - # Here, the number of queries is the number of rows in the short period data frame. 
- period_data = metrics.last("{}s".format(meet_traffic_demand_policy.window_size_secs)) + period_data = cls.filter_by_timestamp( + metrics, + before_now_seconds=meet_traffic_demand_policy.window_size_secs) # If the data frame window is empty then it means we # did not have any incoming request, so we need to scale down. @@ -257,6 +280,7 @@ def enforce_scaling_down_delay_interval(self, # If the policy has no scaledown delay then return immediately. if autoscaling_policy.scaledown_delay_secs == 0: + logging.info("No scale down delay, so scale down immediately.") return ScaleOp.DOWN_IN_OP # By default, we return a no operation. @@ -270,11 +294,13 @@ def enforce_scaling_down_delay_interval(self, previous_timestamp = \ self.fedml_model_cache.get_endpoint_scaling_down_decision_time(endpoint_id) diff_secs = (current_timestamp - previous_timestamp) / 1e6 - logging.info("Difference in seconds between scaling down operations: {}".format(diff_secs)) if diff_secs > autoscaling_policy.scaledown_delay_secs: + logging.info("Scaling down since the time difference: {}secs, " + "is above the delay period: {} secs.".format( + diff_secs, autoscaling_policy.scaledown_delay_secs)) # At this point, we will perform the scaling down operation, hence # we need to delete the previously stored scaling down timestamp (if any). - self.fedml_model_cache.delete_endpoint_scaling_down_decision_time(endpoint_id) + self.clean_up_scaling_down_operation_state(endpoint_id) scale_op = ScaleOp.DOWN_IN_OP else: # Record the timestamp of the scaling down operation. @@ -285,7 +311,7 @@ def enforce_scaling_down_delay_interval(self, def clean_up_scaling_down_operation_state(self, endpoint_id) -> bool: # We return True if the cleaning up operation succeeded, else False. - logging.info("Not a scaling down operation, cleaning up scale down state from Redis.") + logging.info("Cleaning up scale down state from Redis.") to_clean_up = \ self.fedml_model_cache.exists_endpoint_scaling_down_decision_time(endpoint_id) if to_clean_up: From 2ff516e59877164f290e93c16cfcd43a48b46ef5 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Thu, 2 May 2024 16:08:46 -0700 Subject: [PATCH 016/251] [Deploy] Fix edge case of readiness check. 
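In outline, the lenient probe added below treats an explicit HTTP 202 from the /ready endpoint as the only hard "not ready" signal, while a timeout is taken as a replica that is alive but under pressure. A sketch of that decision, with a hypothetical synchronous probe standing in for the async FedMLHttpInference.is_inference_ready call:

    import requests

    def replica_seems_ready(ready_url: str, timeout_secs: float = 5.0) -> bool:
        try:
            resp = requests.get(ready_url, timeout=timeout_secs)
        except requests.Timeout:
            # Under high concurrency a timeout may only mean pressure, not death.
            return True
        if resp.status_code == 202:
            # The replica explicitly reports that it is not ready.
            return False
        return True  # 200 (or anything other than 202) is treated as ready here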
--- .../scheduler/comm_utils/job_monitor.py | 32 +++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py index 5874adfef7..c24dd2a830 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py +++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py @@ -574,6 +574,15 @@ def monitor_slave_endpoint_status(self): is_endpoint_ready = self._check_and_reset_endpoint_status( job.job_id, job.edge_id, deployment_result, only_check_inference_ready_status=True) + # [Hotfix] Under high-concurrency situations, the ready endpoint might not be available + # but the container is in a healthy state + # In this case, we need an exact 503 code, instead of a timeout, to decide to restart + # TODO(Raphael): Split the /ready endpoint and predict endpoint traffic + if not self._lenient_check_replica_ready(deployment_result): + is_endpoint_ready = False + else: + is_endpoint_ready = True + # Get endpoint container name prefix, prepare for restart endpoint_container_name_prefix = \ (device_client_constants.ClientConstants.get_endpoint_container_name( @@ -736,6 +745,28 @@ def monitor_slave_endpoint_status(self): except Exception as e: pass + def _lenient_check_replica_ready( + self, deployment_result + ): + """ + Double-check the replica's liveness using /ready api: + if 200 -> return True + [Critical] if timeout -> Could be under high pressure -> return True + if HTTP_202_ACCEPTED -> unhealthy -> return False + """ + result_json = deployment_result + inference_url = result_json.get("model_url", None) + + # Make a curl get to inference_url with timeout 5s + # TODO(Raphael): Also support PROXY and MQTT to check the readiness + response_ok = asyncio.run(FedMLHttpInference.is_inference_ready(inference_url, timeout=5)) + if response_ok is None: + # This means the server returned 202 + return False + + # 200 or Timeout + return True + def _check_and_reset_endpoint_status( self, endpoint_id, device_id, deployment_result, only_check_inference_ready_status=False, should_release_gpu_ids=False @@ -761,6 +792,7 @@ def _check_and_reset_endpoint_status( if self.endpoint_unavailable_counter.get(str(endpoint_id)) is None: self.endpoint_unavailable_counter[str(endpoint_id)] = 0 + if not response_ok: self.endpoint_unavailable_counter[str(endpoint_id)] += 1 else: From 162d896894e829e3adcded97287232307fe75539 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Fri, 3 May 2024 12:13:22 -0700 Subject: [PATCH 017/251] [Deploy] Catch exceptions when processing autoscale. 
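The reconcile loop is wrapped so that one endpoint raising cannot abort autoscaling for the rest. A minimal sketch of the fault-isolation pattern (function and variable names are illustrative):

    import logging

    def reconcile_all(endpoint_settings_list, reconcile_one):
        for endpoint in endpoint_settings_list:
            try:
                reconcile_one(endpoint)
            except Exception as e:
                # Log and move on; one bad endpoint must not starve the others.
                logging.error(f"Autoscaler reconcile failed for {endpoint}: {e}")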
- # Set the policy, here we use latency, but other metrics are possible as well, such as qps. - # For more advanced use cases look for the testing scripts under the autoscaler/test directory. - autoscaling_policy_config = \ - { - "current_replicas": int(endpoint_settings["replica_num"]), - "min_replicas": int(endpoint_settings["scale_min"]), - "max_replicas": int(endpoint_settings["scale_max"]), - "queries_per_replica": int(endpoint_settings["target_queries_per_replica"]), - "window_size_secs": int(endpoint_settings["aggregation_window_size_seconds"]), - "scaledown_delay_secs": int(endpoint_settings["scale_down_delay_seconds"]), + try: # Should not let one endpoint affect the others + logging.info(f"After interval, check the autoscaler for async future list." + f"{self.endpoints_autoscale_predict_future}") + # TODO(fedml-dimitris): The policy can be set dynamically or be user specific. + # Set the policy, here we use latency, but other metrics are possible as well, such as qps. + # For more advanced use cases look for the testing scripts under the autoscaler/test directory. + autoscaling_policy_config = \ + { + "current_replicas": int(endpoint_settings["replica_num"]), + "min_replicas": int(endpoint_settings["scale_min"]), + "max_replicas": int(endpoint_settings["scale_max"]), + "queries_per_replica": int(endpoint_settings["target_queries_per_replica"]), + "window_size_secs": int(endpoint_settings["aggregation_window_size_seconds"]), + "scaledown_delay_secs": int(endpoint_settings["scale_down_delay_seconds"]), + } + autoscaling_policy = ConcurrentQueryPolicy(**autoscaling_policy_config) + + e_id, e_name, model_name = endpoint_settings["endpoint_id"], endpoint_settings["endpoint_name"], \ + endpoint_settings["model_name"] + + logging.info(f"Querying the autoscaler for endpoint {e_id} with user settings {endpoint_settings}.") + + # For every endpoint we just update the policy configuration. + autoscaling_policy.min_replicas = endpoint_settings["scale_min"] + autoscaling_policy.max_replicas = endpoint_settings["scale_max"] + # We retrieve a list of replicas for every endpoint. The number + # of running replicas is the length of that list. + current_replicas = len(fedml_model_cache.get_endpoint_replicas_results(e_id)) + autoscaling_policy.current_replicas = current_replicas + logging.info(f"Endpoint {e_id} autoscaling policy: {autoscaling_policy}.") + + scale_op = autoscaler.scale_operation_endpoint( + autoscaling_policy, + str(e_id)) + + new_replicas = current_replicas + scale_op.value + + logging.info(f"Scaling operation {scale_op.value} for endpoint {e_id} .") + logging.info(f"New Replicas {new_replicas} for endpoint {e_id} .") + logging.info(f"Current Replicas {current_replicas} for endpoint {e_id} .") + if current_replicas == new_replicas: + # Basically the autoscaler decided that no scaling operation should take place. 
+ logging.info(f"No scaling operation for endpoint {e_id}.") + return + + # Should scale in / out + curr_version = fedml.get_env_version() + + if curr_version == "release": + mlops_prefix = "https://open.fedml.ai/" + elif curr_version == "test": + mlops_prefix = "https://open-test.fedml.ai/" + else: + logging.error(f"Do not support the version {curr_version}.") + return + autoscale_url_path = "fedmlModelServer/api/v1/endpoint/auto-scale" + url = f"{mlops_prefix}{autoscale_url_path}" + + # Get cached token for authorization of autoscale request + cached_token = fedml_model_cache.get_end_point_token(e_id, e_name, model_name) + if cached_token is None: + logging.error(f"Failed to get the cached token for endpoint {e_id}.") + return + + req_header = { + "Authorization": f"Bearer {cached_token}" + } + req_body = { + "endpointId": int(e_id), + "replicasDesired": int(new_replicas) } - autoscaling_policy = ConcurrentQueryPolicy(**autoscaling_policy_config) - - e_id, e_name, model_name = endpoint_settings["endpoint_id"], endpoint_settings["endpoint_name"], \ - endpoint_settings["model_name"] - - logging.info(f"Querying the autoscaler for endpoint {e_id} with user settings {endpoint_settings}.") - - # For every endpoint we just update the policy configuration. - autoscaling_policy.min_replicas = endpoint_settings["scale_min"] - autoscaling_policy.max_replicas = endpoint_settings["scale_max"] - # We retrieve a list of replicas for every endpoint. The number - # of running replicas is the length of that list. - current_replicas = len(fedml_model_cache.get_endpoint_replicas_results(e_id)) - autoscaling_policy.current_replicas = current_replicas - logging.info(f"Endpoint {e_id} autoscaling policy: {autoscaling_policy}.") - - scale_op = autoscaler.scale_operation_endpoint( - autoscaling_policy, - str(e_id)) - - new_replicas = current_replicas + scale_op.value - - logging.info(f"Scaling operation {scale_op.value} for endpoint {e_id} .") - logging.info(f"New Replicas {new_replicas} for endpoint {e_id} .") - logging.info(f"Current Replicas {current_replicas} for endpoint {e_id} .") - if current_replicas == new_replicas: - # Basically the autoscaler decided that no scaling operation should take place. - logging.info(f"No scaling operation for endpoint {e_id}.") - return - - # Should scale in / out - curr_version = fedml.get_env_version() - - if curr_version == "release": - mlops_prefix = "https://open.fedml.ai/" - elif curr_version == "test": - mlops_prefix = "https://open-test.fedml.ai/" - else: - logging.error(f"Do not support the version {curr_version}.") - return - autoscale_url_path = "fedmlModelServer/api/v1/endpoint/auto-scale" - url = f"{mlops_prefix}{autoscale_url_path}" - - # Get cached token for authorization of autoscale request - cached_token = fedml_model_cache.get_end_point_token(e_id, e_name, model_name) - if cached_token is None: - logging.error(f"Failed to get the cached token for endpoint {e_id}.") - return - - req_header = { - "Authorization": f"Bearer {cached_token}" - } - req_body = { - "endpointId": int(e_id), - "replicasDesired": int(new_replicas) - } - try: - logging.info(f"Sending the autoscale request to MLOps platform. 
url {url}, " - f"body {req_body}., header {req_header}") - response = requests.post( - url, - headers=req_header, - json=req_body - ) - if response.status_code != 200: - logging.error(f"Failed to send the autoscale request to MLOps platform.") - else: - logging.info(f"Successfully sent the autoscale request to MLOps platform.") + try: + logging.info(f"Sending the autoscale request to MLOps platform. url {url}, " + f"body {req_body}., header {req_header}") + response = requests.post( + url, + headers=req_header, + json=req_body + ) + if response.status_code != 200: + logging.error(f"Failed to send the autoscale request to MLOps platform.") + else: + logging.info(f"Successfully sent the autoscale request to MLOps platform.") + except Exception as e: + logging.error(f"Failed to send the autoscale request to MLOps platform. {e}") except Exception as e: - logging.error(f"Failed to send the autoscale request to MLOps platform. {e}") + logging.error(f"Error in autoscaler reconcile after interval. {e}") + pass return @staticmethod From 0002dcc2ea9eb12a7a72e92c2f4d887b8cddb06f Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Fri, 3 May 2024 16:53:03 -0700 Subject: [PATCH 018/251] [Deploy] Restrict spacy version. --- python/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/setup.py b/python/setup.py index ae1efc0dff..b4b6545808 100644 --- a/python/setup.py +++ b/python/setup.py @@ -54,7 +54,7 @@ def finalize_options(self): 'redis', 'scikit-learn', 'smart-open==6.3.0', - 'spacy', + 'spacy>=3.2.0,<3.3.0', 'sqlalchemy', 'toposort', 'torch>=1.13.1', From addd91c8c3412951eed68662c0b1652577770bf2 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Fri, 3 May 2024 17:02:55 -0700 Subject: [PATCH 019/251] [Deploy] Remove pydantic-settings to avoid PyYAML compatibility issue. --- python/setup.py | 1 - 1 file changed, 1 deletion(-) diff --git a/python/setup.py b/python/setup.py index b4b6545808..c78a597a4b 100644 --- a/python/setup.py +++ b/python/setup.py @@ -47,7 +47,6 @@ def finalize_options(self): 'prettytable', 'py-machineid', 'pydantic', - 'pydantic-settings', 'pytest', 'pytest-mock', 'python-rapidjson>=0.9.1', From 064cfcabb840f9235c283814bf8f347de70cc993 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Fri, 3 May 2024 19:03:06 -0700 Subject: [PATCH 020/251] [Deploy] Put spacy in extra requirement --- python/setup.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/python/setup.py b/python/setup.py index c78a597a4b..cce0ddb2ca 100644 --- a/python/setup.py +++ b/python/setup.py @@ -53,7 +53,6 @@ def finalize_options(self): 'redis', 'scikit-learn', 'smart-open==6.3.0', - 'spacy>=3.2.0,<3.3.0', 'sqlalchemy', 'toposort', 'torch>=1.13.1', @@ -112,6 +111,10 @@ def finalize_options(self): "deepspeed>=0.10.2", ] +requirements_extra_nlp = [ + 'spacy>=3.2.0,<3.3.0', +] + # if platform.machine() == "x86_64": # requirements.append("MNN==1.1.6") @@ -177,6 +180,7 @@ def finalize_options(self): "llm": requirements_extra_llm, "mxnet": requirements_extra_mxnet, "tensorflow": requirements_extra_tf, + "nlp": requirements_extra_nlp, }, package_data={"": ["py.typed"]}, license="Apache 2.0", From 282f2e12785de3d04116886cb0343062e7f16ed8 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Mon, 6 May 2024 10:51:34 -0700 Subject: [PATCH 021/251] [Deploy] Handle status when deploy failed. 
--- .../model_scheduler/device_server_runner.py | 25 ++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py index 89e74bbd74..4bcac6d2db 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py @@ -667,7 +667,30 @@ def callback_deployment_result_message(self, topic=None, payload=None): self.send_rollback_add_remove_op(run_id_str, rollback_dict) return else: - pass # This is the last worker that failed, so we should continue to "ABORTED" status + # This is the last worker that failed, so we should continue to "ABORTED" status + model_config_parameters = self.running_request_json[run_id_str]["parameters"] + inference_port = model_config_parameters.get("server_internal_port", + ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) + inference_port_external = model_config_parameters.get("server_external_port", inference_port) + ip = self.get_ip_address(self.running_request_json[run_id_str]) + if ip.startswith("http://") or ip.startswith("https://"): + model_inference_url = "{}/inference/{}".format(ip, end_point_id) + else: + model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, + end_point_id) + + self.send_deployment_status(end_point_id, end_point_name, + payload_json["model_name"], + model_inference_url, + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED) + + # For auto-scaling, should update the state to "DEPLOYED" + FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ + update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED") + + self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False + + return elif run_operation == "UPDATE": # Overwrite the json with the rollback version diff rollback_version_diff = \ From e64b6c6b46e0dc81904abcaeed5932b373111ce6 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Mon, 6 May 2024 12:32:23 -0700 Subject: [PATCH 022/251] [Deploy] Saved failed deployment log to a file. 
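Failed-container logs land under the client home directory, one folder per endpoint and one file per container. A simplified sketch of the writer; the base directory argument stands in for ClientConstants.get_deploy_failed_log_dir():

    import os

    def save_failed_log(base_dir, end_point_id, container_name, err_logs, out_logs):
        error_logs_dir = os.path.join(base_dir, str(end_point_id))
        os.makedirs(error_logs_dir, exist_ok=True)
        error_log_file = os.path.join(error_logs_dir, f"{container_name}.log")
        with open(error_log_file, "w") as f:
            f.write(f"Container {container_name} has exited\n")
            f.write(f"Error logs: {err_logs}\n")
            f.write(f"Output logs: {out_logs}\n")
        return error_log_file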
--- .../model_scheduler/device_client_constants.py | 7 +++++++ .../model_scheduler/device_model_deployment.py | 16 ++++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py index 915690e9a4..d2093569c3 100644 --- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py @@ -274,6 +274,13 @@ def get_model_serving_dir(): os.makedirs(model_file_dir, exist_ok=True) return model_file_dir + @staticmethod + def get_deploy_failed_log_dir(): + model_file_dir = os.path.join(ClientConstants.get_fedml_home_dir(), "fedml", "logs", "failed_logs") + if not os.path.exists(model_file_dir): + os.makedirs(model_file_dir, exist_ok=True) + return model_file_dir + @staticmethod def get_model_infer_data_dir(): model_infer_data_dir = os.path.join(ClientConstants.get_fedml_home_dir(), "fedml", "models_infer_data") diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py index 8670633eeb..bd04228355 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py @@ -507,6 +507,22 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type, if container_obj.status == "exited": logging.info("Container {} has exited, automatically remove it".format(cmd_container_name)) + + # Save the failed log into ~/.fedml/fedml-model-client/fedml/logs/failed_logs/ + # $run_id/$container_name.log + try: + parent_dir = os.path.join(ClientConstants.get_deploy_failed_log_dir()) + os.makedirs(parent_dir, exist_ok=True) + error_logs_dir = os.path.join(ClientConstants.get_deploy_failed_log_dir(), str(end_point_id)) + os.makedirs(error_logs_dir, exist_ok=True) + error_log_file = os.path.join(error_logs_dir, f"{cmd_container_name}.log") + with open(error_log_file, "w") as f: + f.write(f"Container {cmd_container_name} has exited\n") + f.write(f"Error logs: {err_logs}\n") + f.write(f"Output logs: {out_logs}\n") + except Exception as e: + logging.error(f"Failed to save the error logs with exception {e}") + client.api.remove_container(container_obj.id, v=True, force=True) break From c6cb5b0d1c9f75de449e8f2fce8f3e3d9e48d60d Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Tue, 7 May 2024 15:39:22 -0400 Subject: [PATCH 023/251] Removing field validator for resolving spacy and pydantic conflict. 
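Background: the spacy pin keeps pydantic on a v1 range, where the v2-only field_validator decorator does not exist, so the model falls back to the v1-style validator. A usage sketch under that assumption (pydantic<2 semantics; the class name is illustrative):

    from pydantic import BaseModel, validator

    class MetricChoice(BaseModel):
        metric: str

        @validator("metric")
        def metric_match(cls, v) -> str:
            if v not in ["ewm_latency", "ewm_qps"]:
                raise ValueError("Wrong metric name.")
            return v

    MetricChoice(metric="ewm_latency")  # validates cleanly
    # MetricChoice(metric="qps") would raise a ValidationError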
--- .../model_scheduler/autoscaler/policies.py | 9 +++++---- .../autoscaler/test/autoscaler_test.py | 14 +++++++------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py index fd49549812..0ad2cc0d13 100644 --- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py +++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/policies.py @@ -1,4 +1,4 @@ -from pydantic import BaseModel, field_validator, NonNegativeInt, NonNegativeFloat +from pydantic import BaseModel, NonNegativeInt, NonNegativeFloat, validator class AutoscalingPolicy(BaseModel): @@ -70,9 +70,10 @@ class EWMPolicy(AutoscalingPolicy): ub_threshold: NonNegativeFloat # recommended value: 0.5 lb_threshold: NonNegativeFloat # recommended value: 0.5 - @field_validator("metric") - def validate_option(cls, v): - assert v in ["ewm_latency", "ewm_qps"] + @validator("metric") + def metric_match(cls, v) -> str: + if v not in ["ewm_latency", "ewm_qps"]: + raise ValueError("Wrong metric name.") return v diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/autoscaler_test.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/autoscaler_test.py index 7af1022c7d..eadc2dc9a9 100644 --- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/autoscaler_test.py +++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/test/autoscaler_test.py @@ -111,21 +111,21 @@ def test_validate_scaling_bounds(self): # Validate scale up. scale_up = autoscaler.validate_scaling_bounds(ScaleOp.UP_OUT_OP, autoscaling_policy) - self.assertEquals(scale_up, ScaleOp.UP_OUT_OP) + self.assertEqual(scale_up, ScaleOp.UP_OUT_OP) # Validate scale down. scale_down = autoscaler.validate_scaling_bounds(ScaleOp.DOWN_IN_OP, autoscaling_policy) - self.assertEquals(scale_down, ScaleOp.DOWN_IN_OP) + self.assertEqual(scale_down, ScaleOp.DOWN_IN_OP) # Validate max out-of-bounds. autoscaling_policy.current_replicas = 3 scale_oob_max = autoscaler.validate_scaling_bounds(ScaleOp.UP_OUT_OP, autoscaling_policy) - self.assertEquals(scale_oob_max, ScaleOp.NO_OP) + self.assertEqual(scale_oob_max, ScaleOp.NO_OP) # Validate min out-of-bounds. 
autoscaling_policy.current_replicas = 1 scale_oob_min = autoscaler.validate_scaling_bounds(ScaleOp.DOWN_IN_OP, autoscaling_policy) - self.assertEquals(scale_oob_min, ScaleOp.NO_OP) + self.assertEqual(scale_oob_min, ScaleOp.NO_OP) def test_enforce_scaling_down_delay_interval(self): self.populate_redis_with_dummy_metrics() @@ -140,15 +140,15 @@ def test_enforce_scaling_down_delay_interval(self): autoscaling_policy.scaledown_delay_secs = 0.0 scale_down = autoscaler.enforce_scaling_down_delay_interval(ENV_ENDPOINT_ID_1, autoscaling_policy) - self.assertEquals(scale_down, ScaleOp.DOWN_IN_OP) + self.assertEqual(scale_down, ScaleOp.DOWN_IN_OP) autoscaling_policy.scaledown_delay_secs = 1 scale_noop = autoscaler.enforce_scaling_down_delay_interval(ENV_ENDPOINT_ID_1, autoscaling_policy) - self.assertEquals(scale_noop, ScaleOp.NO_OP) + self.assertEqual(scale_noop, ScaleOp.NO_OP) time.sleep(2) scale_down = autoscaler.enforce_scaling_down_delay_interval(ENV_ENDPOINT_ID_1, autoscaling_policy) - self.assertEquals(scale_down, ScaleOp.DOWN_IN_OP) + self.assertEqual(scale_down, ScaleOp.DOWN_IN_OP) self.clear_redis() From 3cc9bb26ea96cbcaf1833bbf5edd9cf7ff7c6713 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Fri, 10 May 2024 17:02:38 -0700 Subject: [PATCH 024/251] Add Util classes --- .../scheduler/comm_utils/GPUCardUtil.py | 46 +++++++++++++++++++ .../scheduler/comm_utils/HardwareUtil.py | 34 ++++++++++++++ .../scheduler/comm_utils/NvidiaGPUtil.py | 42 +++++++++++++++++ .../scheduler/comm_utils/QualcommNPUtil.py | 0 .../utils => comm_utils}/singleton.py | 11 ++++- .../model_scheduler/autoscaler/autoscaler.py | 2 +- 6 files changed, 133 insertions(+), 2 deletions(-) create mode 100644 python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py create mode 100644 python/fedml/computing/scheduler/comm_utils/HardwareUtil.py create mode 100644 python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py create mode 100644 python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py rename python/fedml/computing/scheduler/{model_scheduler/autoscaler/utils => comm_utils}/singleton.py (56%) diff --git a/python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py b/python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py new file mode 100644 index 0000000000..dd5cb9ff97 --- /dev/null +++ b/python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py @@ -0,0 +1,46 @@ +from abc import ABC, abstractmethod +from dataclasses import dataclass +from enum import Enum, auto +from typing import Optional, List + + +class GPUCardType(Enum): + NVIDIA = auto() + QUALCOMM = auto() + UNKNOWN = auto() + + def __str__(self): + return self.name + + +@dataclass +class GPUCard: + id: int + uuid: str + name: str + load: float + memoryTotal: float + memoryUsed: float + memoryFree: float + driver: str + serial: Optional[str] + display_mode: Optional[str] + display_active: Optional[str] + temperature: Optional[float] + + +class GPUCardUtil(ABC): + + @classmethod + def detectGPUCardType(cls) -> GPUCardType: + raise NotImplementedError + + @staticmethod + @abstractmethod + def getAvailableGPUCardIDs() -> List[int]: + raise NotImplementedError + + @staticmethod + @abstractmethod + def getGPUCards() -> List[GPUCard]: + raise NotImplementedError diff --git a/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py b/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py new file mode 100644 index 0000000000..9d908886b0 --- /dev/null +++ b/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py @@ -0,0 +1,34 @@ +import logging +from 
typing import Optional, List + +from fedml.computing.scheduler.comm_utils.GPUCardUtil import GPUCardUtil, GPUCard +from fedml.computing.scheduler.comm_utils.singleton import Singleton + + +class HardwareUtil(metaclass=Singleton): + + def __init__(self): + self._gpu_util: Optional[GPUCardUtil] = self.__get_util() + + @staticmethod + def __get_util() -> Optional[GPUCardUtil]: + for cls in GPUCardUtil.__subclasses__(): + try: + if cls.detectGPUCardType() is not None: + return cls() + except Exception as e: + pass + + logging.error("No GPU card detected") + return None + + def getGPUs(self) -> List[GPUCard]: + if self._gpu_util is None: + return [] + return self._gpu_util.getGPUCards() + + def getAvailableGPUCardIDs(self) -> List[int]: + if self._gpu_util is None: + return [] + return self._gpu_util.getAvailableGPUCardIDs() + diff --git a/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py b/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py new file mode 100644 index 0000000000..66317c67c8 --- /dev/null +++ b/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py @@ -0,0 +1,42 @@ +import subprocess +from typing import List + +from GPUtil import GPUtil, GPU + +from fedml.computing.scheduler.comm_utils.GPUCardUtil import GPUCard, GPUCardUtil, GPUCardType + + +def _convert(gpu: GPU) -> GPUCard: + return GPUCard( + id=gpu.id, + uuid=gpu.uuid, + name=gpu.name, + load=gpu.load, + memoryTotal=gpu.memoryTotal, + memoryUsed=gpu.memoryUsed, + memoryFree=gpu.memoryFree, + driver=gpu.driver, + serial=gpu.serial, + display_mode=gpu.display_mode, + display_active=gpu.display_active, + temperature=gpu.temperature + ) + + +class NvidiaGPUtil(GPUCardUtil): + + @staticmethod + def getAvailableGPUCardIDs() -> List[int]: + return GPUtil.getAvailable() + + @staticmethod + def getGPUCards() -> List[GPUCard]: + return [_convert(gpu) for gpu in GPUtil.getGPUs()] + + @classmethod + def detectGPUCardType(cls): + try: + subprocess.check_output(["nvidia-smi"], universal_newlines=True) + return GPUCardType.NVIDIA + except Exception: + return None diff --git a/python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py b/python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/utils/singleton.py b/python/fedml/computing/scheduler/comm_utils/singleton.py similarity index 56% rename from python/fedml/computing/scheduler/model_scheduler/autoscaler/utils/singleton.py rename to python/fedml/computing/scheduler/comm_utils/singleton.py index 5c76acea97..dd403965c1 100644 --- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/utils/singleton.py +++ b/python/fedml/computing/scheduler/comm_utils/singleton.py @@ -1,3 +1,6 @@ +import threading + + class Singleton(type): """ @@ -8,8 +11,14 @@ class Singleton(type): """ _instances = {} + # For thread safety + _lock = threading.Lock() def __call__(cls, *args, **kwargs): if cls not in cls._instances: - cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) + with cls._lock: + # Another thread might have created the instance before the lock was acquired. + # So check again if the instance is already created. 
+ if cls not in cls._instances: + cls._instances[cls] = super(Singleton, cls).__call__(*args, **kwargs) return cls._instances[cls] diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py index 009345863a..bb2b59e7d9 100644 --- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py +++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py @@ -8,7 +8,7 @@ from enum import Enum from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache from fedml.computing.scheduler.model_scheduler.autoscaler.policies import * -from fedml.computing.scheduler.model_scheduler.autoscaler.utils.singleton import Singleton +from fedml.computing.scheduler.comm_utils.singleton import Singleton class ScaleOp(Enum): From e18428603a8e31767cef653e6b37370e68a4cbec Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Fri, 10 May 2024 18:20:39 -0700 Subject: [PATCH 025/251] Rename util files for pattern matching --- .../scheduler/comm_utils/{GPUCardUtil.py => gpu_utils.py} | 0 .../scheduler/comm_utils/{HardwareUtil.py => hardware_utils.py} | 2 +- .../scheduler/comm_utils/{NvidiaGPUtil.py => nvidia_utils.py} | 2 +- .../comm_utils/{QualcommNPUtil.py => qualcomm_utils.py} | 0 4 files changed, 2 insertions(+), 2 deletions(-) rename python/fedml/computing/scheduler/comm_utils/{GPUCardUtil.py => gpu_utils.py} (100%) rename python/fedml/computing/scheduler/comm_utils/{HardwareUtil.py => hardware_utils.py} (91%) rename python/fedml/computing/scheduler/comm_utils/{NvidiaGPUtil.py => nvidia_utils.py} (91%) rename python/fedml/computing/scheduler/comm_utils/{QualcommNPUtil.py => qualcomm_utils.py} (100%) diff --git a/python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils.py similarity index 100% rename from python/fedml/computing/scheduler/comm_utils/GPUCardUtil.py rename to python/fedml/computing/scheduler/comm_utils/gpu_utils.py diff --git a/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py similarity index 91% rename from python/fedml/computing/scheduler/comm_utils/HardwareUtil.py rename to python/fedml/computing/scheduler/comm_utils/hardware_utils.py index 9d908886b0..e57b905e4a 100644 --- a/python/fedml/computing/scheduler/comm_utils/HardwareUtil.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -1,7 +1,7 @@ import logging from typing import Optional, List -from fedml.computing.scheduler.comm_utils.GPUCardUtil import GPUCardUtil, GPUCard +from fedml.computing.scheduler.comm_utils.gpu_utils import GPUCardUtil, GPUCard from fedml.computing.scheduler.comm_utils.singleton import Singleton diff --git a/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py b/python/fedml/computing/scheduler/comm_utils/nvidia_utils.py similarity index 91% rename from python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py rename to python/fedml/computing/scheduler/comm_utils/nvidia_utils.py index 66317c67c8..0ce49e1ccd 100644 --- a/python/fedml/computing/scheduler/comm_utils/NvidiaGPUtil.py +++ b/python/fedml/computing/scheduler/comm_utils/nvidia_utils.py @@ -3,7 +3,7 @@ from GPUtil import GPUtil, GPU -from fedml.computing.scheduler.comm_utils.GPUCardUtil import GPUCard, GPUCardUtil, GPUCardType +from fedml.computing.scheduler.comm_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType def _convert(gpu: GPU) -> 
GPUCard:

diff --git a/python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py b/python/fedml/computing/scheduler/comm_utils/qualcomm_utils.py
similarity index 100%
rename from python/fedml/computing/scheduler/comm_utils/QualcommNPUtil.py
rename to python/fedml/computing/scheduler/comm_utils/qualcomm_utils.py

From 8c8c1da3a07b7862bc7c7ba1893c77e4080277ae Mon Sep 17 00:00:00 2001
From: alaydshah
Date: Sat, 11 May 2024 03:30:57 +0000
Subject: [PATCH 026/251] MVP

---
 .../scheduler/comm_utils/__init__.py          |  3 ++
 .../__init__.py}                              |  0
 .../comm_utils/{ => gpu_utils}/gpu_utils.py   |  5 ++-
 .../{ => gpu_utils}/nvidia_utils.py           | 23 +++++-----
 .../comm_utils/gpu_utils/qualcomm_utils.py    |  0
 .../scheduler/comm_utils/hardware_utils.py    | 42 ++++++++++++------
 6 files changed, 44 insertions(+), 29 deletions(-)
 rename python/fedml/computing/scheduler/comm_utils/{qualcomm_utils.py => gpu_utils/__init__.py} (100%)
 rename python/fedml/computing/scheduler/comm_utils/{ => gpu_utils}/gpu_utils.py (89%)
 rename python/fedml/computing/scheduler/comm_utils/{ => gpu_utils}/nvidia_utils.py (82%)
 create mode 100644 python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py

diff --git a/python/fedml/computing/scheduler/comm_utils/__init__.py b/python/fedml/computing/scheduler/comm_utils/__init__.py
index e69de29bb2..adf0269b67 100644
--- a/python/fedml/computing/scheduler/comm_utils/__init__.py
+++ b/python/fedml/computing/scheduler/comm_utils/__init__.py
@@ -0,0 +1,3 @@
+from .gpu_utils import gpu_utils
+from .gpu_utils import qualcomm_utils
+from .gpu_utils import nvidia_utils
\ No newline at end of file

diff --git a/python/fedml/computing/scheduler/comm_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/__init__.py
similarity index 100%
rename from python/fedml/computing/scheduler/comm_utils/qualcomm_utils.py
rename to python/fedml/computing/scheduler/comm_utils/gpu_utils/__init__.py

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
similarity index 89%
rename from python/fedml/computing/scheduler/comm_utils/gpu_utils.py
rename to python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
index dd5cb9ff97..e6691b4b5d 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py
@@ -1,4 +1,4 @@
-from abc import ABC, abstractmethod
+from abc import ABC, abstractmethod, ABCMeta
 from dataclasses import dataclass
 from enum import Enum, auto
 from typing import Optional, List
@@ -32,7 +32,7 @@ class GPUCard:
 class GPUCardUtil(ABC):
 
     @classmethod
-    def detectGPUCardType(cls) -> GPUCardType:
+    def detectGPUCardType(cls) -> Optional[GPUCardType]:
         raise NotImplementedError
 
     @staticmethod
@@ -44,3 +44,4 @@ def getAvailableGPUCardIDs() -> List[int]:
     @abstractmethod
     def getGPUCards() -> List[GPUCard]:
         raise NotImplementedError
+
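GPUCardUtil above is the extension point for additional accelerator vendors: a backend only has to implement the detection probe and the two card queries. As a rough sketch of what a hypothetical AMD backend could look like (the AmdGPUtil name, the rocm-smi probe, and the stubbed card parsing are illustrative assumptions, not part of this patch set):

    import subprocess
    from typing import List, Optional

    from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardType, GPUCardUtil


    class AmdGPUtil(GPUCardUtil):
        @classmethod
        def detectGPUCardType(cls) -> Optional[GPUCardType]:
            try:
                # Probe the vendor CLI; any failure means "not this vendor".
                subprocess.check_output(["rocm-smi"], universal_newlines=True)
                return GPUCardType.UNKNOWN  # a real backend would add an AMD member to GPUCardType
            except Exception:
                return None

        @staticmethod
        def getGPUCards() -> List[GPUCard]:
            # A real backend would parse the vendor CLI output into GPUCard records.
            return []

        @staticmethod
        def getAvailableGPUCardIDs() -> List[int]:
            # A real backend would return the IDs of cards with free capacity.
            return []

Returning None from detectGPUCardType is the contract for "not this vendor"; the detection loop in HardwareUtil relies on that to fall through to the next candidate.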
diff --git a/python/fedml/computing/scheduler/comm_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
similarity index 82%
rename from python/fedml/computing/scheduler/comm_utils/nvidia_utils.py
rename to python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
index 0ce49e1ccd..349230cef5 100644
--- a/python/fedml/computing/scheduler/comm_utils/nvidia_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py
@@ -1,9 +1,9 @@
 import subprocess
-from typing import List
+from typing import List, Optional
 
 from GPUtil import GPUtil, GPU
 
-from fedml.computing.scheduler.comm_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
+from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType
 
 
 def _convert(gpu: GPU) -> GPUCard:
@@ -24,19 +24,18 @@ def _convert(gpu: GPU) -> GPUCard:
 
 class NvidiaGPUtil(GPUCardUtil):
-
-    @staticmethod
-    def getAvailableGPUCardIDs() -> List[int]:
-        return GPUtil.getAvailable()
-
-    @staticmethod
-    def getGPUCards() -> List[GPUCard]:
-        return [_convert(gpu) for gpu in GPUtil.getGPUs()]
-
-    @classmethod
-    def detectGPUCardType(cls):
+    @classmethod
+    def detectGPUCardType(cls) -> Optional[GPUCardType]:
         try:
             subprocess.check_output(["nvidia-smi"], universal_newlines=True)
             return GPUCardType.NVIDIA
         except Exception:
             return None
+
+    @staticmethod
+    def getGPUCards() -> List[GPUCard]:
+        return [_convert(gpu) for gpu in GPUtil.getGPUs()]
+
+    @staticmethod
+    def getAvailableGPUCardIDs() -> List[int]:
+        return GPUtil.getAvailable()

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
new file mode 100644
index 0000000000..e69de29bb2

diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
index e57b905e4a..d26fb9c5b5 100644
--- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py
@@ -1,34 +1,46 @@
 import logging
+
 from typing import Optional, List
 
-from fedml.computing.scheduler.comm_utils.gpu_utils import GPUCardUtil, GPUCard
+from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard
+from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil
 from fedml.computing.scheduler.comm_utils.singleton import Singleton
 
 
 class HardwareUtil(metaclass=Singleton):
 
-    def __init__(self):
-        self._gpu_util: Optional[GPUCardUtil] = self.__get_util()
+    _gpu_utils = [NvidiaGPUtil]
+    _gpu_util: Optional[GPUCardUtil] = None
 
     @staticmethod
-    def __get_util() -> Optional[GPUCardUtil]:
-        for cls in GPUCardUtil.__subclasses__():
+    def _get_util() -> Optional[GPUCardUtil]:
+        if HardwareUtil._gpu_util is not None:
+            return HardwareUtil._gpu_util
+
+        for gpu_util in HardwareUtil._gpu_utils:
             try:
-                if cls.detectGPUCardType() is not None:
-                    return cls()
+                if gpu_util.detectGPUCardType() is not None:
+                    HardwareUtil._gpu_util = gpu_util()
+                    return HardwareUtil._gpu_util
             except Exception as e:
                 pass
 
         logging.error("No GPU card detected")
         return None
 
-    def getGPUs(self) -> List[GPUCard]:
-        if self._gpu_util is None:
-            return []
-        return self._gpu_util.getGPUCards()
+    @staticmethod
+    def getGPUs() -> List[GPUCard]:
+        gpu_util = HardwareUtil._get_util()
+        return gpu_util.getGPUCards() if gpu_util is not None else []
+
+    @staticmethod
+    def getAvailableGPUCardIDs() -> List[int]:
+        gpu_util = HardwareUtil._get_util()
+        return gpu_util.getAvailableGPUCardIDs() if gpu_util is not None else []
 
-    def getAvailableGPUCardIDs(self) -> List[int]:
-        if self._gpu_util is None:
-            return []
-        return self._gpu_util.getAvailableGPUCardIDs()
+
+if __name__ == "__main__":
+    gpus = HardwareUtil.getGPUs()
+    get_available_gpu_cards = HardwareUtil.getAvailableGPUCardIDs()
+    print(gpus)
+    print(get_available_gpu_cards)
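A design note on the MVP above: patch 024 discovered vendor utils via GPUCardUtil.__subclasses__(), while this patch switches to the explicit _gpu_utils = [NvidiaGPUtil] list. __subclasses__() only reports subclasses whose defining modules have already been imported, so discovery silently depends on import side effects. A small standalone demonstration of that pitfall:

    from abc import ABC


    class Base(ABC):
        pass


    print(Base.__subclasses__())  # [] -- no subclass has been defined/imported yet


    class Sub(Base):
        pass


    print(Base.__subclasses__())  # [<class '__main__.Sub'>] -- visible only after definition

The explicit list trades plugin-style discovery for deterministic detection that is independent of import order.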
From a35d433d4d2ec87dff89b3c01ff9057aee70f2d3 Mon Sep 17 00:00:00 2001
From: Alex
Date: Sat, 11 May 2024 20:28:21 +0800
Subject: [PATCH 027/251] 1. Get the docker container with a timeout value so that we can avoid blocking the system. 2. Fix the time error when checking the run process in the job monitor. 3. Support docker authentication when launching the job. 4. Send the edge status detection message once in the protocol manager, save the status response message to the job runner's queue, and process the edge status in the job runner. 5. Open a process to download the package so that we can avoid blocking the request and can check for timeout. 6. Refactor the status center to make the status sequence work reliably. 7. Change the retain flag to true when establishing an MQTT connection so that we can handle messages even if the device is not started (see the retained-message sketch below). 8. Sync the latest deployment module to this branch.

---
 python/fedml/api/api_test.py                  |  19 +-
 .../scheduler/comm_utils/container_utils.py   |   4 +-
 .../scheduler/comm_utils/job_cleanup.py       |   1 +
 .../scheduler/comm_utils/job_monitor.py       |  15 +-
 .../scheduler/comm_utils/job_utils.py         |   5 +-
 .../master/base_master_job_runner.py          | 179 +++++++++++----
 .../master/base_master_protocol_manager.py    | 156 ++------------
 .../scheduler/master/launch_job_runner.py     |   2 +-
 .../scheduler/master/server_daemon.py         |  18 +-
 .../model_scheduler/device_client_runner.py   |  38 +++-
 .../device_model_deployment.py                |   4 +-
 .../model_scheduler/device_server_runner.py   | 203 +++++++++++++-----
 .../model_scheduler/job_runner_msg_sender.py  |   5 +-
 .../model_scheduler/master_job_runner.py      | 133 +++++++++---
 .../master_protocol_manager.py                |  46 ++--
 .../model_scheduler/worker_job_runner.py      |  26 ++-
 .../worker_protocol_manager.py                |  24 ++-
 .../scheduler_core/general_constants.py       |   2 +
 .../scheduler_core/message_center.py          |   2 +-
 .../scheduler_base_job_runner.py              |  95 +++++++-
 .../scheduler_base_job_runner_manager.py      |   7 +-
 .../scheduler/scheduler_core/status_center.py |   8 +-
 .../status_manager_protocols.py               |  12 +-
 .../scheduler/slave/base_slave_job_runner.py  |   3 +-
 .../slave/base_slave_protocol_manager.py      |   5 +-
 .../scheduler/slave/client_data_interface.py  |  14 +-
 .../communication/mqtt/mqtt_manager.py        |   2 +-
 python/fedml/core/mlops/mlops_metrics.py      |  49 +++--
 28 files changed, 711 insertions(+), 366 deletions(-)
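Regarding point 7 of the message above: an MQTT broker keeps the last retained payload per topic and replays it to clients that subscribe later. A minimal paho-mqtt sketch of the mechanism (the broker host, port, client id, and edge id in the topic are placeholders, not FedML's actual configuration):

    import paho.mqtt.client as mqtt

    client = mqtt.Client(client_id="retain-demo")
    client.connect("broker.example.com", 1883)

    # With retain=True the broker stores this payload as the topic's "last known
    # good" message and delivers it immediately to any client that subscribes
    # later, so an agent that was offline when the message was published still
    # receives it on its next connection.
    client.publish("flserver_agent/0/start_train", payload="{}", qos=1, retain=True)
    client.disconnect()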
diff --git a/python/fedml/api/api_test.py b/python/fedml/api/api_test.py
index 54da088d0d..fc2fb77b20 100755
--- a/python/fedml/api/api_test.py
+++ b/python/fedml/api/api_test.py
@@ -18,20 +18,23 @@
 yaml_file = os.path.join(python_dir, "examples", "launch", "hello_job.yaml")
 
 # Launch job
-for i in range(0, 10):
+launch_result_list = list()
+for i in range(0, 1):
     launch_result = fedml.api.launch_job(yaml_file)
+    launch_result_list.append(launch_result)
     # launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster")
     if launch_result.result_code != 0:
         print(f"Failed to launch job. Reason: {launch_result.result_message}")
-exit(1)
-
-# Get job status
-log_result = fedml.api.run_logs(launch_result.run_id, 1, 100)
-if log_result is None or log_result.run_status is None:
-    print(f"Failed to get job status.")
-    exit(1)
-print(f"Run status {log_result.run_status}")
+# Get job status
+while len(launch_result_list) > 0:
+    for launch_result in launch_result_list:
+        log_result = fedml.api.run_logs(launch_result.run_id, 1, 5)
+        if log_result is None or log_result.run_status is None:
+            print(f"Failed to get job status.")
+            continue
+        print(f"Run status {log_result.run_status}")
+        time.sleep(0.5)
 
 # Get job logs
 time.sleep(30)

diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py
index cfaa5b6457..417aa7ba81 100644
--- a/python/fedml/computing/scheduler/comm_utils/container_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py
@@ -20,7 +20,7 @@ def get_instance():
 
     def get_docker_client(self):
         try:
-            client = docker.from_env()
+            client = docker.from_env(timeout=5, version="auto")
         except Exception:
             logging.error("Failed to connect to the docker daemon, please ensure that you have "
                           "installed Docker Desktop or Docker Engine, and the docker is running")
@@ -175,7 +175,7 @@ def get_container_rank_same_model(prefix: str):
         running_model_name = hash("model_endpoint_id_{}_name_{}_model_id_{}_name_{}_ver_{}")
         """
         try:
-            client = docker.from_env()
+            client = docker.from_env(timeout=5, version="auto")
         except Exception:
             logging.error("Failed to connect to the docker daemon, please ensure that you have "
                           "installed Docker Desktop or Docker Engine, and the docker is running")

diff --git a/python/fedml/computing/scheduler/comm_utils/job_cleanup.py b/python/fedml/computing/scheduler/comm_utils/job_cleanup.py
index ed30c1bf2e..6700b0bc7a 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_cleanup.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_cleanup.py
@@ -44,6 +44,7 @@ def sync_run_process_gpu(self):
                 ComputeCacheManager.get_instance().get_gpu_cache().get_run_info_sync_lock_key("")
         ):
             count = 0
+            client_data_interface.FedMLClientDataInterface.get_instance().create_job_table()
             job_list = client_data_interface.FedMLClientDataInterface.get_instance().get_jobs_from_db()
             for job in job_list.job_list:
                 count += 1

diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
index 30182a6207..6a9afb9d69 100644
--- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py
+++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py
@@ -81,7 +81,7 @@ def monitor_slave_run_process_status(self):
                     break
 
                 # Calc the timeout
-                started_time = int(float(job.started_time))
+                started_time = JobMonitor.get_started_time(job)
                 timeout = time.time() - started_time
 
                 job_type = JobRunnerUtils.parse_job_type(job.running_json)
@@ -157,6 +157,15 @@ def monitor_slave_run_process_status(self):
             print(f"Exception when monitoring endpoint process on the slave agent.{traceback.format_exc()}")
             pass
 
+    @staticmethod
+    def get_started_time(job):
+        started_time = int(float(job.started_time))
+        if started_time <= 0:
+            started_time = int(float(job.updated_time))
+        if started_time <= 0:
+            started_time = time.time()
+        return started_time
+
     def monitor_master_run_process_status(self, server_id, device_info_reporter=None):
         try:
             ComputeCacheManager.get_instance().set_redis_params()
@@ -168,7 +177,7 @@ def monitor_master_run_process_status(self, server_id,
device_info_reporter=None break # Calc the timeout - started_time = int(float(job.started_time)) + started_time = JobMonitor.get_started_time(job) timeout = time.time() - started_time # Get the timeout threshold @@ -416,7 +425,7 @@ def monitor_slave_endpoint_status(self): endpoint_name = endpoint_json.get("end_point_name", None) device_ids = endpoint_json.get("device_ids", []) - started_time = int(float(job.started_time)) + started_time = JobMonitor.get_started_time(job) timeout = time.time() - started_time if timeout > SchedulerConstants.ENDPOINT_DEPLOYMENT_DEPLOYING_TIMEOUT: print(f"[Worker][{job.job_id}:{job.edge_id}] Due to timeout, " diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py index 1423c3e6ab..44290de37d 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py @@ -568,8 +568,9 @@ def get_run_container_name(run_id: int) -> str: @staticmethod def get_docker_client(docker_args: DockerArgs) -> DockerClient: try: - client = docker.from_env() - client.login(username=docker_args.username, password=docker_args.password, registry=docker_args.registry) + client = docker.from_env(timeout=5, version="auto") + if docker_args.username != "" and docker_args.registry != "": + client.login(username=docker_args.username, password=docker_args.password, registry=docker_args.registry) except Exception as e: raise Exception(f"Failed to connect to the docker daemon, please ensure that you have " f"installed Docker Desktop or Docker Engine, and the docker is running. Exception {e}") diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py index 3dbc1fd891..ce0515160f 100755 --- a/python/fedml/computing/scheduler/master/base_master_job_runner.py +++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py @@ -23,6 +23,8 @@ from ..scheduler_core.general_constants import GeneralConstants from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError from abc import ABC, abstractmethod +from ..scheduler_core.scheduler_matcher import SchedulerMatcher +import fedml class FedMLBaseMasterJobRunner(FedMLSchedulerBaseJobRunner, ABC): @@ -424,11 +426,15 @@ def start_runner_process( ServerConstants.save_run_process(run_id, self.run_process.pid) return self.run_process - def put_run_edge_device_info_to_queue(self, run_id, device_info): - run_id_str = str(run_id) - if self.run_edge_device_info_queue is None: - self.run_edge_device_info_queue = Queue() - self.run_edge_device_info_queue.put(device_info) + def put_run_edge_device_info_to_queue(self, run_id, edge_id, device_info): + edge_ids = self.request_json.get("edgeids", None) + if edge_ids is None: + return + if int(edge_id) in edge_ids or str(edge_id) in edge_ids: + run_id_str = str(run_id) + if self.run_edge_device_info_queue is None: + self.run_edge_device_info_queue = Queue() + self.run_edge_device_info_queue.put(device_info) def should_continue_run_job(self, run_id): run_config = self.request_json["run_config"] @@ -467,23 +473,6 @@ def detect_edges_status( run_edges_realtime_status = dict() run_edges_realtime_status[run_id_str] = dict() - edge_info_global_dict = dict() - - # Send status message to all edges - allowed_cache_edge_status_time = 60 - for edge_id in edge_id_list: - # Check if the edge status was filled allowed_cache_edge_status_time seconds 
ago, - # if so no more checking message would be sent. - edge_info = edge_info_global_dict.get(edge_id, None) - if edge_info is not None: - timestamp = edge_info.get("timestamp", None) - time_interval = time.time() - timestamp - if time_interval <= allowed_cache_edge_status_time: - continue - - self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context) - time.sleep(3) - total_sleep_seconds = 0 status_check_sleep_seconds = 10 allowed_status_check_sleep_seconds = 60 * 2 if status_timeout is None else status_timeout @@ -522,26 +511,14 @@ def detect_edges_status( active_edges_count += 1 active_edge_info_dict[str(edge_id)] = edge_info else: - # Check if the edge status was filled allowed_cache_edge_status_time seconds ago, - # if so no more checking message would be sent. - edge_info = edge_info_global_dict.get(edge_id, None) - if edge_info is not None: - timestamp = edge_info.get("timestamp", None) - time_interval = time.time() - timestamp - if time_interval <= allowed_cache_edge_status_time: - active_edges_count += 1 - active_edge_info_dict[str(edge_id)] = edge_info - continue - inactivate_edges.append(edge_id) - self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context) # If all edges are ready then send the starting job message to them if active_edges_count == len(edge_id_list): logging.info(f"All edges are ready. Active edge id list is as follows. {active_edge_info_dict}") if callback_when_edges_ready is not None: logging.info("All edges are ready. Start to process the callback function.") - callback_when_edges_ready(active_edge_info_dict=active_edge_info_dict) + callback_when_edges_ready(self.request_json, active_edge_info_dict=active_edge_info_dict) else: logging.info("All edges are ready. 
No callback function to process.") break @@ -572,18 +549,11 @@ def detect_edges_status( if should_async and total_sleep_seconds >= allowed_status_check_sleep_seconds_for_async: if async_timeout > allowed_status_check_sleep_seconds_for_async: time.sleep(async_timeout - allowed_status_check_sleep_seconds_for_async) - self.send_training_request_to_edges(active_edge_info_dict) + self.send_training_request_to_edges(self.request_json, active_edge_info_dict) return True, active_edge_info_dict, inactivate_edges return True, active_edge_info_dict, inactivate_edges - def send_status_check_msg(self, run_id, edge_id, server_id, context=None): - topic_get_model_device_id = "server/client/request_device_info/" + str(edge_id) - payload = {"server_id": server_id, "run_id": run_id} - if context is not None: - payload["context"] = context - self.message_center.send_message(topic_get_model_device_id, json.dumps(payload)) - def report_exception_status(self, run_id): self.status_reporter.report_job_status(run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION) @@ -602,10 +572,125 @@ def callback_run_metrics(self, topic, payload): self.run_metrics_queue = Queue() self.run_metrics_queue.put(payload) - def send_training_request_to_edges(self, active_edge_info_dict): - topic = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES - payload = json.dumps(active_edge_info_dict) - self.message_center.receive_message(topic, payload) + # def send_training_request_to_edges(self, active_edge_info_dict): + # topic = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES + # payload = json.dumps(active_edge_info_dict) + # self.message_center.receive_message(topic, payload) + def send_training_request_to_edges(self, request_json, active_edge_info_dict=None): + run_id = request_json["runId"] + edge_id_list = request_json["edgeids"] + run_config = request_json.get("run_config", {}) + run_params = run_config.get("parameters", {}) + job_yaml = run_params.get("job_yaml", {}) + job_yaml_default_none = run_params.get("job_yaml", None) + computing = job_yaml.get("computing", {}) + request_num_gpus = computing.get("minimum_num_gpus", None) + job_gpu_id_list = request_json.get("job_gpu_id_list", None) + assigned_gpu_num_dict = dict() + assigned_gpu_ids_dict = dict() + master_node_addr = "" + master_node_port = 0 + + logging.info(f"Send training request to Edge ids: {edge_id_list}, run_id {run_id}") + + should_match_gpu = False + if job_yaml_default_none is not None and request_num_gpus is not None and \ + int(request_num_gpus) > 0 and active_edge_info_dict is not None: + should_match_gpu = True + SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True) + + # Match and assign gpus to each device + assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices( + request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list) + if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None: + # If no resources available, send failed message to MLOps and send exception message to all edges. + gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges( + active_edge_info_dict, should_print=True) + err_info = f"No resources available." 
\ + f"Total available GPU count {gpu_available_count} is less than " \ + f"request GPU count {request_num_gpus}" + logging.error(err_info) + + # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the + # status from running to failed. + self.mlops_metrics.report_server_training_status( + run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id + ) + + self.status_reporter.report_server_id_status( + run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.server_agent_id) + self.report_exception_status(run_id) + + serving_args = job_yaml.get("serving_args", {}) + endpoint_id = serving_args.get("endpoint_id", None) + if endpoint_id is not None: + fedml.mlops.log_endpoint_status( + endpoint_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED) + fedml.mlops.log_run_log_lines( + endpoint_id, 0, [err_info], + log_source=GeneralConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT + ) + return + + # Generate master node addr and port + master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list, + active_edge_info_dict) + + # Generate new edge id list after matched + edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict) + if len(edge_id_list) <= 0: + gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges( + active_edge_info_dict, should_print=True) + logging.error(f"Request parameter for GPU num is invalid." + f"Total available GPU count {gpu_available_count}." + f"Request GPU num {request_num_gpus}") + self.status_reporter.report_server_id_status( + run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.server_agent_id) + self.report_exception_status(run_id) + return + + if should_match_gpu: + # Report gpu num and related infos to MLOps. 
+ serving_args = job_yaml.get("serving_args", {}) + endpoint_id = serving_args.get("endpoint_id", None) + if endpoint_id is not None: + endpoint_info = list() + for edge_id_item, gpu_num in assigned_gpu_num_dict.items(): + edge_info = active_edge_info_dict.get(str(edge_id_item), {}) + endpoint_info.append({ + "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num, + "master_deploy_id": edge_info.get("master_device_id", 0), + "slave_deploy_id": edge_info.get("slave_device_id", 0)}) + topic_name = f"compute/mlops/endpoint" + endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info} + print(f"endpoint_info_json {endpoint_info_json}") + self.message_center.send_message(topic_name, json.dumps(endpoint_info_json)) + + client_rank = 1 + for edge_id in edge_id_list: + topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train" + logging.info("start_train: send topic " + topic_start_train + " to client...") + request_json["client_rank"] = client_rank + client_rank += 1 + + if active_edge_info_dict is not None: + edge_info = active_edge_info_dict.get(str(edge_id), {}) + model_master_device_id = edge_info.get("master_device_id", None) + model_slave_device_id = edge_info.get("slave_device_id", None) + model_slave_device_id_list = edge_info.get("slave_device_id_list", None) + + if should_match_gpu: + request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler( + edge_id, edge_id_list, master_node_addr, master_node_port, + assigned_gpu_num_dict, assigned_gpu_ids_dict, + model_master_device_id=model_master_device_id, + model_slave_device_id=model_slave_device_id, + model_slave_device_id_list=model_slave_device_id_list + ) + + self.message_center.send_message(topic_start_train, json.dumps(request_json)) def should_process_async_cluster(self): run_config = self.request_json.get("run_config", {}) diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index 25cab5a17c..ef59431ee8 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -3,16 +3,10 @@ import json import logging import fedml -from ..scheduler_core.scheduler_matcher import SchedulerMatcher from ..comm_utils.constants import SchedulerConstants -from ..comm_utils.job_utils import JobRunnerUtils from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog from ....core.mlops.mlops_configs import MLOpsConfigs from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon -from ..comm_utils import sys_utils -from ....core.mlops.mlops_utils import MLOpsUtils -from ..model_scheduler import device_client_constants -from fedml.utils.debugging import debug from ..scheduler_core.compute_cache_manager import ComputeCacheManager from ..scheduler_core.ota_upgrade import FedMLOtaUpgrade from .deploy_job_launcher import FedMLDeployJobLauncher @@ -91,9 +85,6 @@ def generate_topics(self): # The topic for last-will messages. self.topic_last_will = "flserver_agent/last_will_msg" - # The topic for sending training request to edges (Request from the job runner when all edges are ready) - self.topic_send_training_request_to_edges = GeneralConstants.MSG_TOPIC_SEND_TRAINING_REQUEST_TO_EDGES - # Subscribe topics for starting train, stopping train and fetching client status. 
self.subscribed_topics.clear() self.add_subscribe_topic(self.topic_start_train) @@ -115,13 +106,10 @@ def add_protocol_handler(self): self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg) self.add_message_listener(self.topic_report_status, self.callback_report_current_status) self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info) - self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info) self.add_message_listener(self.topic_request_device_info_from_mlops, self.callback_request_device_info_from_mlops) self.add_message_listener(self.topic_requesst_job_status, self.callback_request_job_status) self.add_message_listener(self.topic_requesst_device_status_in_job, self.callback_request_device_status_in_job) - self.add_message_listener(self.topic_send_training_request_to_edges, - self.callback_send_training_request_to_edges) @abstractmethod def _get_job_runner_manager(self): @@ -197,7 +185,7 @@ def callback_start_train(self, topic=None, payload=None): # Print the payload logging.info("callback_start_train payload: {}".format(payload)) logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" + f"FedMLDebug - run id {run_id}, Receive at callback_start_train: topic ({topic}), payload ({payload})" ) # Save the parameters @@ -212,7 +200,7 @@ def callback_start_train(self, topic=None, payload=None): if not self.run_as_cloud_server: self.mlops_metrics.report_server_id_status( run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) + server_id=self.edge_id, server_agent_id=self.edge_id, running_json=payload) # Start server with multiprocessing mode if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: @@ -230,6 +218,8 @@ def callback_start_train(self, topic=None, payload=None): process = self._get_job_runner_manager().get_runner_process(run_id) if process is not None: GeneralConstants.save_run_process(run_id, process.pid, is_master=True) + + self.send_status_msg_to_edges(edge_id_list, run_id, self.edge_id) elif self.run_as_cloud_agent: self.init_job_task(request_json) @@ -261,6 +251,8 @@ def callback_start_train(self, topic=None, payload=None): status_center_queue=self.get_status_queue() ) + self.send_status_msg_to_edges(edge_id_list, run_id, self.edge_id) + def callback_stop_train(self, topic, payload, use_payload=None): # Print the payload logging.info( @@ -346,7 +338,7 @@ def callback_response_device_info(self, topic, payload): # Put device info into a multiprocessing queue so master runner checks if all edges are ready if context is None: - self._get_job_runner_manager().put_run_edge_device_info_to_queue(run_id, device_info) + self._get_job_runner_manager().put_run_edge_device_info_to_queue(run_id, edge_id, device_info) # if self.run_edge_device_info_global_queue is None: # self.run_edge_device_info_global_queue = Array('i', list()) @@ -368,10 +360,6 @@ def callback_request_job_status(self, topic, payload): def callback_request_device_status_in_job(self, topic, payload): self.response_device_status_in_job(topic, payload) - def callback_send_training_request_to_edges(self, topic, payload): - payload_json = json.loads(payload) - self.send_training_request_to_edges(active_edge_info_dict=payload_json) - def generate_protocol_manager(self): message_status_runner = self._generate_protocol_manager_instance( self.args, agent_config=self.agent_config @@ -457,124 
+445,6 @@ def init_job_task(self, request_json): self.setup_listener_for_run_metrics(run_id) self.setup_listener_for_run_logs(run_id) - @debug - def send_training_request_to_edges(self, active_edge_info_dict=None): - run_id = self.request_json["runId"] - edge_id_list = self.request_json["edgeids"] - run_config = self.request_json.get("run_config", {}) - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_yaml_default_none = run_params.get("job_yaml", None) - computing = job_yaml.get("computing", {}) - request_num_gpus = computing.get("minimum_num_gpus", None) - job_gpu_id_list = self.request_json.get("job_gpu_id_list", None) - assigned_gpu_num_dict = dict() - assigned_gpu_ids_dict = dict() - master_node_addr = "" - master_node_port = 0 - - logging.info("Send training request to Edge ids: " + str(edge_id_list)) - - should_match_gpu = False - if job_yaml_default_none is not None and request_num_gpus is not None and \ - int(request_num_gpus) > 0 and active_edge_info_dict is not None: - should_match_gpu = True - SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True) - - # Match and assign gpus to each device - assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices( - request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list) - if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None: - # If no resources available, send failed message to MLOps and send exception message to all edges. - gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges( - active_edge_info_dict, should_print=True) - err_info = f"No resources available." \ - f"Total available GPU count {gpu_available_count} is less than " \ - f"request GPU count {request_num_gpus}" - logging.error(err_info) - - # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the - # status from running to failed. - self.mlops_metrics.report_server_training_status( - run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id - ) - - self.status_reporter.report_server_id_status( - run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.server_agent_id) - self.report_exception_status(run_id) - - serving_args = job_yaml.get("serving_args", {}) - endpoint_id = serving_args.get("endpoint_id", None) - if endpoint_id is not None: - fedml.mlops.log_endpoint_status( - endpoint_id, device_client_constants.ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - fedml.mlops.log_run_log_lines( - endpoint_id, 0, [err_info], - log_source=device_client_constants.ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT - ) - return - - # Generate master node addr and port - master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list, - active_edge_info_dict) - - # Generate new edge id list after matched - edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict) - if len(edge_id_list) <= 0: - gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges( - active_edge_info_dict, should_print=True) - logging.error(f"Request parameter for GPU num is invalid." - f"Total available GPU count {gpu_available_count}." 
- f"Request GPU num {request_num_gpus}") - self.status_reporter.report_server_id_status( - run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.server_agent_id) - self.report_exception_status(run_id) - return - - if should_match_gpu: - # Report gpu num and related infos to MLOps. - serving_args = job_yaml.get("serving_args", {}) - endpoint_id = serving_args.get("endpoint_id", None) - if endpoint_id is not None: - endpoint_info = list() - for edge_id_item, gpu_num in assigned_gpu_num_dict.items(): - edge_info = active_edge_info_dict.get(str(edge_id_item), {}) - endpoint_info.append({ - "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num, - "master_deploy_id": edge_info.get("master_device_id", 0), - "slave_deploy_id": edge_info.get("slave_device_id", 0)}) - topic_name = f"compute/mlops/endpoint" - endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info} - print(f"endpoint_info_json {endpoint_info_json}") - self.message_center.send_message(topic_name, json.dumps(endpoint_info_json)) - - client_rank = 1 - for edge_id in edge_id_list: - topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train" - logging.info("start_train: send topic " + topic_start_train + " to client...") - request_json = self.request_json - request_json["client_rank"] = client_rank - client_rank += 1 - - if active_edge_info_dict is not None: - edge_info = active_edge_info_dict.get(str(edge_id), {}) - model_master_device_id = edge_info.get("master_device_id", None) - model_slave_device_id = edge_info.get("slave_device_id", None) - model_slave_device_id_list = edge_info.get("slave_device_id_list", None) - - if should_match_gpu: - request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler( - edge_id, edge_id_list, master_node_addr, master_node_port, - assigned_gpu_num_dict, assigned_gpu_ids_dict, - model_master_device_id=model_master_device_id, - model_slave_device_id=model_slave_device_id, - model_slave_device_id_list=model_slave_device_id_list - ) - - self.message_center.send_message(topic_start_train, json.dumps(request_json)) - def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id): edge_status_topic = "fl_client/flclient_agent_" + str(server_id) + "/status" payload = {"run_id": run_id, "init_all_edge_id_list": edge_ids, "init_server_id": server_id} @@ -628,6 +498,18 @@ def send_training_stop_request_to_specific_edge(self, edge_id, payload): logging.info("stop_train: send topic " + topic_stop_train) self.message_center.send_message(topic_stop_train, payload) + def send_status_check_msg(self, run_id, edge_id, server_id, context=None): + topic_status_check = f"server/client/request_device_info/{edge_id}" + payload = {"server_id": server_id, "run_id": run_id} + if context is not None: + payload["context"] = context + self.message_center.send_message(topic_status_check, json.dumps(payload)) + + def send_status_msg_to_edges(self, edge_id_list, run_id, server_id, context=None): + # Send status message to all edges + for edge_id in edge_id_list: + self.send_status_check_msg(run_id, edge_id, self.edge_id, context=context) + def report_exception_status(self, run_id): self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION) diff --git a/python/fedml/computing/scheduler/master/launch_job_runner.py b/python/fedml/computing/scheduler/master/launch_job_runner.py index c28458fc0f..3f26da1ef7 100755 --- 
a/python/fedml/computing/scheduler/master/launch_job_runner.py +++ b/python/fedml/computing/scheduler/master/launch_job_runner.py @@ -19,7 +19,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id # Override def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None,): return FedMLLaunchMasterJobRunner( - args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id + args, run_id=run_id, request_json=request_json, agent_config=agent_config, edge_id=edge_id ) # Override diff --git a/python/fedml/computing/scheduler/master/server_daemon.py b/python/fedml/computing/scheduler/master/server_daemon.py index bc02621a44..8fe85f3381 100755 --- a/python/fedml/computing/scheduler/master/server_daemon.py +++ b/python/fedml/computing/scheduler/master/server_daemon.py @@ -115,18 +115,24 @@ if os.path.exists(login_exit_file): print(f"[Server] Login process is exited, check the exit file {login_exit_file}") if retry_count > 3: - print(f"Retry count is over 3 times, exit the process. Check the log file for more details. " - f"Login logs: {login_logs}, Exit file: {login_exit_file}") - exit(1) + if args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_CLOUD_AGENT_INDEX]: + retry_count = 0 + else: + print(f"Retry count is over 3 times, exit the process. Check the log file for more details. " + f"Login logs: {login_logs}, Exit file: {login_exit_file}") + exit(1) retry_flag = True if len(login_pids) == 0: message = f"[Server] Login process is exited, check the log file {login_logs}" print(message) if retry_count >= 3: - print(f"Retry count is over 3 times, exit the process. Check the log file for more details. " - f"Login logs: {login_logs}, Exit file: {login_exit_file}") - exit(1) + if args.role == ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_CLOUD_AGENT_INDEX]: + retry_count = 0 + else: + print(f"Retry count is over 3 times, exit the process. Check the log file for more details. 
" + f"Login logs: {login_logs}, Exit file: {login_exit_file}") + exit(1) retry_flag = True if retry_flag: diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py index 3c7d0fb05b..8bb03eebbd 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py @@ -24,6 +24,8 @@ import fedml from fedml import mlops from fedml.computing.scheduler.model_scheduler.device_model_msg_object import FedMLModelMsgObject +from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager + from fedml.computing.scheduler.scheduler_core.compute_utils import ComputeUtils from fedml.core.distributed.communication.s3.remote_storage import S3Storage from .device_model_cache import FedMLModelCache @@ -356,7 +358,6 @@ def run_impl(self): ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT) inference_end_point_id = run_id - self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id) MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.") @@ -478,7 +479,6 @@ def run_impl(self): if op == "add": worker_ip = self.get_ip_address(self.request_json) for rank in range(prev_rank + 1, prev_rank + 1 + op_num): - # TODO: Support Rollback if this for loop failed try: running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ start_deployment( @@ -496,6 +496,18 @@ def run_impl(self): if inference_output_url == "": logging.error("[Worker] Failed to deploy the model.") + # Release the gpu occupancy + FedMLModelCache.get_instance().set_redis_params() + replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids( + run_id, end_point_name, model_name, self.edge_id, rank + 1) + logging.info(f"Release gpu ids {replica_occupied_gpu_ids_str} for " + f"failed deployment of replica no {rank + 1}.") + + if replica_occupied_gpu_ids_str is not None: + replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str) + JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, + self.edge_id, replica_occupied_gpu_ids) + # Send failed result back to master result_payload = self.send_deployment_results( end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, @@ -892,7 +904,7 @@ def callback_start_deployment(self, topic, payload): run_id = inference_end_point_id self.args.run_id = run_id self.args.edge_id = self.edge_id - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + MLOpsRuntimeLog(args=self.args).init_logs() MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source( ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT) MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id) @@ -978,6 +990,26 @@ def callback_delete_deployment(self, topic, payload): model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, self.edge_id) + # Delete FEDML_GLOBAL_ENDPOINT_RUN_ID_MAP_TAG-${run_id} both in redis and local db + ComputeCacheManager.get_instance().gpu_cache.delete_endpoint_run_id_map(str(model_msg_object.run_id)) + + # Delete FEDML_EDGE_ID_MODEL_DEVICE_ID_MAP_TAG-${run_id} both in redis and local db + 
ComputeCacheManager.get_instance().gpu_cache.delete_edge_model_id_map(str(model_msg_object.run_id)) + + # Delete FEDML_GLOBAL_DEVICE_RUN_GPU_IDS_TAG-${run_id}-${device_id} both in redis and local db + ComputeCacheManager.get_instance().gpu_cache.delete_device_run_gpu_ids(str(self.edge_id), + str(model_msg_object.run_id)) + + # Delete FEDML_GLOBAL_DEVICE_RUN_NUM_GPUS_TAG-${run_id}-${device_id} both in redis and local db + ComputeCacheManager.get_instance().gpu_cache.delete_device_run_num_gpus(str(self.edge_id), + str(model_msg_object.run_id)) + + # Delete FEDML_MODEL_REPLICA_GPU_IDS_TAG-${run_id}-${end_point_name}-${model_name}-${device_id}-* + FedMLModelCache.get_instance().set_redis_params() + FedMLModelCache.get_instance().delete_all_replica_gpu_ids(model_msg_object.run_id, + model_msg_object.end_point_name, + model_msg_object.model_name, self.edge_id) + def exit_run_with_exception_entry(self): try: self.setup_client_mqtt_mgr() diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py index 9b370d9ae4..bf476dd468 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py @@ -209,7 +209,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version, infer_host = "127.0.0.1" try: - client = docker.from_env() + client = docker.from_env(timeout=5, version="auto") if enable_custom_image and docker_registry_user_name != "" and docker_registry_user_password != "" \ and docker_registry != "": client.login(username=docker_registry_user_name, password=docker_registry_user_password, @@ -466,7 +466,7 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type, logging.info(f"Test: {inference_http_port}, Attempt: {deploy_attempt} / {deploy_attempt_threshold}") try: - client = docker.from_env() + client = docker.from_env(timeout=5, version="auto") except Exception: logging.error("Failed to connect to the docker daemon, please ensure that you have " "installed Docker Desktop or Docker Engine, and the docker is running") diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py index 38acff8d82..4bcac6d2db 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py @@ -23,6 +23,7 @@ import fedml from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils +from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter from ..comm_utils import sys_utils from .device_server_data_interface import FedMLServerDataInterface @@ -122,6 +123,8 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id self.replica_controller = None self.deployed_replica_payload = None + self.autoscaler_launcher = None + def build_dynamic_constrain_variables(self, run_id, run_config): pass @@ -304,6 +307,7 @@ def run_impl(self): inference_end_point_id, use_gpu, memory_size, model_version, inference_port = self.parse_model_run_params( self.request_json) + # TODO(Raphael): This measurement is for the host machine. 
Change to container's metrics self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id) self.check_runner_stop_event() @@ -347,28 +351,38 @@ def run_impl(self): devices_sent_add_or_remove_msg = self.send_deployment_start_request_to_edges() # Handle "op:update" - devices_sent_update_remove_msg = self.send_first_scroll_update_msg() - - if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0: - # No device is added, updated or removed - logging.info("No device is added, updated or removed. No action needed for reconciliation.") - ip = self.get_ip_address(self.request_json) - master_port = os.getenv("FEDML_MASTER_PORT", None) - if master_port is not None: - inference_port = int(master_port) - model_inference_port = inference_port - if ip.startswith("http://") or ip.startswith("https://"): - model_inference_url = "{}/api/v1/predict".format(ip) - else: - model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port) + try: + devices_sent_update_remove_msg = self.send_first_scroll_update_msg() + + if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0: + # No device is added, updated or removed + logging.info("No device is added, updated or removed. No action needed for reconciliation.") + ip = self.get_ip_address(self.request_json) + master_port = os.getenv("FEDML_MASTER_PORT", None) + if master_port is not None: + inference_port = int(master_port) + model_inference_port = inference_port + if ip.startswith("http://") or ip.startswith("https://"): + model_inference_url = "{}/api/v1/predict".format(ip) + else: + model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port) - self.set_runner_completed_event(run_id) + self.set_runner_completed_event(run_id) - self.send_deployment_status(run_id, end_point_name, - model_name, - model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED) - return + self.send_deployment_status(run_id, end_point_name, + model_name, + model_inference_url, + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED) + + # Set setting to "DEPLOYED" for autoscaling service reference + FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ + update_user_setting_replica_num(end_point_id=run_id, state="DEPLOYED") + + return + except Exception as e: + logging.error(f"Failed to send first scroll update message due to {e}.") + logging.error(f"Exception traceback {traceback.format_exc()}.") logging.info("Start waiting for result callback from workers ...") @@ -437,6 +451,7 @@ def start_device_inference_gateway( def start_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True): # start inference monitor server + # Will report the qps related metrics to the MLOps logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...") if check_stopped_event: self.check_runner_stop_event() @@ -563,10 +578,9 @@ def callback_deployment_result_message(self, topic=None, payload=None): filehandler = logging.FileHandler(log_file, "a") program_prefix = "FedML-Server @device-id-{}".format(self.edge_id) - formatter = logging.Formatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] " - "[%(filename)s:%(lineno)d:%(funcName)s] %(" - "message)s", - datefmt="%a, %d %b %Y %H:%M:%S") + formatter = MLOpsFormatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] " + "[%(filename)s:%(lineno)d:%(funcName)s] %(" + "message)s") filehandler.setFormatter(formatter) root_logger.addHandler(filehandler) @@ -630,21 +644,54 @@ def callback_deployment_result_message(self, topic=None, payload=None): if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED: logging.error(f"Unsupported model status {model_status}.") - # Failure handler - if run_operation == "ADD_OR_REMOVE": - # TODO(Raphael): Also support rollback for scale out / in operation + # Avoid endless loop, if the rollback also failed, we should report the failure to the MLOps + if self.model_runner_mapping[run_id_str].replica_controller.under_rollback: self.send_deployment_status( end_point_id, end_point_name, payload_json["model_name"], "", ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) return - elif run_operation == "UPDATE": - # Send the rollback message to the worker devices only if it has not been rollback - if self.model_runner_mapping[run_id_str].replica_controller.under_rollback: + + # Failure handler, send the rollback message to the worker devices only if it has not been rollback + if run_operation == "ADD_OR_REMOVE": + # During Scale out / in, + # the worker that already been scaled out / in should be sent the rollback message + rollback_dict = self.model_runner_mapping[run_id_str].replica_controller.rollback_add_or_remove_replica( + device_id=device_id, replica_no=replica_no, op_type=run_operation + ) + self.model_runner_mapping[run_id_str].replica_controller.under_rollback = True + + if rollback_dict is not None and len(rollback_dict) > 0: self.send_deployment_status( end_point_id, end_point_name, payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING) + self.send_rollback_add_remove_op(run_id_str, rollback_dict) return + else: + # This is the last worker that failed, so we should continue to "ABORTED" status + model_config_parameters = self.running_request_json[run_id_str]["parameters"] + inference_port = model_config_parameters.get("server_internal_port", + ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) + inference_port_external = model_config_parameters.get("server_external_port", inference_port) + ip = self.get_ip_address(self.running_request_json[run_id_str]) + if 
ip.startswith("http://") or ip.startswith("https://"): + model_inference_url = "{}/inference/{}".format(ip, end_point_id) + else: + model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, + end_point_id) + + self.send_deployment_status(end_point_id, end_point_name, + payload_json["model_name"], + model_inference_url, + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED) + + # For auto-scaling, should update the state to "DEPLOYED" + FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ + update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED") + + self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False + return + elif run_operation == "UPDATE": # Overwrite the json with the rollback version diff rollback_version_diff = \ self.model_runner_mapping[run_id_str].replica_controller.rollback_get_replica_version_diff( @@ -698,9 +745,9 @@ def callback_deployment_result_message(self, topic=None, payload=None): # Wait for all replica-level's result, not device-level if (self.model_runner_mapping[run_id_str].replica_controller.is_all_replica_num_reconciled() and self.model_runner_mapping[run_id_str].replica_controller.is_all_replica_version_reconciled()): - ''' + """ When all the devices have finished the add / delete / update operation - ''' + """ # Generate one unified inference api # Note that here we use the gateway port instead of the inference port that is used by the slave device model_config_parameters = request_json["parameters"] @@ -755,16 +802,28 @@ def callback_deployment_result_message(self, topic=None, payload=None): # Arrive here because only contains remove ops, so we do not need to update the model metadata pass + # For auto-scaling, should update the state to "DEPLOYED" FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_activation(end_point_id, end_point_name, True) + update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED") if self.model_runner_mapping[run_id_str].replica_controller.under_rollback: - self.send_deployment_status(end_point_id, end_point_name, - payload_json["model_name"], - model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED) + # If first time failed (Still might need rollback), then send failed message to the MLOps + if not (FedMLModelCache.get_instance(self.redis_addr, self.redis_port). + get_end_point_activation(end_point_id)): + self.send_deployment_status( + end_point_id, end_point_name, payload_json["model_name"], "", + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) + else: + self.send_deployment_status(end_point_id, end_point_name, + payload_json["model_name"], + model_inference_url, + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED) self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False else: + # Set the end point activation status to True, for scaling out / in and rolling update + FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ + set_end_point_activation(end_point_id, end_point_name, True) + self.send_deployment_status(end_point_id, end_point_name, payload_json["model_name"], model_inference_url, @@ -781,7 +840,10 @@ def callback_deployment_status_message(self, topic=None, payload=None): topic, payload)) pass - def send_deployment_start_request_to_edges(self): + def send_deployment_start_request_to_edges(self, in_request_json=None): + if in_request_json is not None: + self.request_json = in_request_json + # Iterate through replica_num_diff, both add and replace should be sent to the edge devices if "replica_num_diff" not in self.request_json or self.request_json["replica_num_diff"] is None: return [] @@ -895,15 +957,36 @@ def callback_start_deployment(self, topic, payload): model_config = request_json["model_config"] model_name = model_config["model_name"] + model_version = model_config["model_version"] model_id = model_config["model_id"] model_storage_url = model_config["model_storage_url"] scale_min = model_config.get("instance_scale_min", 0) scale_max = model_config.get("instance_scale_max", 0) inference_engine = model_config.get("inference_engine", 0) + enable_auto_scaling = request_json.get("enable_auto_scaling", False) + desired_replica_num = request_json.get("desired_replica_num", 1) + + target_queries_per_replica = request_json.get("target_queries_per_replica", 10) + aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60) + scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120) + inference_end_point_id = run_id logging.info("[Master] received start deployment request for end point {}.".format(run_id)) + # Set redis config + FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + + # Save the user setting (about replica number) of this run to Redis, if existed, update it + FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_user_setting_replica_num( + end_point_id=run_id, end_point_name=end_point_name, model_name=model_name, model_version=model_version, + replica_num=desired_replica_num, enable_auto_scaling=enable_auto_scaling, + scale_min=scale_min, scale_max=scale_max, state="DEPLOYING", + aggregation_window_size_seconds=aggregation_window_size_seconds, + target_queries_per_replica=target_queries_per_replica, + scale_down_delay_seconds=int(scale_down_delay_seconds) + ) + # Start log processor for current run self.args.run_id = run_id self.args.edge_id = self.edge_id @@ -912,6 +995,7 @@ def callback_start_deployment(self, topic, payload): ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT) MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id) + # # Deprecated # self.ota_upgrade(payload, request_json) # Add additional parameters to the request_json @@ -924,8 +1008,7 @@ def callback_start_deployment(self, topic, payload): self.running_request_json[run_id_str] = request_json self.request_json["master_node_ip"] = self.get_ip_address(self.request_json) - # Target status of the devices - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + # Set the target status of the devices to redis FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs)) @@ -944,7 +1027,7 @@ def callback_start_deployment(self, topic, payload): "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"], ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], - "Received request for end point {}".format(run_id)) + "Received request for endpoint {}".format(run_id)) # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing" self.send_deployment_stages(self.run_id, model_name, model_id, @@ -1078,7 +1161,7 @@ def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model delete_item, endpoint_id, endpoint_name, model_name ) - logging.info(f"Deleted the record of the replaced device {delete_device_result_list}") + logging.info(f"Deleted the replica record on master: {edge_id_replica_no_dict}") def send_next_scroll_update_msg(self, run_id_str, device_id, replica_no): """ @@ -1128,6 +1211,20 @@ def send_next_scroll_update_msg(self, run_id_str, device_id, replica_no): self.send_deployment_start_request_to_edge(edge_id, self.running_request_json[run_id_str]) return + def send_rollback_add_remove_op(self, run_id, rollback_replica_dict): + """ + This method is used when the original add op failed, we need to rollback by delete the existed replicas + Input example: + rollback_replica_dict = {'96684': {'curr_num': 2, 'op': 'remove', 'target_num': 1}} + """ + existed_request_json = self.running_request_json[str(run_id)] + updated_request_json = copy.deepcopy(existed_request_json) + + # Reverse the replica_num_diff + updated_request_json["replica_num_diff"] = rollback_replica_dict + + self.send_deployment_start_request_to_edges(in_request_json=updated_request_json) + def callback_activate_deployment(self, topic, payload): logging.info("callback_activate_deployment: topic = %s, payload = %s" % (topic, payload)) @@ -1183,7 +1280,15 @@ def callback_delete_deployment(self, topic, payload): # Parse payload as the model message object. model_msg_object = FedMLModelMsgObject(topic, payload) - # Set end point as deactivated status + # Delete SQLite records + FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) + FedMLModelDatabase.get_instance().delete_deployment_result( + model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, + model_version=model_msg_object.model_version) + FedMLModelDatabase.get_instance().delete_deployment_run_info( + end_point_id=model_msg_object.inference_end_point_id) + + # Delete Redis Records FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ set_end_point_activation(model_msg_object.inference_end_point_id, @@ -1192,21 +1297,15 @@ def callback_delete_deployment(self, topic, payload): delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version) + # Send delete deployment request to the edge devices self.send_deployment_delete_request_to_edges(payload, model_msg_object) + # Stop processes on master self.set_runner_stopped_event(model_msg_object.run_id) - self.stop_device_inference_monitor(model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_id, model_msg_object.model_name, model_msg_object.model_version) - FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) - FedMLModelDatabase.get_instance().delete_deployment_result( - model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, - model_version=model_msg_object.model_version) - FedMLModelDatabase.get_instance().delete_deployment_run_info( - end_point_id=model_msg_object.inference_end_point_id) - def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload, replica_id_list=None): self.send_deployment_results(end_point_id, end_point_name, payload["model_name"], payload["model_url"], diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py index 104dacf716..482a21b2d4 100755 --- a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py +++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py @@ -90,7 +90,10 @@ def send_deployment_stages(end_point_id, model_name, model_id, model_inference_u logging.info(f"-------- Stages has been sent to mlops with stage {model_stages_index} and " f"payload {deployment_stages_payload}") - def send_deployment_start_request_to_edges(self): + def send_deployment_start_request_to_edges(self, in_request_json=None): + if in_request_json is not None: + self.request_json = in_request_json + # Iterate through replica_num_diff, both add and replace should be sent to the edge devices if "replica_num_diff" not in self.request_json or self.request_json["replica_num_diff"] is None: return [] diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index d1cc68dc98..13876d0184 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -9,7 +9,8 @@ from multiprocessing import Queue import fedml -from fedml.core.mlops import MLOpsRuntimeLog +from fedml.core.mlops import MLOpsRuntimeLog, MLOpsConfigs +from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter from .device_client_constants import ClientConstants from .device_model_cache import FedMLModelCache from .device_server_constants import ServerConstants @@ -82,6 +83,7 @@ def run_impl( self.replica_controller = FedMLDeviceReplicaController(self.edge_id, self.request_json) # Start the process to report system performance(cpu,memory,etc.) to MLOps + # TODO(Raphael): This measurement is for the host machine. 
Change to container-level metrics
         self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id)

         # Check if we should stop the runner
@@ -140,7 +142,7 @@ def run_impl(
             devices_sent_update_remove_msg = self.send_first_scroll_update_msg()

             if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0:
-                # No device is added or removed, and no device is updated or removed
+                # No device is added, updated or removed
                 logging.info("No device is added, updated or removed. No action needed for reconciliation.")
                 ip = GeneralConstants.get_ip_address(self.request_json)
                 master_port = os.getenv("FEDML_MASTER_PORT", None)
@@ -158,10 +160,20 @@ def run_impl(
                     message_center=self.message_center
                 )

+                # Set the user setting state to "DEPLOYED" for the autoscaling service's reference
+                FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
+                FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                    update_user_setting_replica_num(end_point_id=run_id, state="DEPLOYED")
+
+            # Complete the job runner
             self.trigger_completed_event()
+            return
         except Exception as e:
-            logging.info(f"Exception at run impl {traceback.format_exc()}")
+            logging.error(f"Failed to send the first scroll update message due to {e}.")
+            logging.error(f"Exception traceback {traceback.format_exc()}.")
+
+        logging.info("Start waiting for result callback from workers ...")

         self.deployment_result_queue = run_extend_queue_list[0]
         while True:
@@ -210,10 +222,9 @@ def process_deployment_result_message(self, topic=None, payload=None):
             filehandler = logging.FileHandler(log_file, "a")
             program_prefix = "FedML-Server @device-id-{}".format(self.edge_id)
-            formatter = logging.Formatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] "
-                                              "[%(filename)s:%(lineno)d:%(funcName)s] %("
-                                              "message)s",
-                                          datefmt="%a, %d %b %Y %H:%M:%S")
+            formatter = MLOpsFormatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] "
+                                           "[%(filename)s:%(lineno)d:%(funcName)s] %("
+                                           "message)s")
             filehandler.setFormatter(formatter)
             root_logger.addHandler(filehandler)
@@ -270,15 +281,55 @@ def process_deployment_result_message(self, topic=None, payload=None):
             if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED:
                 logging.error(f"Unsupported model status {model_status}.")
-            # Failure handler
-            if run_operation == "ADD_OR_REMOVE":
-                # TODO(Raphael): Also support rollback for scale out / in operation
+            # Avoid an endless loop: if the rollback also fails, report the failure to MLOps
+            if self.replica_controller.under_rollback:
                 self.send_deployment_status(
                     end_point_id, end_point_name, payload_json["model_name"], "",
                     ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, message_center=self.message_center)
                 return
-            else:
+
+            # Failure handler: send the rollback message to the worker devices only if rollback has not already started
+            if run_operation == "ADD_OR_REMOVE":
+                # During scale out / in,
+                # the workers that have already been scaled out / in should be sent the rollback message
+                rollback_dict = self.replica_controller.rollback_add_or_remove_replica(
+                    device_id=device_id, replica_no=replica_no, op_type=run_operation
+                )
+                self.replica_controller.under_rollback = True
+
+                if rollback_dict is not None and len(rollback_dict) > 0:
+                    self.send_deployment_status(
+                        end_point_id, end_point_name, payload_json["model_name"], "",
+                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING,
+                        message_center=self.message_center)
+                    self.send_rollback_add_remove_op(run_id_str, 
rollback_dict) + return + else: + # This is the last worker that failed, so we should continue to "ABORTED" status + model_config_parameters = self.running_request_json[run_id_str]["parameters"] + inference_port = model_config_parameters.get("server_internal_port", + ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) + inference_port_external = model_config_parameters.get("server_external_port", inference_port) + ip = GeneralConstants.get_ip_address(self.request_json) + if ip.startswith("http://") or ip.startswith("https://"): + model_inference_url = "{}/inference/{}".format(ip, end_point_id) + else: + model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, + end_point_id) + + self.send_deployment_status( + end_point_id, end_point_name, payload_json["model_name"], model_inference_url, + ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED, message_center=self.message_center) + + # For auto-scaling, should update the state to "DEPLOYED" + FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ + update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED") + + self.replica_controller.under_rollback = False + + return + elif run_operation == "UPDATE": # Overwrite the json with the rollback version diff rollback_version_diff = self.replica_controller.rollback_get_replica_version_diff( device_id_trigger=device_id, replica_no_trigger=replica_no) @@ -325,9 +376,9 @@ def process_deployment_result_message(self, topic=None, payload=None): # Wait for all replica-level's result, not device-level if (self.replica_controller.is_all_replica_num_reconciled() and self.replica_controller.is_all_replica_version_reconciled()): - ''' + """ When all the devices have finished the add / delete / update operation - ''' + """ # Generate one unified inference api # Note that here we use the gateway port instead of the inference port that is used by the slave device model_config_parameters = request_json["parameters"] @@ -383,21 +434,31 @@ def process_deployment_result_message(self, topic=None, payload=None): # Arrive here because only contains remove ops, so we do not need to update the model metadata pass + # For auto-scaling, should update the state to "DEPLOYED" FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_activation(end_point_id, end_point_name, True) + update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED") if self.replica_controller.under_rollback: - self.send_deployment_status(end_point_id, end_point_name, - payload_json["model_name"], - model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED) + # If first time failed (Still might need rollback), then send failed message to the MLOps + if not (FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
+                    get_end_point_activation(end_point_id)):
+                self.send_deployment_status(
+                    end_point_id, end_point_name, payload_json["model_name"], "",
+                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, message_center=self.message_center)
+            else:
+                self.send_deployment_status(
+                    end_point_id, end_point_name, payload_json["model_name"], model_inference_url,
+                    ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED, message_center=self.message_center)
+
             self.replica_controller.under_rollback = False
         else:
-            self.send_deployment_status(end_point_id, end_point_name,
-                                        payload_json["model_name"],
-                                        model_inference_url,
-                                        ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED,
-                                        message_center=self.message_center)
+            # Set the end point activation status to True, for scaling out / in and rolling update
+            FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \
+                set_end_point_activation(end_point_id, end_point_name, True)
+
+            self.send_deployment_status(
+                end_point_id, end_point_name, payload_json["model_name"], model_inference_url,
+                ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, message_center=self.message_center)

         time.sleep(3)
         self.trigger_completed_event()
@@ -457,6 +518,7 @@ def start_device_inference_monitor(
             redis_addr=None, redis_port=None, redis_password=None
     ):
         # start inference monitor server
+        # Will report the QPS-related metrics to MLOps
         logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...")
         run_id_str = str(run_id)
         pip_source_dir = os.path.dirname(__file__)
@@ -503,8 +565,15 @@ def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_passwo
             if not is_activated:
                 continue

+            agent_config = dict()
+            try:
+                agent_config["mqtt_config"], _, _, _ = MLOpsConfigs.fetch_all_configs()
+            except Exception as e:
+                pass  # fall back to an empty agent config if fetching the MLOps configs fails
+
             FedMLDeployMasterJobRunner.start_device_inference_gateway(
-                run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port)
+                run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port,
+                agent_config=agent_config, redis_addr=redis_addr, redis_port=redis_port, redis_password=redis_password)

             FedMLDeployMasterJobRunner.stop_device_inference_monitor(
                 run_id, end_point_name, model_id, model_name, model_version)
@@ -614,6 +683,20 @@ def send_rollback_msg(self, run_id_str):
         # send start deployment request to each device
         self.send_deployment_start_request_to_edge(edge_id, self.request_json)

+    def send_rollback_add_remove_op(self, run_id, rollback_replica_dict):
+        """
+        Used when the original add op fails: roll back by deleting the replicas that were already created.
+        Input example:
+        rollback_replica_dict = {'96684': {'curr_num': 2, 'op': 'remove', 'target_num': 1}}
+        """
+        existed_request_json = self.request_json
+        updated_request_json = copy.deepcopy(existed_request_json)
+
+        # Reverse the replica_num_diff
+        updated_request_json["replica_num_diff"] = rollback_replica_dict
+
+        self.send_deployment_start_request_to_edges(in_request_json=updated_request_json)
+
     def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model_name, edge_id_replica_no_dict):
         FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password)
         # Remove the record of the replaced device
@@ -635,7 +718,7 @@ def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model
                 delete_item, endpoint_id, endpoint_name, model_name
             )
-        logging.info(f"Deleted the record of the replaced device 
{delete_device_result_list}") + logging.info(f"Deleted the replica record on master: {edge_id_replica_no_dict}") def save_deployed_replica_payload(self, payload_json): self.deployed_replica_payload = copy.deepcopy(payload_json) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index 962dcbbcb3..144d17fd02 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -105,7 +105,15 @@ def callback_delete_deployment(self, topic, payload): # Parse payload as the model message object. model_msg_object = FedMLModelMsgObject(topic, payload) - # Set end point as deactivated status + # Delete SQLite records + FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) + FedMLModelDatabase.get_instance().delete_deployment_result( + model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, + model_version=model_msg_object.model_version) + FedMLModelDatabase.get_instance().delete_deployment_run_info( + end_point_id=model_msg_object.inference_end_point_id) + + # Delete Redis Records FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ set_end_point_activation(model_msg_object.inference_end_point_id, @@ -114,22 +122,16 @@ def callback_delete_deployment(self, topic, payload): delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version) + # Send delete deployment request to the edge devices FedMLDeployJobRunnerManager.get_instance().send_deployment_delete_request_to_edges( model_msg_object.run_id, payload, model_msg_object, message_center=self.message_center) + # Stop processes on master FedMLDeployJobRunnerManager.get_instance().stop_job_runner(model_msg_object.run_id) - FedMLDeployJobRunnerManager.get_instance().stop_device_inference_monitor( model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_id, model_msg_object.model_name, model_msg_object.model_version) - FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) - FedMLModelDatabase.get_instance().delete_deployment_result( - model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, - model_version=model_msg_object.model_version) - FedMLModelDatabase.get_instance().delete_deployment_run_info( - end_point_id=model_msg_object.inference_end_point_id) - def callback_start_deployment(self, topic, payload): # noinspection PyBroadException try: @@ -149,15 +151,36 @@ def callback_start_deployment(self, topic, payload): model_config = request_json["model_config"] model_name = model_config["model_name"] + model_version = model_config["model_version"] model_id = model_config["model_id"] model_storage_url = model_config["model_storage_url"] scale_min = model_config.get("instance_scale_min", 0) scale_max = model_config.get("instance_scale_max", 0) inference_engine = model_config.get("inference_engine", 0) + enable_auto_scaling = request_json.get("enable_auto_scaling", False) + desired_replica_num = request_json.get("desired_replica_num", 1) + + target_queries_per_replica = request_json.get("target_queries_per_replica", 10) + aggregation_window_size_seconds = 
request_json.get("aggregation_window_size_seconds", 60) + scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120) + inference_end_point_id = run_id logging.info("[Master] received start deployment request for end point {}.".format(run_id)) + # Set redis config + FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + + # Save the user setting (about replica number) of this run to Redis, if existed, update it + FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_user_setting_replica_num( + end_point_id=run_id, end_point_name=end_point_name, model_name=model_name, model_version=model_version, + replica_num=desired_replica_num, enable_auto_scaling=enable_auto_scaling, + scale_min=scale_min, scale_max=scale_max, state="DEPLOYING", + aggregation_window_size_seconds=aggregation_window_size_seconds, + target_queries_per_replica=target_queries_per_replica, + scale_down_delay_seconds=int(scale_down_delay_seconds) + ) + # Start log processor for current run self.args.run_id = run_id self.args.edge_id = self.edge_id @@ -176,8 +199,7 @@ def callback_start_deployment(self, topic, payload): self.running_request_json[run_id_str] = request_json self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json) - # Target status of the devices - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + # Set the target status of the devices to redis FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs)) @@ -194,7 +216,7 @@ def callback_start_deployment(self, topic, payload): # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received" FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for end point {}".format(run_id), + ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for endpoint {}".format(run_id), message_center=self.message_center) # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing" diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py index 78e2527e0c..d1cfd3b83c 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -77,6 +77,18 @@ def get_model_bin_file(unzip_package_full_path): def update_local_fedml_config(self, run_id, model_config, model_config_parameters=None): model_name = model_config["model_name"] model_storage_url = model_config["model_storage_url"] + end_point_name = self.request_json["end_point_name"] + model_version = model_config["model_version"] + + # Generate the model package dir for downloading. 
+ model_version = model_version.replace(" ", "-") # Avoid using space for folder name + model_version = model_version.replace(":", "-") # Since docker mount will conflict with ":" + local_package_path = ClientConstants.get_model_package_dir() + os.makedirs(local_package_path, exist_ok=True) + this_run_model_dir = f"{run_id}_{end_point_name}_{model_name}_{model_version}" + this_run_model_full_path = os.path.join(local_package_path, this_run_model_dir) + self.agent_package_download_dir = this_run_model_full_path + self.agent_package_unzip_dir = this_run_model_full_path # Retrieve model package or model binary file. if self.model_is_from_open: @@ -127,7 +139,6 @@ def run_impl(self, run_extend_queue_list, sender_message_center, ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT) inference_end_point_id = run_id - self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id) MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.") @@ -250,7 +261,6 @@ def run_impl(self, run_extend_queue_list, sender_message_center, if op == "add": worker_ip = GeneralConstants.get_ip_address(self.request_json) for rank in range(prev_rank + 1, prev_rank + 1 + op_num): - # TODO: Support Rollback if this for loop failed try: running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ start_deployment( @@ -392,15 +402,17 @@ def run_impl(self, run_extend_queue_list, sender_message_center, if inference_output_url == "": logging.error("Failed to deploy the model...") - # If update failed, should release this replica's gpu + # Release the gpu occupancy FedMLModelCache.get_instance().set_redis_params() replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids( run_id, end_point_name, model_name, self.edge_id, rank + 1) + logging.info(f"Release gpu ids {replica_occupied_gpu_ids_str} for " + f"failed deployment of replica no {rank + 1}.") - replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str) - - JobRunnerUtils.get_instance().release_partial_job_gpu( - run_id, self.edge_id, replica_occupied_gpu_ids) + if replica_occupied_gpu_ids_str is not None: + replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str) + JobRunnerUtils.get_instance().release_partial_job_gpu( + run_id, self.edge_id, replica_occupied_gpu_ids) result_payload = self.send_deployment_results( end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py index 43bb3c4582..5f4835d9aa 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py @@ -15,6 +15,8 @@ from ..slave.base_slave_protocol_manager import FedMLBaseSlaveProtocolManager from .worker_job_runner_manager import FedMLDeployJobRunnerManager from .device_mqtt_inference_protocol import FedMLMqttInference +from ..scheduler_core.compute_cache_manager import ComputeCacheManager +from .device_model_cache import FedMLModelCache class FedMLDeployWorkerProtocolManager(FedMLBaseSlaveProtocolManager): @@ -141,7 +143,7 @@ def callback_start_deployment(self, topic, payload): run_id = inference_end_point_id self.args.run_id = run_id self.args.edge_id = self.edge_id - 
MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + MLOpsRuntimeLog(args=self.args).init_logs() MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source( ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT) MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id) @@ -193,3 +195,23 @@ def callback_delete_deployment(self, topic, payload): FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id( model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, self.edge_id) + + # Delete FEDML_GLOBAL_ENDPOINT_RUN_ID_MAP_TAG-${run_id} both in redis and local db + ComputeCacheManager.get_instance().gpu_cache.delete_endpoint_run_id_map(str(model_msg_object.run_id)) + + # Delete FEDML_EDGE_ID_MODEL_DEVICE_ID_MAP_TAG-${run_id} both in redis and local db + ComputeCacheManager.get_instance().gpu_cache.delete_edge_model_id_map(str(model_msg_object.run_id)) + + # Delete FEDML_GLOBAL_DEVICE_RUN_GPU_IDS_TAG-${run_id}-${device_id} both in redis and local db + ComputeCacheManager.get_instance().gpu_cache.delete_device_run_gpu_ids(str(self.edge_id), + str(model_msg_object.run_id)) + + # Delete FEDML_GLOBAL_DEVICE_RUN_NUM_GPUS_TAG-${run_id}-${device_id} both in redis and local db + ComputeCacheManager.get_instance().gpu_cache.delete_device_run_num_gpus(str(self.edge_id), + str(model_msg_object.run_id)) + + # Delete FEDML_MODEL_REPLICA_GPU_IDS_TAG-${run_id}-${end_point_name}-${model_name}-${device_id}-* + FedMLModelCache.get_instance().set_redis_params() + FedMLModelCache.get_instance().delete_all_replica_gpu_ids(model_msg_object.run_id, + model_msg_object.end_point_name, + model_msg_object.model_name, self.edge_id) diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py index e642cacf1b..ba8842b30e 100755 --- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py +++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py @@ -51,6 +51,8 @@ class GeneralConstants: FEDML_OTA_CMD_UPGRADE = "upgrade" FEDML_OTA_CMD_RESTART = "restart" + FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT = "MODEL_END_POINT" + @staticmethod def get_package_unzip_dir(package_download_dir): package_unzip_dir = package_download_dir diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py index dcf21d33b7..869ed6e510 100755 --- a/python/fedml/computing/scheduler/scheduler_core/message_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py @@ -218,7 +218,7 @@ def run_sender(self, message_event, message_queue, message_center_name): message_body = None if message_body is None: time.sleep(0.1) - self.retry_sending_undelivered_message() + # self.retry_sending_undelivered_message() continue # Generate the message entity object diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py index 46f1e7ff8f..0b4d47d52c 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -2,11 +2,11 @@ import logging import os import platform +import random import shutil +import time import traceback -import urllib import zipfile -from urllib.parse import urljoin, urlparse from ..comm_utils.constants import 
SchedulerConstants from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs from ..scheduler_entry.constants import Constants @@ -82,6 +82,8 @@ def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id "${FEDSYS.CLIENT_OBJECT_LIST}": "", "${FEDSYS.LOG_SERVER_URL}": "", } + self.download_time = time.time() + self.download_finished = False def __repr__(self): return "<{klass} @{id:x} {attrs}>".format( @@ -154,18 +156,91 @@ def package_download_progress(self, count, blksize, filesize): self.prev_download_progress = progress_int logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) + def download_package_proc(self, package_url, local_package_file): + import requests + headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' + 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'} + user_agent_list = [ + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.1 Safari/605.1.15', + 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:77.0) Gecko/20100101 Firefox/77.0', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36', + 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:77.0) Gecko/20100101 Firefox/77.0', + 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36', + ] + for _ in user_agent_list: + user_agent = random.choice(user_agent_list) + headers = {'User-Agent': user_agent} + + # Set the stream to true so that we can reduce the memory footprint when downloading large files. + request = requests.get(package_url, headers=headers, timeout=(10, 15), stream=True) + with open(local_package_file, 'wb') as f: + # 1024 * 1024 is 1MiB + download_size = 1024 * 1024 + total_size = 0 + for chunk in request.iter_content(download_size): + # Write the chunk to the file + written_size = f.write(chunk) + total_size += written_size + logging.info(f"package downloaded size {total_size/1024} KB") + self.download_time = time.time() + self.download_finished = True + def retrieve_and_unzip_package(self, package_name, package_url): local_package_path = self.agent_package_download_dir os.makedirs(local_package_path, exist_ok=True) filename, filename_without_extension, file_extension = GeneralConstants.get_filename_and_extension(package_url) - local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}") + local_package_file = os.path.join( + local_package_path, f"fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}") if os.path.exists(local_package_file): os.remove(local_package_file) ssl._create_default_https_context = ssl._create_unverified_context - urllib.request.urlretrieve(package_url, local_package_file, - reporthook=self.package_download_progress) + + # Open a process to download the package so that we can avoid the request is blocked and check the timeout. 
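+        # NOTE: self.download_finished and self.download_time are set by the child
+        # process, which has its own address space, so the parent's copies checked in
+        # the watchdog loop below are never refreshed; in effect every download is
+        # given the full allowed_block_download_time. Shared multiprocessing.Value
+        # objects would be needed for the child's progress to propagate here.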
+ self.download_finished = False + self.download_time = time.time() + from multiprocessing import Process + download_process = Process(target=self.download_package_proc, args=(package_url, local_package_file)) + download_process.start() + allowed_block_download_time = 30 + while True: + block_time = time.time() - self.download_time + if block_time > allowed_block_download_time: + break + if self.download_finished: + break + time.sleep(3) + try: + if not self.download_finished: + download_process.terminate() + download_process.kill() + except Exception as e: + pass + + # Another method to async download. + # import socket + # socket.setdefaulttimeout(15) + # try: + # urllib.request.urlretrieve(package_url, local_package_file, + # reporthook=self.package_download_progress) + # except socket.timeout: + # retry_count = 1 + # max_retry_num = 5 + # while retry_count <= max_retry_num: + # try: + # urllib.request.urlretrieve(package_url, local_package_file, + # reporthook=self.package_download_progress) + # break + # except socket.timeout: + # error_info = 'Retry %d time' % retry_count if retry_count == 1 else \ + # 'Reloading for %d times' % retry_count + # logging.info(error_info) + # retry_count += 1 + # if retry_count > max_retry_num: + # logging.error("Download failed.") + # raise Exception("Download failed") + unzip_package_path = os.path.join(self.agent_package_unzip_dir, - f"unzip_fedml_run_{self.run_id}_{filename_without_extension}") + f"unzip_fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}") try: shutil.rmtree(unzip_package_path, ignore_errors=True) except Exception as e: @@ -485,7 +560,7 @@ def callback_start_fl_job(self, job_pid): def start_job_perf(self, job_pid): GeneralConstants.save_learning_process(self.run_id, job_pid, data_dir=self.agent_data_dir) - self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid) + #self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid) def job_error_processor(self, error_list): self.check_runner_stop_event() @@ -516,10 +591,10 @@ def cleanup_containers_and_release_gpus(run_id, edge_id): job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY): # Terminate the run docker container if exists - container_name = JobRunnerUtils.get_run_container_name(run_id) - docker_client = JobRunnerUtils.get_docker_client(DockerArgs()) - logging.info(f"Terminating the run docker container {container_name} if exists...") try: + container_name = JobRunnerUtils.get_run_container_name(run_id) + docker_client = JobRunnerUtils.get_docker_client(DockerArgs()) + logging.info(f"Terminating the run docker container {container_name} if exists...") JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client) except Exception as e: logging.error(f"Exception {e} occurred when terminating docker container. 
" diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py index 58198b6661..77768da6c0 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py @@ -44,10 +44,9 @@ def complete_job_runner(self, run_id): if self.job_runners.get(run_id_str, None) is not None: self.job_runners[run_id_str].trigger_completed_event() - def put_run_edge_device_info_to_queue(self, run_id, device_info): - run_id_str = str(run_id) - if self.job_runners.get(run_id_str, None) is not None: - self.job_runners[run_id_str].put_run_edge_device_info_to_queue(run_id, device_info) + def put_run_edge_device_info_to_queue(self, run_id, edge_id, device_info): + for job_run_id, job_runner in self.job_runners.items(): + job_runner.put_run_edge_device_info_to_queue(run_id, edge_id, device_info) def get_runner_process(self, run_id, is_cloud_server=False): run_id_str = str(run_id) diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py index 569f4d9257..76f811993e 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py @@ -373,10 +373,10 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue, # Async request the job status from master when launching the job job_launch_message_map[status_entity.run_id] = {"topic": message_entity.topic, "payload": message_entity.payload} - status_manager_instances[status_entity.run_id]. \ - status_center_request_job_status_from_master_in_slave_agent( - message_entity.topic, message_entity.payload - ) + # status_manager_instances[status_entity.run_id]. 
\ + # status_center_request_job_status_from_master_in_slave_agent( + # message_entity.topic, message_entity.payload + # ) elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_PREFIX) and message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_SUFFIX)): # Cleanup when stopped the job diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 4d2cf3a5ed..871b9026bf 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -78,7 +78,7 @@ def process_job_exception_status(self, master_id, status): def process_job_running_status(self, master_id, status): self.message_reporter.report_server_training_status( - self.run_id, status, edge_id=master_id, running_json=self.running_scheduler_contract) + self.run_id, status, edge_id=master_id, running_json=self.running_scheduler_contract, update_db=False) def status_center_process_master_status(self, topic, payload): request_json = json.loads(payload) @@ -121,13 +121,13 @@ def process_job_status_consensus(self, run_id, master_id, status): status, edge_status_item) if consensus_device_status is not None: self.message_reporter.report_client_training_status( - edge_id_item, consensus_device_status, run_id=run_id) + edge_id_item, consensus_device_status, run_id=run_id, update_db=False) # Save the job status to local storage FedMLServerDataInterface.get_instance().save_job_status(run_id, master_id, status, status) # Report the status to message center - self.message_reporter.report_server_training_status(run_id, status, edge_id=master_id) + self.message_reporter.report_server_training_status(run_id, status, edge_id=master_id, update_db=False) # Broadcast the status to slave agents self.message_reporter.report_job_status(run_id, status) @@ -207,7 +207,7 @@ def process_device_status(self, run_id, edge_id, status): # Report client status consensus_status = self.get_device_consensus_status_in_current_device(edge_id, status) - self.message_reporter.report_client_training_status(edge_id, consensus_status, run_id=run_id) + self.message_reporter.report_client_training_status(edge_id, consensus_status, run_id=run_id, update_db=False) # Report server status based on the fault tolerance model and parameters edge_nums = len(edge_id_status_dict.keys()) - 1 @@ -263,7 +263,7 @@ def parse_fault_tolerance_params(self, run_id): def report_server_status(self, run_id, edge_id, server_id, status): self.status_reporter.report_server_id_status( - run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id) + run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id, update_db=False) def report_exception_status( self, edge_id_list, run_id=0, server_id=None, status=None, payload=None): @@ -282,7 +282,7 @@ def status_center_process_slave_status_to_master_in_slave_agent(self, topic, pay self.message_center.send_message(topic, payload) # Post the status message to the listener queue of message center - self.message_center.receive_message(GeneralConstants.MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB, payload) + #self.message_center.receive_message(GeneralConstants.MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB, payload) def status_center_process_slave_status_to_mlops_in_slave_agent(self, topic, payload): # Forward the status message to message center. 
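The non-blocking package download introduced in scheduler_base_job_runner.py above follows a watchdog pattern: the HTTP request runs in a child process while the parent polls a progress flag and terminates the child once it stalls. Since instance attributes set in the child are not visible to the parent, a faithful version of the pattern needs shared state. Below is a minimal self-contained sketch using multiprocessing.Value objects for the heartbeat; the helper names (download_with_watchdog, _download_proc) are illustrative and not part of this patch.

    import time
    from multiprocessing import Process, Value

    import requests


    def _download_proc(url, dest, finished, last_progress):
        # Stream the download in 1 MiB chunks to bound memory usage for large packages.
        with requests.get(url, timeout=(10, 15), stream=True) as response:
            with open(dest, "wb") as f:
                for chunk in response.iter_content(1024 * 1024):
                    f.write(chunk)
                    last_progress.value = time.time()  # heartbeat for the watchdog
        finished.value = 1


    def download_with_watchdog(url, dest, allowed_stall_seconds=30, poll_seconds=3):
        finished = Value("i", 0)                  # shared success flag
        last_progress = Value("d", time.time())   # shared heartbeat timestamp
        proc = Process(target=_download_proc, args=(url, dest, finished, last_progress))
        proc.start()
        while not finished.value:
            # Kill the download only when no chunk has arrived for a while,
            # instead of capping the total download time.
            if time.time() - last_progress.value > allowed_stall_seconds:
                proc.terminate()
                break
            time.sleep(poll_seconds)
        proc.join()
        return bool(finished.value)

Unlike the hunk above, the heartbeat here is refreshed on every received chunk, so a slow but healthy download is not killed once the stall budget elapses.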
diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py index 4448dd49fa..de2956ad94 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py +++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py @@ -1,3 +1,4 @@ +import json import logging import multiprocessing import os @@ -104,7 +105,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center, self.status_reporter.report_client_id_status( self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING, - running_json=self.start_request_json, run_id=run_id) + running_json=json.dumps(self.request_json), run_id=run_id) # get training params private_local_data_dir = data_config.get("privateLocalData", "") diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index 514aa98cd7..b3cd154d23 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -240,7 +240,7 @@ def callback_start_train(self, topic, payload): # Print the payload logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" + f"FedMLDebug - run id {run_id}, Receive at callback_start_train: topic ({topic}), payload ({payload})" ) # Occupy GPUs @@ -418,6 +418,7 @@ def callback_response_device_status_in_job(self, topic, payload): edge_id = payload_json.get("edge_id", None) # process the status + logging.info("process status in the device status callback.") self.process_status(run_id, job_status, edge_id) def callback_response_job_status(self, topic, payload): @@ -430,6 +431,7 @@ def callback_response_job_status(self, topic, payload): edge_id = payload_json.get("edge_id", None) # process the status + logging.info("process status in the job status callback.") self.process_status(run_id, job_status, edge_id) def callback_broadcasted_job_status(self, topic, payload): @@ -439,6 +441,7 @@ def callback_broadcasted_job_status(self, topic, payload): job_status = payload_json.get("status", None) # process the status + logging.info("process status in the broadcast job status callback.") self.process_status(run_id, job_status, self.edge_id) def generate_protocol_manager(self): diff --git a/python/fedml/computing/scheduler/slave/client_data_interface.py b/python/fedml/computing/scheduler/slave/client_data_interface.py index 34a7b89bd2..0e9e84381a 100755 --- a/python/fedml/computing/scheduler/slave/client_data_interface.py +++ b/python/fedml/computing/scheduler/slave/client_data_interface.py @@ -143,7 +143,7 @@ def create_job_table(self): updated_time TEXT, round_index INT, total_rounds INT, - running_json TEXT);''') + running_json TEXT NULL);''') self.db_connection.commit() except Exception as e: pass @@ -405,14 +405,14 @@ class FedMLClientJobModel(object): def __init__(self): self.job_id = 0 self.edge_id = 0 - self.started_time = "" - self.ended_time = "" - self.progress = 0 - self.eta = 0 - self.failed_time = "" + self.started_time = "0" + self.ended_time = "0" + self.progress = 0.0 + self.eta = 0.0 + self.failed_time = "0" self.error_code = -1 self.msg = "" - self.updated_time = "" + self.updated_time = "0" self.round_index = 0 self.total_rounds = 0 self.status = "" diff --git a/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py b/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py index bdafe159c2..937e9f6644 
100644 --- a/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py +++ b/python/fedml/core/distributed/communication/mqtt/mqtt_manager.py @@ -14,7 +14,7 @@ class MqttManager(object): def __init__(self, host, port, user, pwd, keepalive_time, client_id, last_will_topic=None, last_will_msg=None, - clean_session=True, retain_msg=False): + clean_session=True, retain_msg=True): self._client = None self.mqtt_connection_id = None self._host = host diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py index e0855c74b5..afa96f6870 100644 --- a/python/fedml/core/mlops/mlops_metrics.py +++ b/python/fedml/core/mlops/mlops_metrics.py @@ -67,15 +67,17 @@ def comm_sanity_check(self): else: return True - def report_client_training_status(self, edge_id, status, running_json=None, is_from_model=False, run_id=0): + def report_client_training_status(self, edge_id, status, running_json=None, + is_from_model=False, run_id=0, update_db=True): self.common_report_client_training_status(edge_id, status, run_id=run_id) - if is_from_model: - from ...computing.scheduler.model_scheduler.device_client_data_interface import FedMLClientDataInterface - FedMLClientDataInterface.get_instance().save_job(run_id, edge_id, status, running_json) - else: - from ...computing.scheduler.slave.client_data_interface import FedMLClientDataInterface - FedMLClientDataInterface.get_instance().save_job(run_id, edge_id, status, running_json) + if update_db: + if is_from_model: + from ...computing.scheduler.model_scheduler.device_client_data_interface import FedMLClientDataInterface + FedMLClientDataInterface.get_instance().save_job(run_id, edge_id, status, running_json) + else: + from ...computing.scheduler.slave.client_data_interface import FedMLClientDataInterface + FedMLClientDataInterface.get_instance().save_job(run_id, edge_id, status, running_json) def report_client_device_status_to_web_ui(self, edge_id, status, run_id=0): """ @@ -169,20 +171,22 @@ def common_report_client_id_status(self, run_id, edge_id, status, server_id="0", # logging.info("report_client_id_status. 
message_json = %s" % message_json) self.send_message(topic_name, message_json) - def report_server_training_status(self, run_id, status, edge_id=0, role=None, running_json=None, is_from_model=False): + def report_server_training_status(self, run_id, status, edge_id=0, role=None, + running_json=None, is_from_model=False, update_db=True): # if not self.comm_sanity_check(): # return self.common_report_server_training_status(run_id, status, role=role, edge_id=edge_id) - if is_from_model: - from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface - FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json) - else: - from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface - FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json) + if update_db: + if is_from_model: + from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface + FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json) + else: + from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface + FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json) def report_job_status(self, run_id, status): - topic_name = "master_agent/slave_agent/job_status" + topic_name = f"master_agent/slave_agent/job_status/{run_id}" payload = {"run_id": run_id, "status": status} message_json = json.dumps(payload) @@ -251,7 +255,7 @@ def broadcast_server_training_status(self, run_id, status, role=None, is_from_mo FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status) def report_server_id_status(self, run_id, status, edge_id=None, server_id=None, server_agent_id=None, - is_from_model=False, running_json=None): + is_from_model=False, running_json=None, update_db=True): # if not self.comm_sanity_check(): # return topic_name = "fl_server/flserver_agent_" + str(server_agent_id if server_agent_id is not None else @@ -267,12 +271,13 @@ def report_server_id_status(self, run_id, status, edge_id=None, server_id=None, # logging.info("report_server_id_status. 
message_json = %s" % message_json) self.send_message(topic_name, message_json) - if is_from_model: - from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface - FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json) - else: - from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface - FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json) + if update_db: + if is_from_model: + from ...computing.scheduler.model_scheduler.device_server_data_interface import FedMLServerDataInterface + FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json) + else: + from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface + FedMLServerDataInterface.get_instance().save_job(run_id, in_edge_id, status, running_json) def report_client_training_metric(self, metric_json): # if not self.comm_sanity_check(): From 6b987edd4ed0f421cdd1dbc872b5fa89fe736313 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sat, 11 May 2024 14:51:15 -0700 Subject: [PATCH 028/251] Add GPU Type Registry --- .../comm_utils/gpu_utils/gpu_utils.py | 17 +++++++++++++-- .../scheduler/comm_utils/hardware_utils.py | 21 ++++++++----------- 2 files changed, 24 insertions(+), 14 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py index e6691b4b5d..2fc5cf619b 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py @@ -29,9 +29,23 @@ class GPUCard: temperature: Optional[float] -class GPUCardUtil(ABC): +class GPUTypeRegistry(type, ABC): + GPU_TYPE_REGISTRY = {} + + def __new__(cls, name, bases, attrs): + new_cls = type.__new__(cls, name, bases, attrs) + cls.GPU_TYPE_REGISTRY[new_cls.__name__.lower()] = new_cls + return new_cls @classmethod + def get_gpu_utils(cls): + return cls.GPU_TYPE_REGISTRY.values() + + +class GPUCardUtil(metaclass=GPUTypeRegistry): + + @classmethod + @abstractmethod def detectGPUCardType(cls) -> Optional[GPUCardType]: raise NotImplementedError @@ -44,4 +58,3 @@ def getAvailableGPUCardIDs() -> List[int]: @abstractmethod def getGPUCards() -> List[GPUCard]: raise NotImplementedError - diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py index d26fb9c5b5..d12effa826 100644 --- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -2,22 +2,19 @@ from typing import Optional, List -from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard -from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil +from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard, GPUTypeRegistry from fedml.computing.scheduler.comm_utils.singleton import Singleton class HardwareUtil(metaclass=Singleton): - - _gpu_utils = [NvidiaGPUtil] - _gpu_util: Optional[GPUCardUtil] = None + __gpu_util: Optional[GPUCardUtil] = None @staticmethod - def _get_util() -> Optional[GPUCardUtil]: - if HardwareUtil._gpu_util is not None: - return HardwareUtil._gpu_util + def __get_util() -> Optional[GPUCardUtil]: + if HardwareUtil.__gpu_util is not None: + return HardwareUtil.__gpu_util - for gpu_util in 
HardwareUtil._gpu_utils: + for gpu_util in GPUTypeRegistry.get_gpu_utils(): try: if gpu_util.detectGPUCardType() is not None: HardwareUtil._gpu_util = gpu_util() @@ -30,13 +27,13 @@ def _get_util() -> Optional[GPUCardUtil]: @staticmethod def getGPUs() -> List[GPUCard]: - gpu_util = HardwareUtil._get_util() + gpu_util = HardwareUtil.__get_util() return gpu_util.getGPUCards() if gpu_util is not None else [] @staticmethod def getAvailableGPUCardIDs() -> List[int]: - gpu_util = HardwareUtil._get_util() - return gpu_util.getAvailainfbleGPUCardIDs() if gpu_util is not None else [] + gpu_util = HardwareUtil.__get_util() + return gpu_util.getAvailableGPUCardIDs() if gpu_util is not None else [] if __name__ == "__main__": From 436233b752740ddae7304d1581f030baf8536173 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sat, 11 May 2024 15:11:13 -0700 Subject: [PATCH 029/251] Rolling back GPU Registry change --- .../computing/scheduler/comm_utils/__init__.py | 3 --- .../scheduler/comm_utils/gpu_utils/gpu_utils.py | 17 ++--------------- .../scheduler/comm_utils/hardware_utils.py | 8 +++++--- 3 files changed, 7 insertions(+), 21 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/__init__.py b/python/fedml/computing/scheduler/comm_utils/__init__.py index adf0269b67..e69de29bb2 100644 --- a/python/fedml/computing/scheduler/comm_utils/__init__.py +++ b/python/fedml/computing/scheduler/comm_utils/__init__.py @@ -1,3 +0,0 @@ -import gpu_utils.gpu_utils -import gpu_utils.qualcomm_utils -import gpu_utils.nvidia_utils \ No newline at end of file diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py index 2fc5cf619b..ced1c53d3e 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py @@ -1,4 +1,4 @@ -from abc import ABC, abstractmethod, ABCMeta +from abc import ABC, abstractmethod from dataclasses import dataclass from enum import Enum, auto from typing import Optional, List @@ -29,20 +29,7 @@ class GPUCard: temperature: Optional[float] -class GPUTypeRegistry(type, ABC): - GPU_TYPE_REGISTRY = {} - - def __new__(cls, name, bases, attrs): - new_cls = type.__new__(cls, name, bases, attrs) - cls.GPU_TYPE_REGISTRY[new_cls.__name__.lower()] = new_cls - return new_cls - - @classmethod - def get_gpu_utils(cls): - return cls.GPU_TYPE_REGISTRY.values() - - -class GPUCardUtil(metaclass=GPUTypeRegistry): +class GPUCardUtil(ABC): @classmethod @abstractmethod diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py index d12effa826..c468e2181c 100644 --- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -1,10 +1,12 @@ import logging - from typing import Optional, List -from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard, GPUTypeRegistry +from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard +from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil from fedml.computing.scheduler.comm_utils.singleton import Singleton +GPU_CARD_UTILS = [NvidiaGPUtil] + class HardwareUtil(metaclass=Singleton): __gpu_util: Optional[GPUCardUtil] = None @@ -14,7 +16,7 @@ def __get_util() -> Optional[GPUCardUtil]: if HardwareUtil.__gpu_util is not None: return HardwareUtil.__gpu_util 
- for gpu_util in GPUTypeRegistry.get_gpu_utils(): + for gpu_util in GPU_CARD_UTILS: try: if gpu_util.detectGPUCardType() is not None: HardwareUtil._gpu_util = gpu_util() From 502c031e3838526de681eb2cf062789023659c08 Mon Sep 17 00:00:00 2001 From: alaydshah Date: Sun, 12 May 2024 02:21:38 +0000 Subject: [PATCH 030/251] Qualcomm Util -> get gpus --- .../comm_utils/gpu_utils/gpu_utils.py | 26 ++++---- .../comm_utils/gpu_utils/nvidia_utils.py | 45 +++++++------- .../comm_utils/gpu_utils/qualcomm_utils.py | 60 +++++++++++++++++++ .../scheduler/comm_utils/hardware_utils.py | 29 ++++----- 4 files changed, 112 insertions(+), 48 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py index ced1c53d3e..2731e51a3b 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from dataclasses import dataclass +from dataclasses import dataclass, field from enum import Enum, auto from typing import Optional, List @@ -16,32 +16,34 @@ def __str__(self): @dataclass class GPUCard: id: int - uuid: str name: str - load: float + driver: str + serial: str memoryTotal: float - memoryUsed: float memoryFree: float - driver: str - serial: Optional[str] - display_mode: Optional[str] - display_active: Optional[str] - temperature: Optional[float] + memoryUsed: float + memoryUtil: float + load: Optional[float] = 0.0 + uuid: Optional[str] = "" + display_mode: Optional[str] = "" + display_active: Optional[str] = "" + temperature: Optional[float] = 0.0 class GPUCardUtil(ABC): @classmethod @abstractmethod - def detectGPUCardType(cls) -> Optional[GPUCardType]: + def detect_gpu_card_type(cls) -> Optional[GPUCardType]: raise NotImplementedError @staticmethod @abstractmethod - def getAvailableGPUCardIDs() -> List[int]: + def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01, + max_memory: float = 0.01) -> List[int]: raise NotImplementedError @staticmethod @abstractmethod - def getGPUCards() -> List[GPUCard]: + def get_gpu_cards() -> List[GPUCard]: raise NotImplementedError diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py index 349230cef5..f229774ce0 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py @@ -6,26 +6,9 @@ from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType -def _convert(gpu: GPU) -> GPUCard: - return GPUCard( - id=gpu.id, - uuid=gpu.uuid, - name=gpu.name, - load=gpu.load, - memoryTotal=gpu.memoryTotal, - memoryUsed=gpu.memoryUsed, - memoryFree=gpu.memoryFree, - driver=gpu.driver, - serial=gpu.serial, - display_mode=gpu.display_mode, - display_active=gpu.display_active, - temperature=gpu.temperature - ) - - class NvidiaGPUtil(GPUCardUtil): @classmethod - def detectGPUCardType(cls) -> Optional[GPUCardType]: + def detect_gpu_card_type(cls) -> Optional[GPUCardType]: try: subprocess.check_output(["nvidia-smi"], universal_newlines=True) return GPUCardType.NVIDIA @@ -33,9 +16,27 @@ def detectGPUCardType(cls) -> Optional[GPUCardType]: return None @staticmethod - def getGPUCards() -> List[GPUCard]: - return [_convert(gpu) for gpu in GPUtil.getGPUs()] + def get_gpu_cards() -> 
List[GPUCard]: + return [NvidiaGPUtil.__convert(gpu) for gpu in GPUtil.getGPUs()] + + @staticmethod + def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, maxLoad: float = 0.01, maxMemory: float = 0.01) -> List[int]: + return GPUtil.getAvailable(order='memory', limit=limit, maxLoad=0.01, maxMemory=0.01) @staticmethod - def getAvailableGPUCardIDs() -> List[int]: - return GPUtil.getAvailable() + def __convert(gpu: GPU) -> GPUCard: + return GPUCard( + id=gpu.id, + name=gpu.name, + driver=gpu.driver, + serial=gpu.serial, + memoryTotal=gpu.memoryTotal, + memoryFree=gpu.memoryFree, + memoryUsed=gpu.memoryUsed, + memoryUtil=gpu.memoryUtil, + load=gpu.load, + uuid=gpu.uuid, + display_mode=gpu.display_mode, + display_active=gpu.display_active, + temperature=gpu.temperature + ) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py index e69de29bb2..1b56b7d05e 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py @@ -0,0 +1,60 @@ +import logging +import subprocess +from typing import List, Optional + +from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType +from qaicrt import Util, QIDList, QDevInfo, QStatus + + +class QualcommNPUtil(GPUCardUtil): + @classmethod + def detect_gpu_card_type(cls) -> Optional[GPUCardType]: + try: + subprocess.check_output(["/opt/qti-aic/tools/qaic-util"], universal_newlines=True) + return GPUCardType.QUALCOMM + except Exception: + return None + + @staticmethod + def get_gpu_cards() -> List[GPUCard]: + cards = [] + util = Util() + status, card_list = util.getDeviceIds() + if status.value == 0: + for card in card_list: + status, card_info = util.getDeviceInfo(card) + if status.value == 0 and card_info.devStatus.value == 1: + cards.append(QualcommNPUtil.__convert(card_info)) + + else: + logging.error("Qualcomm Card Status not Healthy") + return cards + + @staticmethod + def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01, + max_memory: float = 0.01) -> List[int]: + available_gpu_card_ids = [] + + if order != "memory": + raise NotImplementedError(f"Qualcomm utils doesn't have support to compute availability based on {order}. 
" + f"Supported criteria: [memory]") + + return available_gpu_card_ids + + @staticmethod + def __convert(npu) -> GPUCard: + memory_total = npu.devData.resourceInfo.dramTotal / 1024 + memory_free = npu.devData.resourceInfo.dramFree / 1024 + memory_used = memory_total - memory_free + memory_utilized = float(memory_used) / float(memory_total) + + return GPUCard( + id=npu.qid, + name=npu.pciInfo.devicename, + driver=npu.devData.fwQCImageVersionString, + serial=npu.devData.serial, + memoryTotal=memory_total, + memoryFree=memory_free, + memoryUsed=memory_used, + memoryUtil=memory_utilized, + ) diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py index c468e2181c..4e6f83e963 100644 --- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -3,24 +3,25 @@ from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil +from fedml.computing.scheduler.comm_utils.gpu_utils.qualcomm_utils import QualcommNPUtil from fedml.computing.scheduler.comm_utils.singleton import Singleton -GPU_CARD_UTILS = [NvidiaGPUtil] +GPU_CARD_UTILS = [NvidiaGPUtil, QualcommNPUtil] class HardwareUtil(metaclass=Singleton): __gpu_util: Optional[GPUCardUtil] = None - @staticmethod - def __get_util() -> Optional[GPUCardUtil]: - if HardwareUtil.__gpu_util is not None: - return HardwareUtil.__gpu_util + @classmethod + def __get_util(cls) -> Optional[GPUCardUtil]: + if cls.__gpu_util is not None: + return cls.__gpu_util for gpu_util in GPU_CARD_UTILS: try: - if gpu_util.detectGPUCardType() is not None: - HardwareUtil._gpu_util = gpu_util() - return HardwareUtil._gpu_util + if gpu_util.detect_gpu_card_type() is not None: + cls.__gpu_util = gpu_util() + return cls.__gpu_util except Exception as e: pass @@ -28,18 +29,18 @@ def __get_util() -> Optional[GPUCardUtil]: return None @staticmethod - def getGPUs() -> List[GPUCard]: + def get_gpus() -> List[GPUCard]: gpu_util = HardwareUtil.__get_util() - return gpu_util.getGPUCards() if gpu_util is not None else [] + return gpu_util.get_gpu_cards() if gpu_util is not None else [] @staticmethod - def getAvailableGPUCardIDs() -> List[int]: + def get_available_gpu_card_ids() -> List[int]: gpu_util = HardwareUtil.__get_util() - return gpu_util.getAvailableGPUCardIDs() if gpu_util is not None else [] + return gpu_util.get_available_gpu_card_ids() if gpu_util is not None else [] if __name__ == "__main__": - gpus = HardwareUtil.getGPUs() - get_available_gpu_cards = HardwareUtil.getAvailableGPUCardIDs() + gpus = HardwareUtil.get_gpus() + get_available_gpu_cards = HardwareUtil.get_available_gpu_card_ids() print(gpus) print(get_available_gpu_cards) From fba65b28d14d3013b6893726ea60bfc8f5200904 Mon Sep 17 00:00:00 2001 From: alaydshah Date: Sun, 12 May 2024 04:40:22 +0000 Subject: [PATCH 031/251] Qualcomm Util -> get_available_gpu_card_ids --- .../scheduler/comm_utils/gpu_utils/gpu_utils.py | 3 +-- .../scheduler/comm_utils/gpu_utils/nvidia_utils.py | 2 +- .../scheduler/comm_utils/gpu_utils/qualcomm_utils.py | 11 +++++++---- .../computing/scheduler/comm_utils/hardware_utils.py | 7 ++++--- 4 files changed, 13 insertions(+), 10 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py index 2731e51a3b..e098ce55ac 100644 --- 
a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py @@ -39,8 +39,7 @@ def detect_gpu_card_type(cls) -> Optional[GPUCardType]: @staticmethod @abstractmethod - def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01, - max_memory: float = 0.01) -> List[int]: + def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]: raise NotImplementedError @staticmethod diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py index f229774ce0..8da4e89573 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py @@ -20,7 +20,7 @@ def get_gpu_cards() -> List[GPUCard]: return [NvidiaGPUtil.__convert(gpu) for gpu in GPUtil.getGPUs()] @staticmethod - def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, maxLoad: float = 0.01, maxMemory: float = 0.01) -> List[int]: + def get_available_gpu_card_ids(order: str, limit: int, maxLoad: float, maxMemory: float) -> List[int]: return GPUtil.getAvailable(order='memory', limit=limit, maxLoad=0.01, maxMemory=0.01) @staticmethod diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py index 1b56b7d05e..9ab629a9cc 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py @@ -1,4 +1,5 @@ import logging +import math import subprocess from typing import List, Optional @@ -31,15 +32,17 @@ def get_gpu_cards() -> List[GPUCard]: return cards @staticmethod - def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01, - max_memory: float = 0.01) -> List[int]: - available_gpu_card_ids = [] + def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]: if order != "memory": raise NotImplementedError(f"Qualcomm utils doesn't have support to compute availability based on {order}. 
" f"Supported criteria: [memory]") - return available_gpu_card_ids + gpu_cards: List[GPUCard] = QualcommNPUtil.get_gpu_cards() + gpu_cards = list(filter(lambda card: card.memoryUtil < max_memory, gpu_cards)) + gpu_cards.sort(key=lambda card: float('inf') if math.isnan(card.memoryUtil) else card.memoryUtil, reverse=False) + gpu_cards = gpu_cards[0:min(limit, len(gpu_cards))] + return list(map(lambda card: card.id, gpu_cards)) @staticmethod def __convert(npu) -> GPUCard: diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py index 4e6f83e963..0ba8aa664d 100644 --- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -34,13 +34,14 @@ def get_gpus() -> List[GPUCard]: return gpu_util.get_gpu_cards() if gpu_util is not None else [] @staticmethod - def get_available_gpu_card_ids() -> List[int]: + def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01, + max_memory: float = 0.01) -> List[int]: gpu_util = HardwareUtil.__get_util() - return gpu_util.get_available_gpu_card_ids() if gpu_util is not None else [] + return gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else [] if __name__ == "__main__": gpus = HardwareUtil.get_gpus() - get_available_gpu_cards = HardwareUtil.get_available_gpu_card_ids() + get_available_gpu_cards = HardwareUtil.get_available_gpu_card_ids(limit=len(gpus)) print(gpus) print(get_available_gpu_cards) From 63c682ca3e64be70cb9e66e104a4e45dcf17d8b8 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sat, 11 May 2024 21:53:17 -0700 Subject: [PATCH 032/251] Add sys path in init --- .../scheduler/comm_utils/gpu_utils/qualcomm_utils.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py index 9ab629a9cc..38d95e0836 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py @@ -1,13 +1,16 @@ import logging import math import subprocess +import sys from typing import List, Optional from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType -from qaicrt import Util, QIDList, QDevInfo, QStatus class QualcommNPUtil(GPUCardUtil): + def __init__(self): + sys.path.append("/opt/qti-aic/dev/lib/x86_64/") + @classmethod def detect_gpu_card_type(cls) -> Optional[GPUCardType]: try: @@ -18,6 +21,8 @@ def detect_gpu_card_type(cls) -> Optional[GPUCardType]: @staticmethod def get_gpu_cards() -> List[GPUCard]: + from qaicrt import Util, QIDList, QDevInfo, QStatus + cards = [] util = Util() status, card_list = util.getDeviceIds() From d3c081d5ec0cc3285582ce6712b8b3ff7429f42a Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sat, 11 May 2024 23:09:40 -0700 Subject: [PATCH 033/251] Replace GPUtil with Hardware Util --- .../scheduler/comm_utils/container_utils.py | 6 +++--- .../comm_utils/gpu_utils/gpu_utils.py | 3 ++- .../comm_utils/gpu_utils/nvidia_utils.py | 3 ++- .../comm_utils/gpu_utils/qualcomm_utils.py | 2 ++ .../scheduler/comm_utils/job_utils.py | 16 --------------- .../scheduler/comm_utils/sys_utils.py | 20 +++++++++++-------- .../computing/scheduler/env/collect_env.py | 6 ++---- 7 files changed, 23 insertions(+), 33 deletions(-) diff --git 
a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py index f337dd9997..4e09315b78 100644 --- a/python/fedml/computing/scheduler/comm_utils/container_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py @@ -8,10 +8,10 @@ from docker import errors from fedml.computing.scheduler.comm_utils import sys_utils +from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil from fedml.core.common.singleton import Singleton from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants import time -from GPUtil import getGPUs class ContainerUtils(Singleton): @@ -252,7 +252,7 @@ def get_container_perf(self, c_name) -> ContainerMetrics: CPU % MEM USAGE / LIMIT MEM % NET I/O BLOCK I/O 0.26% 8.703GiB / 503.5GiB 1.73% 17.4GB / 176MB 545kB / 20.9GB - GPU: We currently use GPUtil to get the GPU stats on host machine since one GPU is not + GPU: We currently use HardwareUtil to get the GPU stats on host machine since one GPU is not shared by multiple containers (TODO: get the GPU stats inside the container) """ @@ -350,7 +350,7 @@ def gpu_stats(gpu_ids): utilz, memory, temp = None, None, None gpu_stats_map = {} # gpu_id: int -> {"gpu_utilization", "gpu_memory_allocated", "gpu_temp"} try: - gpus = getGPUs() + gpus = HardwareUtil.get_gpus() for i in gpu_ids: gpu = gpus[i] diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py index e098ce55ac..3007bc07bc 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py @@ -1,5 +1,5 @@ from abc import ABC, abstractmethod -from dataclasses import dataclass, field +from dataclasses import dataclass from enum import Enum, auto from typing import Optional, List @@ -19,6 +19,7 @@ class GPUCard: name: str driver: str serial: str + vendor: str memoryTotal: float memoryFree: float memoryUsed: float diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py index 8da4e89573..58c3888e68 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py @@ -30,6 +30,7 @@ def __convert(gpu: GPU) -> GPUCard: name=gpu.name, driver=gpu.driver, serial=gpu.serial, + vendor=GPUCardType.NVIDIA.name, memoryTotal=gpu.memoryTotal, memoryFree=gpu.memoryFree, memoryUsed=gpu.memoryUsed, @@ -38,5 +39,5 @@ def __convert(gpu: GPU) -> GPUCard: uuid=gpu.uuid, display_mode=gpu.display_mode, display_active=gpu.display_active, - temperature=gpu.temperature + temperature=gpu.temperature, ) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py index 38d95e0836..ca55fdab7c 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py @@ -51,6 +51,7 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo @staticmethod def __convert(npu) -> GPUCard: + # TODO (alaydshah): Add support for load, memoryUtil, temperature memory_total = npu.devData.resourceInfo.dramTotal / 1024 memory_free = npu.devData.resourceInfo.dramFree / 1024 memory_used = memory_total - memory_free @@ -61,6 
+62,7 @@ def __convert(npu) -> GPUCard: name=npu.pciInfo.devicename, driver=npu.devData.fwQCImageVersionString, serial=npu.devData.serial, + vendor=GPUCardType.QUALCOMM.name, memoryTotal=memory_total, memoryFree=memory_free, memoryUsed=memory_used, diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py index 384cbacd1d..afa6293396 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py @@ -2,7 +2,6 @@ import os import platform import traceback -import GPUtil import docker import fedml from docker import errors, DockerClient @@ -159,23 +158,8 @@ def occupy_gpu_ids(self, run_id, request_gpu_num, device_id, inner_id=None, @staticmethod def search_and_refresh_available_gpu_ids(available_gpu_ids): trimmed_gpu_ids = JobRunnerUtils.trim_unavailable_gpu_ids(available_gpu_ids) - # if len(trimmed_gpu_ids) <= 0: - # available_gpu_ids = JobRunnerUtils.balance_available_gpu_ids(trimmed_gpu_ids) return trimmed_gpu_ids - @staticmethod - def balance_available_gpu_ids(available_gpu_ids): - gpu_list, realtime_available_gpu_ids = JobRunnerUtils.get_gpu_list_and_realtime_gpu_available_ids() - available_gpu_ids = realtime_available_gpu_ids - if len(available_gpu_ids) <= 0: - for gpu in gpu_list: - gpu = GPUtil.GPU(gpu) - if gpu.memoryUtil > 0.8: - continue - available_gpu_ids.append(gpu.id) - - return available_gpu_ids.copy() - @staticmethod def request_gpu_ids(request_gpu_num, available_gpu_ids): available_gpu_count = len(available_gpu_ids) diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py index 64313b0864..f1989fbe5a 100644 --- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py @@ -10,6 +10,7 @@ import psutil import yaml +from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil from fedml.computing.scheduler.comm_utils.yaml_utils import load_yaml_config import json from urllib import request @@ -18,7 +19,6 @@ from packaging import version import sys import subprocess -import GPUtil from fedml.computing.scheduler.slave.client_constants import ClientConstants @@ -95,7 +95,7 @@ def get_sys_runner_info(): pass try: - gpus = GPUtil.getGPUs() + gpus = HardwareUtil.get_gpus() memory_total = 0.0 memory_free = 0.0 for gpu in gpus: @@ -105,9 +105,11 @@ def get_sys_runner_info(): gpu_available_mem = "{:.1f} G".format(memory_free / 1024.0) gpu_total_mem = "{:.1f}G".format(memory_total / 1024.0) gpu_count = len(gpus) - gpu_vendor = "nvidia" + if gpu_count: + gpu_vendor = gpus[0].vendor + gpu_device_name = gpus[0].name - gpu_device_name = torch.cuda.get_device_name(0) + # gpu_device_name = torch.cuda.get_device_name(0) gpu_info = gpu_device_name except: pass @@ -168,7 +170,7 @@ def get_gpu_list(): return ret_gpu_list[0:simulation_gpu_count] - gpu_list = GPUtil.getGPUs() + gpu_list = HardwareUtil.get_gpus() ret_gpu_list = list() for gpu in gpu_list: ret_gpu_item = {"ID": gpu.id, "uuid": gpu.uuid, "load": gpu.load, @@ -189,7 +191,8 @@ def get_available_gpu_id_list(limit=1) -> List[int]: available_gpu_ids.append(count) return available_gpu_ids[0:simulation_gpu_count] - gpu_available_list = GPUtil.getAvailable(order='memory', limit=limit, maxLoad=0.01, maxMemory=0.01) + gpu_available_list = HardwareUtil.get_available_gpu_card_ids(order='memory', limit=limit, max_load=0.01, + max_memory=0.01) return 
gpu_available_list @@ -219,9 +222,10 @@ def get_gpu_count_vendor(): gpu_count = 0 gpu_vendor = "" try: - gpus = GPUtil.getGPUs() + gpus = HardwareUtil.get_gpus() gpu_count = len(gpus) - gpu_vendor = "nvidia" + if gpu_count: + gpu_vendor = gpus[0].vendor except: pass diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py index dcece6a720..63f7e66b85 100644 --- a/python/fedml/computing/scheduler/env/collect_env.py +++ b/python/fedml/computing/scheduler/env/collect_env.py @@ -1,9 +1,8 @@ import os import traceback -import GPUtil - import fedml +from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil from fedml.computing.scheduler.slave.client_diagnosis import ClientDiagnosis @@ -59,8 +58,7 @@ def collect_env(): try: print("\n======== GPU Configuration ========") - import GPUtil - gpus = GPUtil.getGPUs() + gpus = HardwareUtil.get_gpus() memory_total = 0.0 memory_free = 0.0 gpu_name = "" From 8b2ff80167e085f9763e44083cd7d8de95b17811 Mon Sep 17 00:00:00 2001 From: alaydshah Date: Sun, 12 May 2024 06:23:05 +0000 Subject: [PATCH 034/251] Make fedml env hardware agnostic --- .../computing/scheduler/env/collect_env.py | 17 ++++++++--------- 1 file changed, 8 insertions(+), 9 deletions(-) diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py index 63f7e66b85..b2f7bd7f5e 100644 --- a/python/fedml/computing/scheduler/env/collect_env.py +++ b/python/fedml/computing/scheduler/env/collect_env.py @@ -11,7 +11,7 @@ def collect_env(): print("FedML version: " + str(fedml.__version__)) env_version = fedml.get_env_version() print("FedML ENV version: " + str(env_version)) - + print("Execution path:" + str(os.path.abspath(fedml.__file__))) print("\n======== Running Environment ========") @@ -62,26 +62,25 @@ def collect_env(): memory_total = 0.0 memory_free = 0.0 gpu_name = "" + vendor = "" for gpu in gpus: memory_total += gpu.memoryTotal memory_free += gpu.memoryFree gpu_name = gpu.name + vendor = gpu.vendor - print("NVIDIA GPU Info: " + gpu_name) + print(f"{vendor} GPU Info: " + gpu_name) print("Available GPU memory: {:.1f} G / {:.1f}G".format( memory_free / 1024.0, memory_total / 1024.0)) + device_count = len(gpus) + print("device_count = {}".format(device_count)) + import torch torch_is_available = torch.cuda.is_available() print("torch_is_available = {}".format(torch_is_available)) - device_count = torch.cuda.device_count() - print("device_count = {}".format(device_count)) - - device_name = torch.cuda.get_device_name(0) - print("device_name = {}".format(device_name)) - except: print("No GPU devices") @@ -108,4 +107,4 @@ def collect_env(): print(f"You can not connect to {mqtt_url}.\n") except Exception as e: print(f"The connection exception: {traceback.format_exc()}") - pass \ No newline at end of file + pass From f9aaee926f31810697d060cc271f5fec5a9d8e5f Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sun, 12 May 2024 00:09:45 -0700 Subject: [PATCH 035/251] Nit --- .../fedml/computing/scheduler/comm_utils/hardware_utils.py | 6 +++--- python/fedml/computing/scheduler/comm_utils/sys_utils.py | 4 ++-- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py index 0ba8aa664d..1aeb5eb0be 100644 --- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -34,14 
+34,14 @@ def get_gpus() -> List[GPUCard]: return gpu_util.get_gpu_cards() if gpu_util is not None else [] @staticmethod - def get_available_gpu_card_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01, - max_memory: float = 0.01) -> List[int]: + def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float = 0.01, + max_memory: float = 0.01) -> List[int]: gpu_util = HardwareUtil.__get_util() return gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else [] if __name__ == "__main__": gpus = HardwareUtil.get_gpus() - get_available_gpu_cards = HardwareUtil.get_available_gpu_card_ids(limit=len(gpus)) + get_available_gpu_cards = HardwareUtil.get_available_gpu_ids(limit=len(gpus)) print(gpus) print(get_available_gpu_cards) diff --git a/python/fedml/computing/scheduler/comm_utils/sys_utils.py b/python/fedml/computing/scheduler/comm_utils/sys_utils.py index f1989fbe5a..aaa37bc4db 100644 --- a/python/fedml/computing/scheduler/comm_utils/sys_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/sys_utils.py @@ -191,8 +191,8 @@ def get_available_gpu_id_list(limit=1) -> List[int]: available_gpu_ids.append(count) return available_gpu_ids[0:simulation_gpu_count] - gpu_available_list = HardwareUtil.get_available_gpu_card_ids(order='memory', limit=limit, max_load=0.01, - max_memory=0.01) + gpu_available_list = HardwareUtil.get_available_gpu_ids(order='memory', limit=limit, max_load=0.01, + max_memory=0.01) return gpu_available_list From 7e547f4ea78d08202ae23ff19b9ba6a05fffe2f6 Mon Sep 17 00:00:00 2001 From: Alex Date: Sun, 12 May 2024 20:29:44 +0800 Subject: [PATCH 036/251] [CoreEngine] make the deployment and federated learning work. --- python/fedml/__init__.py | 10 +++++++--- .../scheduler/comm_utils/container_utils.py | 2 +- .../computing/scheduler/comm_utils/job_monitor.py | 10 +++------- .../scheduler/master/base_master_job_runner.py | 15 +++++++++------ .../model_scheduler/worker_job_runner.py | 2 +- .../scheduler_core/scheduler_base_job_runner.py | 15 +++++++++++---- .../scheduler_core/status_manager_protocols.py | 5 ++++- python/fedml/core/mlops/mlops_device_perfs.py | 13 ++++++++----- 8 files changed, 44 insertions(+), 28 deletions(-) diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py index 92b72357a0..8044387b65 100644 --- a/python/fedml/__init__.py +++ b/python/fedml/__init__.py @@ -90,9 +90,13 @@ def init(args=None, check_env=True, should_init_logs=True): # Windows/Linux/MacOS compatability issues on multi-processing # https://github.com/pytorch/pytorch/issues/3492 """ - if multiprocessing.get_start_method() != "spawn": - # force all platforms (Windows/Linux/MacOS) to use the same way (spawn) for multiprocessing - multiprocessing.set_start_method("spawn", force=True) + if multiprocessing.get_start_method() != "fork": + # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing + multiprocessing.set_start_method("fork", force=True) + + # if multiprocessing.get_start_method() != "spawn": + # # force all platforms (Windows/Linux/MacOS) to use the same way (spawn) for multiprocessing + # multiprocessing.set_start_method("spawn", force=True) """ # https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py index 9469c8b471..4cbf642a45 100644 --- 
a/python/fedml/computing/scheduler/comm_utils/container_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py @@ -178,7 +178,7 @@ def get_container_rank_same_model(prefix: str): running_model_name = hash("model_endpoint_id_{}_name_{}_model_id_{}_name_{}_ver_{}") """ try: - docker.from_env(timeout=5, version="auto") + client = docker.from_env(timeout=5, version="auto") except Exception: logging.error("Failed to connect to the docker daemon, please ensure that you have " "installed Docker Desktop or Docker Engine, and the docker is running") diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py index 84723d373a..9bee76e780 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py +++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py @@ -208,6 +208,8 @@ def monitor_replicas_number(): endpoint_replicas_details = {} if isinstance(endpoint_detail, str): endpoint_replicas_details = json.loads(endpoint_detail) + if isinstance(endpoint_replicas_details, str): + endpoint_replicas_details = json.loads(endpoint_replicas_details) if "result" in endpoint_replicas_details: endpoint_replica_details = {} @@ -220,13 +222,7 @@ def monitor_replicas_number(): for endpoint_id, num_replica in res_to_mlops.items(): curr_version = fedml.get_env_version() num_replica_url_path = "fedmlModelServer/api/v1/endpoint/replica-info" - if curr_version == "release": - mlops_prefix = "https://open.fedml.ai/" - elif curr_version == "test": - mlops_prefix = "https://open-test.fedml.ai/" - else: - logging.error(f"Do not support the version {curr_version}.") - return + mlops_prefix = fedml._get_backend_service() url = f"{mlops_prefix}{num_replica_url_path}" cached_token = FedMLModelCache.get_instance().get_end_point_token_with_eid(endpoint_id) diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py index ce0515160f..07c297c65d 100755 --- a/python/fedml/computing/scheduler/master/base_master_job_runner.py +++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py @@ -221,10 +221,10 @@ def _process_run_logs_queue(self, run_logs_queue): def run_server_job( self, process_event, completed_event, edge_id_status_queue=None, - edge_device_info_queue=None, run_metrics_queue=None, - run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None, - sender_message_queue=None, listener_message_queue=None, - edge_device_info_global_queue=None, status_center_queue=None + edge_device_info_queue=None, run_metrics_queue=None, run_event_queue=None, + run_artifacts_queue=None, run_logs_queue=None, edge_device_info_global_queue=None, + run_extend_queue_list=None, sender_message_center_queue=None, listener_message_queue=None, + status_center_queue=None ): print(f"Server runner process id {os.getpid()}, run id {self.run_id}") @@ -239,10 +239,10 @@ def run_server_job( try: MLOpsUtils.set_ntp_offset(self.ntp_offset) - self.rebuild_message_status_center(sender_message_queue, listener_message_queue, status_center_queue) + self.rebuild_message_status_center(sender_message_center_queue, listener_message_queue, status_center_queue) self.run_server_job_impl(process_event, completed_event, - message_center_queue=sender_message_queue) + message_center_queue=sender_message_center_queue) except RunnerError: logging.info("Runner stopped.") self.status_reporter.report_server_id_status( @@ -703,5 +703,8 @@ def 
should_process_async_cluster(self): return False, self.async_check_timeout + def get_client_id_list(self, server_edge_id_list): + return server_edge_id_list + diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py index d1cfd3b83c..ac9328592c 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -353,7 +353,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center, return True elif op == "update" or op == "rollback": # Update is combine of delete and add - worker_ip = self.get_ip_address(self.request_json) + worker_ip = GeneralConstants.get_ip_address(self.request_json) for rank in replica_rank_to_update: # Delete a replica (container) if exists self.replica_handler.remove_replica(rank) diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py index 0b4d47d52c..f40b8ecfb6 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -112,7 +112,8 @@ def build_dynamic_constrain_variables(self, run_id, run_config): self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(" ", "") + self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = \ + str(self.get_client_id_list(server_edge_id_list)).replace(" ", "") self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "") self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data) self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list) @@ -129,6 +130,11 @@ def build_dynamic_constrain_variables(self, run_id, run_config): "LOG_SERVER_URL" ] + def get_client_id_list(self, server_edge_id_list): + local_edge_id_list = list() + local_edge_id_list.append(int(self.edge_id)) + return local_edge_id_list + @staticmethod def unzip_file(zip_file, unzip_file_path) -> str: if zipfile.is_zipfile(zip_file): @@ -497,11 +503,12 @@ def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_f if job_yaml_default_none is None: # Generate the job executing commands for previous federated learning (Compatibility) python_program = get_python_program() - logging.info("Run the client: {} {} --cf {} --rank {} --role client".format( - python_program, entry_file_full_path, conf_file_full_path, str(dynamic_args_config.get("rank", 1)))) rank = str(dynamic_args_config.get("rank", 1)) + role = "server" if rank == "0" else "client" + logging.info(f"Run the {role}: {python_program} {entry_file_full_path} --cf {conf_file_full_path} " + f"--rank {rank} --role {role}") entry_command = f"{python_program} {entry_file_full_path} --cf " \ - f"{conf_file_full_path} --rank {rank} --role client" + f"{conf_file_full_path} --rank {rank} --role {role}" shell_cmd_list = [entry_command] # Run the job executing commands for previous federated learning (Compatibility) diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py 
b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 871b9026bf..811ff2a2d5 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -182,10 +182,12 @@ def process_device_status(self, run_id, edge_id, status): server_id = edge_id_status_dict.get("server", 0) enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id) running_edges_list = list() + edge_nums = 0 for edge_id_item, status_item in edge_id_status_dict.items(): if edge_id_item == "server": continue + edge_nums += 1 if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: number_of_failed_edges += 1 @@ -210,7 +212,8 @@ def process_device_status(self, run_id, edge_id, status): self.message_reporter.report_client_training_status(edge_id, consensus_status, run_id=run_id, update_db=False) # Report server status based on the fault tolerance model and parameters - edge_nums = len(edge_id_status_dict.keys()) - 1 + if edge_nums <= 0: + return status_to_report = self.calculate_server_status( run_id, edge_nums, number_of_failed_edges, number_of_finished_edges, number_of_killed_edges, running_edges_list, enable_fault_tolerance=enable_fault_tolerance, diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py index b258692645..a0e1e972b6 100644 --- a/python/fedml/core/mlops/mlops_device_perfs.py +++ b/python/fedml/core/mlops/mlops_device_perfs.py @@ -166,11 +166,14 @@ def report_device_realtime_stats_entry(self, sys_event, role, is_client=False): sleep_time_interval_for_server_monitor = 60 while not self.should_stop_device_realtime_stats(): - if role == ROLE_DEVICE_INFO_REPORTER: - time.sleep(sleep_time_interval_for_device_info) - elif role == ROLE_DEVICE_JOB_TOTAL_MONITOR: - time.sleep(sleep_time_interval_for_client_monitor if is_client - else sleep_time_interval_for_server_monitor) + if self.enable_job_total_monitor: + if role == ROLE_DEVICE_INFO_REPORTER: + time.sleep(sleep_time_interval_for_device_info) + elif role == ROLE_DEVICE_JOB_TOTAL_MONITOR: + time.sleep(sleep_time_interval_for_client_monitor if is_client + else sleep_time_interval_for_server_monitor) + else: + time.sleep(time_interval_map[role]) try: if role == ROLE_DEVICE_INFO_REPORTER: From d1e4fbeaad73a252d4118eceeab9d3414db948db Mon Sep 17 00:00:00 2001 From: Alex Date: Sun, 12 May 2024 20:58:48 +0800 Subject: [PATCH 037/251] [CoreEngine] change the text of package downloading. 
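This replaces the eager f-string in the package download loop with logging's lazy %-style formatting: the size argument is only interpolated when an INFO record is actually emitted, and %.2f renders it with a fixed two-decimal precision instead of a raw float. A minimal, self-contained sketch of the difference (the byte counter below is a hypothetical stand-in for the runner's total_size):

    import logging

    logging.basicConfig(level=logging.INFO)
    total_size = 3_276_800  # hypothetical count of bytes written so far

    # Before: the f-string is formatted eagerly, even if INFO were disabled,
    # and prints an unrounded float -> "package downloaded size 3200.0 KB"
    logging.info(f"package downloaded size {total_size/1024} KB")

    # After: the logging module interpolates the argument lazily, and %.2f
    # pins the precision -> "package downloaded size 3200.00 KB"
    logging.info("package downloaded size %.2f KB", total_size/1024)

Deferred %-formatting is also the idiom linters such as pylint recommend (W1203, logging-fstring-interpolation), since the message is only built when the record is actually emitted.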
--- .../scheduler/scheduler_core/scheduler_base_job_runner.py | 2 +- python/fedml/core/mlops/mlops_device_perfs.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py index f40b8ecfb6..054efe437a 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -187,7 +187,7 @@ def download_package_proc(self, package_url, local_package_file): # Write the chunk to the file written_size = f.write(chunk) total_size += written_size - logging.info(f"package downloaded size {total_size/1024} KB") + logging.info("package downloaded size %.2f KB", total_size/1024) self.download_time = time.time() self.download_finished = True
diff --git a/python/fedml/core/mlops/mlops_device_perfs.py b/python/fedml/core/mlops/mlops_device_perfs.py index a0e1e972b6..29183a6e78 100644 --- a/python/fedml/core/mlops/mlops_device_perfs.py +++ b/python/fedml/core/mlops/mlops_device_perfs.py @@ -157,7 +157,7 @@ def report_device_realtime_stats_entry(self, sys_event, role, is_client=False): } job_monitor_obj = None - if role == ROLE_AUTO_SCALER: + if role == ROLE_AUTO_SCALER or role == ROLE_DEVICE_JOB_TOTAL_MONITOR: # job_monitor Should be initialized once job_monitor_obj = JobMonitor.get_instance()
From 4f65467df09602d2ae67b1d89f7d41d5457c8fa4 Mon Sep 17 00:00:00 2001
From: Alex Date: Sun, 12 May 2024 22:43:44 +0800
Subject: [PATCH 038/251] [CoreEngine] pass the job type when releasing gpu ids, set the message center name for deployments.

Deploy runs are now recognized from the model_config block in the running json, so releasing GPU ids can skip container cleanup for serve/deploy jobs, and the deploy master and slave agents write their message records under dedicated message center names.

--- .../fedml/computing/scheduler/comm_utils/job_utils.py | 3 +++ .../model_scheduler/master_protocol_manager.py | 2 ++ .../model_scheduler/worker_protocol_manager.py | 2 ++ .../scheduler_core/scheduler_base_job_runner.py | 10 +--------- .../computing/scheduler/slave/base_slave_job_runner.py | 4 +++- .../scheduler/slave/base_slave_job_runner_manager.py | 4 ++-- .../scheduler/slave/base_slave_protocol_manager.py | 5 +++-- 7 files changed, 16 insertions(+), 14 deletions(-)
diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py index ece165e92c..bc5985533b 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py @@ -728,6 +728,9 @@ def parse_job_type(running_json): job_type = job_yaml.get("job_type", None) job_type = job_yaml.get("task_type", SchedulerConstants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type + model_config = running_json_obj.get("model_config", None) + if model_config is not None: + job_type = SchedulerConstants.JOB_TASK_TYPE_DEPLOY return job_type @staticmethod
diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index 144d17fd02..a5f2a37dfe 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -17,6 +17,8 @@ class FedMLDeployMasterProtocolManager(FedMLBaseMasterProtocolManager): def __init__(self, args, agent_config=None): FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config) + self.message_center_name = "deploy_master_agent" + self.topic_start_deployment = None
self.topic_activate_endpoint = None self.topic_deactivate_deployment = None diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py index 5f4835d9aa..3a0f835b6c 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py @@ -23,6 +23,8 @@ class FedMLDeployWorkerProtocolManager(FedMLBaseSlaveProtocolManager): def __init__(self, args, agent_config=None): FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config) + self.message_center_name = "deploy_slave_agent" + self.topic_start_deployment = None self.topic_delete_deployment = None diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py index 054efe437a..03d3fd5d92 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -584,15 +584,7 @@ def start_runner_process( return None @staticmethod - def cleanup_containers_and_release_gpus(run_id, edge_id): - job_type = JobRunnerUtils.get_job_type_from_run_id(run_id) - - if not job_type: - logging.info(f"Failed to get job type from run id {run_id}. This is not an error as it would usually " - f"happen when the job is not found in the database because job is already finished and " - f"cleaned up. Exiting cleanup_containers_and_release_gpus.") - return - + def cleanup_containers_and_release_gpus(run_id, edge_id, job_type=SchedulerConstants.JOB_TASK_TYPE_TRAIN): # Check if the job type is not "serve" or "deploy" if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY): diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py index de2956ad94..cc7c3c222b 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py +++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py @@ -15,6 +15,7 @@ from multiprocessing import Process from ..scheduler_core.scheduler_base_job_runner import FedMLSchedulerBaseJobRunner, RunnerError, RunnerCompletedError from ..scheduler_core.general_constants import GeneralConstants +from ..comm_utils.job_utils import JobRunnerUtils class FedMLBaseSlaveJobRunner(FedMLSchedulerBaseJobRunner, ABC): @@ -78,7 +79,8 @@ def run(self, process_event, completed_event, run_extend_queue_list, self.computing_started_time, computing_ended_time, self.args.account_id, self.args.api_key) logging.info("Release resources.") - FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(self.run_id, self.edge_id) + job_type = JobRunnerUtils.parse_job_type(self.request_json) + FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(self.run_id, self.edge_id, job_type) MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) if self.mlops_metrics is not None: self.mlops_metrics.stop_sys_perf() diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py index c058d5dd0e..80e486224e 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py +++ 
b/python/fedml/computing/scheduler/slave/base_slave_job_runner_manager.py @@ -8,5 +8,5 @@ class FedMLBaseSlaveJobRunnerManager(FedMLSchedulerBaseJobRunnerManager, ABC): def __init__(self): FedMLSchedulerBaseJobRunnerManager.__init__(self) - def cleanup_containers_and_release_gpus(self, run_id, edge_id): - FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(run_id, edge_id) + def cleanup_containers_and_release_gpus(self, run_id, edge_id, job_type): + FedMLSchedulerBaseJobRunner.cleanup_containers_and_release_gpus(run_id, edge_id, job_type) diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index b3cd154d23..fc67ec2ece 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -301,7 +301,8 @@ def callback_stop_train(self, topic, payload): # logging.info("Stop run with multiprocessing...") # Stop client with multiprocessing mode run_id_str = str(run_id) - self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id) + self._get_job_runner_manager().cleanup_containers_and_release_gpus( + run_id, edge_id, SchedulerConstants.JOB_TASK_TYPE_TRAIN) self.sync_run_stop_status(run_status=run_status) # Register the job stopping message into the status center @@ -512,7 +513,7 @@ def process_status(self, run_id, status, edge_id): job_type = JobRunnerUtils.parse_job_type(running_json) if not SchedulerConstants.is_deploy_job(job_type): logging.info(f"[run/device][{run_id}/{edge_id}] Release gpu resource when run ended.") - self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id) + self._get_job_runner_manager().cleanup_containers_and_release_gpus(run_id, edge_id, job_type) # Stop the runner process run_process = self._get_job_runner_manager().get_runner_process(run_id) From 906e8b01de21988a358687e31b4854a73885e9b7 Mon Sep 17 00:00:00 2001 From: alaydshah Date: Sun, 12 May 2024 17:43:57 +0000 Subject: [PATCH 039/251] Minor Bug --- .../computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py index 58c3888e68..14d8230d06 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py @@ -20,8 +20,8 @@ def get_gpu_cards() -> List[GPUCard]: return [NvidiaGPUtil.__convert(gpu) for gpu in GPUtil.getGPUs()] @staticmethod - def get_available_gpu_card_ids(order: str, limit: int, maxLoad: float, maxMemory: float) -> List[int]: - return GPUtil.getAvailable(order='memory', limit=limit, maxLoad=0.01, maxMemory=0.01) + def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]: + return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory) @staticmethod def __convert(gpu: GPU) -> GPUCard: From 3a5daf65ac8908cdbc94b8cb9fb0e4bc8b7bb5ca Mon Sep 17 00:00:00 2001 From: alaydshah Date: Sun, 12 May 2024 19:37:11 +0000 Subject: [PATCH 040/251] Add Hardware specific docker device mapping --- .../scheduler/comm_utils/gpu_utils/gpu_utils.py | 7 ++++++- .../scheduler/comm_utils/gpu_utils/nvidia_utils.py | 11 ++++++++++- .../scheduler/comm_utils/gpu_utils/qualcomm_utils.py | 9 ++++++++- 
.../computing/scheduler/comm_utils/hardware_utils.py | 7 ++++++- 4 files changed, 30 insertions(+), 4 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py index 3007bc07bc..c7ce91f694 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py @@ -1,7 +1,7 @@ from abc import ABC, abstractmethod from dataclasses import dataclass from enum import Enum, auto -from typing import Optional, List +from typing import Optional, List, Dict class GPUCardType(Enum): @@ -47,3 +47,8 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo @abstractmethod def get_gpu_cards() -> List[GPUCard]: raise NotImplementedError + + @staticmethod + @abstractmethod + def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: + raise NotImplementedError diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py index 14d8230d06..f0da4f8fb4 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py @@ -1,12 +1,15 @@ import subprocess -from typing import List, Optional +from typing import List, Optional, Dict +import docker +from docker import types from GPUtil import GPUtil, GPU from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType class NvidiaGPUtil(GPUCardUtil): + @classmethod def detect_gpu_card_type(cls) -> Optional[GPUCardType]: try: @@ -23,6 +26,12 @@ def get_gpu_cards() -> List[GPUCard]: def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]: return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory) + @staticmethod + def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: + if gpu_ids and len(gpu_ids): + return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_ids, capabilities=[["gpu"]])]} + return None + @staticmethod def __convert(gpu: GPU) -> GPUCard: return GPUCard( diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py index ca55fdab7c..c25c34dcaa 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py @@ -2,12 +2,13 @@ import math import subprocess import sys -from typing import List, Optional +from typing import List, Optional, Dict from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType class QualcommNPUtil(GPUCardUtil): + def __init__(self): sys.path.append("/opt/qti-aic/dev/lib/x86_64/") @@ -49,6 +50,12 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo gpu_cards = gpu_cards[0:min(limit, len(gpu_cards))] return list(map(lambda card: card.id, gpu_cards)) + @staticmethod + def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: + if gpu_ids and len(gpu_ids): + return {"devices": [f"/dev/accel/accel{gpu_id}:/dev/accel/accel{gpu_id}" for gpu_id in gpu_ids]} + return None + @staticmethod def __convert(npu) -> GPUCard: # TODO (alaydshah): Add support for load, memoryUtil, temperature diff --git 
a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py index 1aeb5eb0be..140f316554 100644 --- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -1,5 +1,5 @@ import logging -from typing import Optional, List +from typing import Optional, List, Dict from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil @@ -39,6 +39,11 @@ def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float gpu_util = HardwareUtil.__get_util() return gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else [] + @staticmethod + def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: + gpu_util = HardwareUtil.__get_util() + return gpu_util.get_docker_gpu_device_mapping(gpu_ids) + if __name__ == "__main__": gpus = HardwareUtil.get_gpus() From e57af1eaaa49490a4d28c802a844ca33cf6e42cd Mon Sep 17 00:00:00 2001 From: alaydshah Date: Sun, 12 May 2024 19:51:36 +0000 Subject: [PATCH 041/251] Bug Fix --- .../computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py index f0da4f8fb4..79071bf935 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py @@ -29,7 +29,8 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo @staticmethod def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: if gpu_ids and len(gpu_ids): - return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_ids, capabilities=[["gpu"]])]} + gpu_id_list = list(map(lambda x: str(x), gpu_ids)) + return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_id_list, capabilities=[["gpu"]])]} return None @staticmethod From e70f56f7b0449e9be8ae59262a1a47bb8eb2ab02 Mon Sep 17 00:00:00 2001 From: alaydshah Date: Sun, 12 May 2024 22:10:56 +0000 Subject: [PATCH 042/251] Add util function to get gpu_ids from container name --- .../comm_utils/gpu_utils/gpu_utils.py | 7 +++ .../comm_utils/gpu_utils/nvidia_utils.py | 13 ++++- .../comm_utils/gpu_utils/qualcomm_utils.py | 55 ++++++++++++++++++- .../scheduler/comm_utils/hardware_utils.py | 14 ++++- 4 files changed, 86 insertions(+), 3 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py index c7ce91f694..292bcb3624 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py @@ -3,6 +3,8 @@ from enum import Enum, auto from typing import Optional, List, Dict +from docker import DockerClient + class GPUCardType(Enum): NVIDIA = auto() @@ -52,3 +54,8 @@ def get_gpu_cards() -> List[GPUCard]: @abstractmethod def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: raise NotImplementedError + + @staticmethod + @abstractmethod + def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]: + raise NotImplementedError diff --git 
a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py index 79071bf935..0c05b25644 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py @@ -1,8 +1,9 @@ +import logging import subprocess from typing import List, Optional, Dict import docker -from docker import types +from docker import types, DockerClient from GPUtil import GPUtil, GPU from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType @@ -33,6 +34,16 @@ def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_id_list, capabilities=[["gpu"]])]} return None + @staticmethod + def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]: + try: + gpu_ids = docker_client.api.inspect_container(container_name)["HostConfig"]["DeviceRequests"][0]["DeviceIDs"] + return list(map(int, gpu_ids)) + except Exception as e: + logging.error(f"Failed to get GPU IDs: {e}") + pass + return [] + @staticmethod def __convert(gpu: GPU) -> GPUCard: return GPUCard( diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py index c25c34dcaa..5f0eb3b5c6 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py @@ -1,13 +1,17 @@ import logging import math +import re import subprocess import sys from typing import List, Optional, Dict +from docker import DockerClient + from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCard, GPUCardUtil, GPUCardType class QualcommNPUtil(GPUCardUtil): + NPU_CARD_PATH = "/dev/accel/accel" def __init__(self): sys.path.append("/opt/qti-aic/dev/lib/x86_64/") @@ -53,9 +57,22 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo @staticmethod def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: if gpu_ids and len(gpu_ids): - return {"devices": [f"/dev/accel/accel{gpu_id}:/dev/accel/accel{gpu_id}" for gpu_id in gpu_ids]} + return { + "devices": [f"{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}:{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}" for gpu_id + in gpu_ids]} return None + @staticmethod + def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]: + gpu_ids = [] + try: + docker_inspect_info = docker_client.api.inspect_container(container_name) + gpu_ids = QualcommNPUtil.__parse_gpu_ids(docker_inspect_info.get("HostConfig", {})) + except Exception as e: + logging.error(f"Failed to get GPU IDs: {e}") + pass + return gpu_ids + @staticmethod def __convert(npu) -> GPUCard: # TODO (alaydshah): Add support for load, memoryUtil, temperature @@ -75,3 +92,39 @@ def __convert(npu) -> GPUCard: memoryUsed=memory_used, memoryUtil=memory_utilized, ) + + @staticmethod + def __parse_gpu_ids(host_config: dict) -> List[int]: + devices = host_config.get('Devices', []) + gpu_ids = [] + for device in devices: + gpu_id = QualcommNPUtil.__extract_integer_from_host_path(device.get('PathOnHost', None)) + + # Check explicitly if gpu_id is not None, as gpu_id can be 0, which is a valid value to include. 
+ if gpu_id is not None: + gpu_ids.append(gpu_id) + return gpu_ids + + @staticmethod + def __extract_integer_from_host_path(host_path: str) -> Optional[int]: + if not host_path: + logging.error("Host Path is None; GPU Id extraction Failed") + return None + + npu_card_path = QualcommNPUtil.NPU_CARD_PATH + + # Check if host_path starts with npu_card_path + if host_path.startswith(npu_card_path): + + # Extract the numeric suffix from the host path + suffix = host_path[len(npu_card_path):] # Get the substring after npu_card_path + match = re.match(r'^(\d+)', suffix) # Use regex to match the leading integer + if match: + return int(match.group(1)) # Return the extracted integer + else: + logging.error(f"Failed to extract GPU id from Host Path {host_path}") + else: + logging.error(f"Host Path {host_path} doesn't start with NPU Card Path {npu_card_path}") + + # Return None if extraction fails + return None diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py index 140f316554..a0d27fd7db 100644 --- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -1,6 +1,8 @@ import logging from typing import Optional, List, Dict +from docker import DockerClient + from fedml.computing.scheduler.comm_utils.gpu_utils.gpu_utils import GPUCardUtil, GPUCard from fedml.computing.scheduler.comm_utils.gpu_utils.nvidia_utils import NvidiaGPUtil from fedml.computing.scheduler.comm_utils.gpu_utils.qualcomm_utils import QualcommNPUtil @@ -42,7 +44,17 @@ def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float @staticmethod def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: gpu_util = HardwareUtil.__get_util() - return gpu_util.get_docker_gpu_device_mapping(gpu_ids) + if gpu_util is not None: + return gpu_util.get_docker_gpu_device_mapping(gpu_ids) + return None + + @staticmethod + def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]: + gpu_ids = [] + gpu_util = HardwareUtil.__get_util() + if gpu_util is not None: + gpu_ids = gpu_util.get_docker_gpu_ids_by_container_name(container_name, docker_client) + return gpu_ids if __name__ == "__main__": From 33da11e4024b617085b497e2fbc7a5a339fe4d45 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sun, 12 May 2024 15:24:26 -0700 Subject: [PATCH 043/251] Make gpu stats fetching hardware agnostic --- .../scheduler/comm_utils/container_utils.py | 40 +++++++------------ 1 file changed, 14 insertions(+), 26 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py index 4e09315b78..3d076c0ffe 100644 --- a/python/fedml/computing/scheduler/comm_utils/container_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py @@ -320,7 +320,7 @@ def get_container_perf(self, c_name) -> ContainerMetrics: round(blk_read_bytes / (1024 * 1024), 1), round(blk_write_bytes / (1024 * 1024), 1)) # Calculate the gpu usage - gpus_stat = self.generate_container_gpu_stats(c_name) + gpus_stat = self.generate_container_gpu_stats(container_name=c_name) # Record timestamp timestamp = stats["read"] @@ -328,39 +328,27 @@ def get_container_perf(self, c_name) -> ContainerMetrics: return ContainerUtils.ContainerMetrics(cpu_percent, mem_gb_used, mem_gb_avail, recv_megabytes, sent_megabytes, blk_read_bytes, blk_write_bytes, timestamp, gpus_stat) - def 
generate_container_gpu_stats(self, c_name): - gpu_ids = self.get_gpu_ids_by_container_name(c_name) + def generate_container_gpu_stats(self, container_name): + client = self.get_docker_client() + gpu_ids = HardwareUtil.get_docker_gpu_ids_by_container_name(container_name=container_name, docker_client=client) gpu_stats = self.gpu_stats(gpu_ids) return gpu_stats - def get_gpu_ids_by_container_name(self, c_name): - client = self.get_docker_client() - gpu_ids = [] - try: - gpu_ids = client.api.inspect_container(c_name)["HostConfig"]["DeviceRequests"][0]["DeviceIDs"] - gpu_ids = list(map(int, gpu_ids)) - except Exception as e: - logging.error(f"Failed to get GPU IDs: {e}") - pass - - return gpu_ids - @staticmethod def gpu_stats(gpu_ids): utilz, memory, temp = None, None, None gpu_stats_map = {} # gpu_id: int -> {"gpu_utilization", "gpu_memory_allocated", "gpu_temp"} + gpu_ids = set(gpu_ids) try: - gpus = HardwareUtil.get_gpus() - - for i in gpu_ids: - gpu = gpus[i] - gpu_stats_map[i] = { - "gpu_utilization": gpu.load*100, - "gpu_memory_allocated": gpu.memoryUtil*100, - "gpu_temp": gpu.temperature, - # "gpu_power_usage": pynvml.nvmlDeviceGetPowerUsage(handle) / 1000, # in watts - # "gpu_time_spent_accessing_memory": utilz.memory # in ms - } + for gpu in HardwareUtil.get_gpus(): + if gpu.id in gpu_ids: + gpu_stats_map[gpu.id] = { + "gpu_utilization": gpu.load * 100, + "gpu_memory_allocated": gpu.memoryUsed / gpu.memoryTotal * 100, + "gpu_temp": gpu.temperature, + # "gpu_power_usage": pynvml.nvmlDeviceGetPowerUsage(handle) / 1000, # in watts + # "gpu_time_spent_accessing_memory": utilz.memory # in ms + } except Exception as e: logging.error(f"Failed to get GPU stats: {e}") From 2d458a8342d7f545fe0991d0d6bb0bf2293a7cc8 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sun, 12 May 2024 15:27:11 -0700 Subject: [PATCH 044/251] Nits --- .../fedml/computing/scheduler/comm_utils/container_utils.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py index 3d076c0ffe..c7645104c0 100644 --- a/python/fedml/computing/scheduler/comm_utils/container_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py @@ -2,6 +2,8 @@ import os import traceback import datetime +from typing import List + from dateutil.parser import isoparse import docker @@ -335,7 +337,7 @@ def generate_container_gpu_stats(self, container_name): return gpu_stats @staticmethod - def gpu_stats(gpu_ids): + def gpu_stats(gpu_ids: List[int]): utilz, memory, temp = None, None, None gpu_stats_map = {} # gpu_id: int -> {"gpu_utilization", "gpu_memory_allocated", "gpu_temp"} gpu_ids = set(gpu_ids) From be635dbe2cf2ce29b36508846062fccc112485a9 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sun, 12 May 2024 16:22:44 -0700 Subject: [PATCH 045/251] Update container creation during deployment --- .../scheduler/comm_utils/container_utils.py | 5 +- .../comm_utils/gpu_utils/gpu_utils.py | 2 +- .../comm_utils/gpu_utils/nvidia_utils.py | 7 ++- .../comm_utils/gpu_utils/qualcomm_utils.py | 4 +- .../scheduler/comm_utils/hardware_utils.py | 4 +- .../device_model_deployment.py | 59 +++++++++---------- 6 files changed, 40 insertions(+), 41 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py index c7645104c0..2f5fa31fb5 100644 --- a/python/fedml/computing/scheduler/comm_utils/container_utils.py +++ 
b/python/fedml/computing/scheduler/comm_utils/container_utils.py @@ -227,9 +227,8 @@ def pull_image_with_policy(self, image_pull_policy, image_name, client=None): raise Exception(f"Unsupported image pull policy: {image_pull_policy}") class ContainerMetrics: - def __init__(self, cpu_percent, mem_used_megabytes, mem_avail_megabytes, network_recv_megabytes, network_sent_megabytes, - blk_read_megabytes, blk_write_megabytes, timestamp, gpus_stat - ): + def __init__(self, cpu_percent, mem_used_megabytes, mem_avail_megabytes, network_recv_megabytes, + network_sent_megabytes, blk_read_megabytes, blk_write_megabytes, timestamp, gpus_stat): self.cpu_percent = cpu_percent self.mem_used_megabytes = mem_used_megabytes self.mem_avail_megabytes = mem_avail_megabytes diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py index 292bcb3624..bc7a3b8216 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/gpu_utils.py @@ -52,7 +52,7 @@ def get_gpu_cards() -> List[GPUCard]: @staticmethod @abstractmethod - def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: + def get_docker_gpu_device_mapping(gpu_ids: Optional[List[int]], num_gpus: int = 0) -> Optional[Dict]: raise NotImplementedError @staticmethod diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py index 0c05b25644..34d0c3be1c 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/nvidia_utils.py @@ -28,11 +28,12 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo return GPUtil.getAvailable(order=order, limit=limit, maxLoad=max_load, maxMemory=max_memory) @staticmethod - def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: - if gpu_ids and len(gpu_ids): + def get_docker_gpu_device_mapping(gpu_ids: List[int], num_gpus: int = 0) -> Optional[Dict]: + if gpu_ids is not None and len(gpu_ids): gpu_id_list = list(map(lambda x: str(x), gpu_ids)) return {"device_requests": [docker.types.DeviceRequest(device_ids=gpu_id_list, capabilities=[["gpu"]])]} - return None + else: + return {"device_requests": [docker.types.DeviceRequest(count=num_gpus, capabilities=[['gpu']])]} @staticmethod def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: DockerClient) -> List[int]: diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py index 5f0eb3b5c6..9c7ea21ea9 100644 --- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py @@ -55,8 +55,8 @@ def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memo return list(map(lambda card: card.id, gpu_cards)) @staticmethod - def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: - if gpu_ids and len(gpu_ids): + def get_docker_gpu_device_mapping(gpu_ids: Optional[List[int]], num_gpus: int = 0) -> Optional[Dict]: + if gpu_ids is not None and len(gpu_ids): return { "devices": [f"{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}:{QualcommNPUtil.NPU_CARD_PATH}{gpu_id}" for gpu_id in gpu_ids]} diff --git 
a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py index a0d27fd7db..0062418631 100644 --- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -42,10 +42,10 @@ def get_available_gpu_ids(order: str = "memory", limit: int = 1, max_load: float return gpu_util.get_available_gpu_card_ids(order, limit, max_load, max_memory) if gpu_util is not None else [] @staticmethod - def get_docker_gpu_device_mapping(gpu_ids: List[int]) -> Optional[Dict]: + def get_docker_gpu_device_mapping(gpu_ids: Optional[List[int]], num_gpus: int = 0) -> Optional[Dict]: gpu_util = HardwareUtil.__get_util() if gpu_util is not None: - return gpu_util.get_docker_gpu_device_mapping(gpu_ids) + return gpu_util.get_docker_gpu_device_mapping(gpu_ids, num_gpus) return None @staticmethod diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py index bd04228355..8d3be211a2 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py @@ -18,6 +18,7 @@ import fedml from fedml.computing.scheduler.comm_utils import sys_utils, security_utils from fedml.computing.scheduler.comm_utils.container_utils import ContainerUtils +from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils for type_name in collections.abc.__all__: @@ -231,24 +232,6 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version, except docker.errors.APIError: raise Exception("Failed to get the container object") - # Allocate the GPU - # TODO: Make sure no competition for each replica in a single deployment - if exist_container_obj is not None: - client.api.remove_container(exist_container_obj.id, v=True, force=True) - device_requests = [] - if no_real_gpu_allocation is not None: - use_gpu = not no_real_gpu_allocation - if use_gpu: - logging.info("Number of GPUs: {}".format(num_gpus)) - if gpu_ids is not None: - gpu_id_list = map(lambda x: str(x), gpu_ids) - device_requests.append( - docker.types.DeviceRequest(device_ids=list(gpu_id_list), capabilities=[['gpu']])) - else: - device_requests.append( - docker.types.DeviceRequest(count=num_gpus, capabilities=[['gpu']])) - logging.info(f"device_requests: {device_requests}") - # Pull the inference image logging.info(f"Start pulling the inference image {inference_image_name}... 
with policy {image_pull_policy}") ContainerUtils.get_instance().pull_image_with_policy(image_pull_policy, inference_image_name) @@ -306,6 +289,32 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version, } environment["MAIN_ENTRY"] = relative_entry + host_config_dict = { + "binds": binds, + "port_bindings": { + port_inside_container: usr_indicated_worker_port + }, + "shm_size": shm_size, + "storage_opt": storage_opt, + "tmpfs": tmpfs, + "cpu_count": cpus, + "mem_limit": memory + } + + # Allocate the GPU + # TODO: Make sure no competition for each replica in a single deployment + if exist_container_obj is not None: + client.api.remove_container(exist_container_obj.id, v=True, force=True) + device_requests = {} + if no_real_gpu_allocation is not None: + use_gpu = not no_real_gpu_allocation + if use_gpu: + logging.info("Number of GPUs: {}".format(num_gpus)) + device_requests = HardwareUtil.get_docker_gpu_device_mapping(gpu_ids, num_gpus) + logging.info(f"device_requests: {device_requests}") + + host_config_dict.update(device_requests) + # Environment variables if not enable_custom_image: # For some image, the default user is root. Unified to fedml. @@ -325,24 +334,14 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version, environment[key] = extra_envs[key] try: + host_config = client.api.create_host_config(**host_config_dict) new_container = client.api.create_container( image=inference_image_name, name=default_server_container_name, volumes=volumns, ports=[port_inside_container], # port open inside the container environment=environment, - host_config=client.api.create_host_config( - binds=binds, - port_bindings={ - port_inside_container: usr_indicated_worker_port # Could be either None or a port number - }, - device_requests=device_requests, - shm_size=shm_size, - storage_opt=storage_opt, - tmpfs=tmpfs, - cpu_count=cpus, - mem_limit=memory, - ), + host_config=host_config, detach=True, command=customized_image_entry_cmd if enable_custom_image else None, entrypoint=customized_image_entry_cmd if enable_custom_image else None From 9d5b54f1c0ab826e6a900def7bd26743500f4575 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sun, 12 May 2024 18:02:52 -0700 Subject: [PATCH 046/251] Nit: Update naming --- .../scheduler/model_scheduler/device_model_deployment.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py index 8d3be211a2..ca83f21c33 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py @@ -305,15 +305,15 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version, # TODO: Make sure no competition for each replica in a single deployment if exist_container_obj is not None: client.api.remove_container(exist_container_obj.id, v=True, force=True) - device_requests = {} + device_mapping = {} if no_real_gpu_allocation is not None: use_gpu = not no_real_gpu_allocation if use_gpu: logging.info("Number of GPUs: {}".format(num_gpus)) - device_requests = HardwareUtil.get_docker_gpu_device_mapping(gpu_ids, num_gpus) - logging.info(f"device_requests: {device_requests}") + device_mapping = HardwareUtil.get_docker_gpu_device_mapping(gpu_ids, num_gpus) + logging.info(f"device_mapping: {device_mapping}") - host_config_dict.update(device_requests) + 
host_config_dict.update(device_mapping) # Environment variables if not enable_custom_image: From 2fcf57d9ca5b25dba4fe166ec7a95d9ea89c4239 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Sun, 12 May 2024 18:06:51 -0700 Subject: [PATCH 047/251] Add check as device_mapping can be None --- .../scheduler/model_scheduler/device_model_deployment.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py index ca83f21c33..1876373d25 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py @@ -313,7 +313,8 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version, device_mapping = HardwareUtil.get_docker_gpu_device_mapping(gpu_ids, num_gpus) logging.info(f"device_mapping: {device_mapping}") - host_config_dict.update(device_mapping) + if device_mapping: + host_config_dict.update(device_mapping) # Environment variables if not enable_custom_image: From 3f763957438256e63161bdd5d27ff262a855d340 Mon Sep 17 00:00:00 2001 From: Alex Date: Tue, 14 May 2024 01:21:30 +0800 Subject: [PATCH 048/251] [CoreEngine] make the protocol manager and job runner work on concurrent jobs. --- .../model_scheduler/master_job_runner.py | 26 +++++++------------ .../master_protocol_manager.py | 8 +++--- .../model_scheduler/worker_job_runner.py | 1 + .../scheduler_base_protocol_manager.py | 9 +++++++ .../slave/base_slave_protocol_manager.py | 7 +++-- 5 files changed, 29 insertions(+), 22 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 13876d0184..e3073700d2 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -74,6 +74,7 @@ def run_impl( model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \ FedMLDeployMasterJobRunner.parse_model_run_params(self.request_json) + self.run_id = run_id # Print request parameters. 
logging.info("model deployment request: {}".format(self.request_json)) @@ -120,9 +121,7 @@ def run_impl( self.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) self.start_device_inference_monitor( - run_id, end_point_name, model_id, model_name, model_version, - redis_addr=self.redis_addr, redis_port=self.redis_port, redis_password=self.redis_password - ) + run_id, end_point_name, model_id, model_name, model_version) # Changed the status to "IDLE" self.status_reporter.report_server_id_status( @@ -467,7 +466,7 @@ def process_deployment_result_message(self, topic=None, payload=None): def start_device_inference_gateway( run_id, end_point_name, model_id, model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, - agent_config=None, redis_addr=None, redis_port=None, redis_password=None + agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default" ): # start unified inference server running_model_name = ServerConstants.get_running_model_name(end_point_name, @@ -515,7 +514,7 @@ def start_device_inference_gateway( @staticmethod def start_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True, - redis_addr=None, redis_port=None, redis_password=None + redis_addr="localhost", redis_port=6379, redis_password="fedml_default" ): # start inference monitor server # Will report the qps related metrics to the MLOps @@ -530,7 +529,7 @@ def start_device_inference_monitor( [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str, "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name, "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr, - "-rp", redis_port, "-rpw", redis_password], + "-rp", str(redis_port), "-rpw", redis_password], should_capture_stdout=False, should_capture_stderr=False ) return monitor_process @@ -543,7 +542,7 @@ def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_id, model_name, model_version) @staticmethod - def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_password=None): + def recover_inference_and_monitor(): # noinspection PyBroadException try: history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs() @@ -559,9 +558,8 @@ def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_passwo inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \ FedMLDeployMasterJobRunner.parse_model_run_params(json.loads(job.running_json)) - FedMLModelCache.get_instance().set_redis_params(redis_addr, redis_password) - is_activated = FedMLModelCache.get_instance(redis_addr, redis_port). 
\ - get_end_point_activation(run_id) + FedMLModelCache.get_instance().set_redis_params() + is_activated = FedMLModelCache.get_instance().get_end_point_activation(run_id) if not is_activated: continue @@ -573,16 +571,12 @@ def recover_inference_and_monitor(redis_addr=None, redis_port=None, redis_passwo FedMLDeployMasterJobRunner.start_device_inference_gateway( run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port, - agent_config=agent_config, redis_addr=redis_addr, redis_port=redis_port, redis_password=redis_password) + agent_config=agent_config) FedMLDeployMasterJobRunner.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) FedMLDeployMasterJobRunner.start_device_inference_monitor( - run_id, end_point_name, model_id, model_name, model_version, - redis_addr=FedMLDeployMasterJobRunner.default_redis_addr, - redis_port=FedMLDeployMasterJobRunner.default_redis_port, - redis_password=FedMLDeployMasterJobRunner.default_redis_password - ) + run_id, end_point_name, model_id, model_name, model_version) except Exception as e: logging.info("recover inference and monitor: {}".format(traceback.format_exc())) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index a5f2a37dfe..eb23bf1278 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -199,7 +199,7 @@ def callback_start_deployment(self, topic, payload): self.request_json = request_json run_id_str = str(run_id) self.running_request_json[run_id_str] = request_json - self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(self.request_json) + self.request_json["master_node_ip"] = GeneralConstants.get_ip_address(request_json) # Set the target status of the devices to redis FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ @@ -217,13 +217,13 @@ def callback_start_deployment(self, topic, payload): # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received" FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( - self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"], + run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"], ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for endpoint {}".format(run_id), message_center=self.message_center) # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing" FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( - self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"], + run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], message_center=self.message_center) @@ -262,7 +262,7 @@ def callback_start_deployment(self, topic, payload): # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner" FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( - self.run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"], + run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"], ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"], message_center=self.message_center) diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py index ac9328592c..f9cfdcd921 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -138,6 +138,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center, inference_engine = model_config_parameters.get("inference_engine", ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT) inference_end_point_id = run_id + self.run_id = run_id MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py index e3cac7a425..11d6fa44fd 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py @@ -232,6 +232,15 @@ def rebuild_status_center(self, status_center_queue): self.status_reporter.edge_id = self.edge_id self.status_reporter.server_agent_id = self.server_agent_id + def generate_status_report(self, run_id, edge_id, server_agent_id=None): + status_reporter = MLOpsMetrics() + status_reporter.set_messenger(self, send_message_func=self.send_status_message) + status_reporter.run_id = run_id + status_reporter.edge_id = edge_id + if server_agent_id is not None: + status_reporter.server_agent_id = server_agent_id + return status_reporter + @abstractmethod def generate_protocol_manager(self): # Generate the protocol manager instance and set the attribute values. 
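For reference, the generate_status_report() helper added above builds a fresh MLOpsMetrics reporter bound to the protocol manager's status channel, so each concurrent run can report with its own run_id and edge_id instead of mutating the shared self.status_reporter. A minimal usage sketch, illustrative only; the run_id, edge_id and server_agent_id values are whatever the invoking callback received:

    # Inside a protocol-manager callback handling one specific run:
    reporter = self.generate_status_report(run_id, edge_id, server_agent_id=server_agent_id)
    reporter.report_client_id_status(
        edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, run_id=run_id)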
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index fc67ec2ece..4ff931e6fd 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -10,6 +10,7 @@ from ..comm_utils.constants import SchedulerConstants from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs from ..comm_utils.run_process_utils import RunProcessUtils +from ....core.mlops import MLOpsMetrics from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog from ....core.mlops.mlops_configs import MLOpsConfigs from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon @@ -55,8 +56,6 @@ def __init__(self, args, agent_config=None): self.fl_topic_request_device_info = None self.communication_mgr = None self.subscribed_topics = list() - self.mlops_metrics = None - self.status_reporter = None self.job_runners = dict() self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id) self.running_request_json = dict() @@ -263,6 +262,10 @@ def callback_start_train(self, topic, payload): run_id, matched_gpu_num, edge_id, inner_id=endpoint_id, model_master_device_id=model_master_device_id, model_slave_device_id=model_slave_device_id) + else: + self.generate_status_report(run_id, edge_id, server_agent_id=server_agent_id).report_client_id_status( + edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, run_id=run_id) + return logging.info( f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(edge_id)}") From 3417f300f2d9977042cb5a9d8fb4531eec67ce57 Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 May 2024 01:17:03 +0800 Subject: [PATCH 049/251] [CoreEngine] make the deployment status more stable. 
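This change also collapses the per-endpoint deployment result/status/stage topics into single shared topics, reports a FAILED deployment status from the status center when a deployment run completes with a FAILED server status, and hardens the package download with a watchdog: the download child process pushes a progress timestamp into a multiprocessing.Queue after each chunk and sets an Event on completion, while the parent gives up once no progress arrives for 60 seconds and raises instead of continuing without the package. A self-contained sketch of the same watchdog pattern, with a dummy worker standing in for the real chunked HTTP download:

    import multiprocessing
    import queue
    import time

    def download_proc(completed_event, info_queue):
        # Stand-in for the real chunked HTTP download loop.
        for _ in range(5):
            time.sleep(1)                   # pretend to write one chunk
            info_queue.put(time.time())     # heartbeat: time of last progress
        completed_event.set()               # signal normal completion

    if __name__ == "__main__":
        completed_event = multiprocessing.Event()
        info_queue = multiprocessing.Queue()
        download_process = multiprocessing.Process(
            target=download_proc, args=(completed_event, info_queue))
        download_process.start()

        download_time = time.time()
        allowed_block_download_time = 60    # seconds allowed without progress
        download_finished = False
        while True:
            try:
                # Drain the latest heartbeat, if any.
                download_time = info_queue.get(block=False)
            except queue.Empty:
                pass
            if time.time() - download_time > allowed_block_download_time:
                break                       # stalled: stop waiting
            if completed_event.is_set():
                download_finished = True
                break
            time.sleep(3)

        if not download_finished:
            download_process.terminate()
            raise Exception("Download timeout, please check if your network is stable.")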
--- python/fedml/api/api_test.py | 6 +-- .../model_scheduler/job_runner_msg_sender.py | 12 ++---- .../model_scheduler/master_job_runner.py | 3 +- .../master_protocol_manager.py | 27 ++++++------ .../model_scheduler/worker_job_runner.py | 6 +-- .../worker_protocol_manager.py | 1 + .../scheduler_core/general_constants.py | 13 ++++++ .../scheduler_base_job_runner.py | 42 +++++++++++++------ .../scheduler_base_protocol_manager.py | 1 + .../scheduler/scheduler_core/status_center.py | 1 + .../status_manager_protocols.py | 15 +++++++ .../scheduler/slave/base_slave_job_runner.py | 10 ++--- 12 files changed, 91 insertions(+), 46 deletions(-) diff --git a/python/fedml/api/api_test.py b/python/fedml/api/api_test.py index fc2fb77b20..5d899bb1fd 100755 --- a/python/fedml/api/api_test.py +++ b/python/fedml/api/api_test.py @@ -4,7 +4,7 @@ import fedml # Login -fedml.set_env_version("local") +fedml.set_env_version("test") fedml.set_local_on_premise_platform_port(18080) error_code, error_msg = fedml.api.fedml_login(api_key="1316b93c82da40ce90113a2ed12f0b14") if error_code != 0: @@ -19,7 +19,7 @@ # Launch job launch_result_list = list() -for i in range(0, 1): +for i in range(0, 10): launch_result = fedml.api.launch_job(yaml_file) launch_result_list.append(launch_result) # launch_result = fedml.api.launch_job_on_cluster(yaml_file, "alex-cluster") @@ -33,7 +33,7 @@ if log_result is None or log_result.run_status is None: print(f"Failed to get job status.") #exit(1) - print(f"Run status {log_result.run_status}") + print(f"Run {launch_result.run_id}, status {log_result.run_status}") time.sleep(0.5) # Get job logs diff --git a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py index 482a21b2d4..235c4deb74 100755 --- a/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py +++ b/python/fedml/computing/scheduler/model_scheduler/job_runner_msg_sender.py @@ -33,8 +33,7 @@ def send_deployment_results(self, end_point_id, end_point_name, model_name, model_inference_url, model_version, inference_port, inference_engine, model_metadata, model_config, input_json, output_json, replica_id_list=None): - deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result" - deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id) + deployment_results_topic = "model_ops/model_device/return_deployment_result" deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name, "model_name": model_name, "model_url": model_inference_url, "version": model_version, "port": inference_port, @@ -48,15 +47,13 @@ def send_deployment_results(self, end_point_id, end_point_name, logging.info(f"[Master] deployment_results_payload is sent to mlops: {deployment_results_payload}") self.message_center.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload)) - self.message_center.send_message_json(deployment_results_topic_prefix, json.dumps(deployment_results_payload)) @staticmethod def send_deployment_status( end_point_id, end_point_name, model_name, model_inference_url, model_status, message_center=None): if message_center is None: return - deployment_status_topic_prefix = "model_ops/model_device/return_deployment_status" - deployment_status_topic = "{}/{}".format(deployment_status_topic_prefix, end_point_id) + deployment_status_topic = "model_ops/model_device/return_deployment_status" deployment_status_payload = {"end_point_id": 
end_point_id, "end_point_name": end_point_name, "model_name": model_name, "model_url": model_inference_url, @@ -65,7 +62,6 @@ def send_deployment_status( logging.info(f"[Master] deployment_status_payload is sent to mlops: {deployment_status_payload}") message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload)) - message_center.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload)) @staticmethod def send_deployment_stages(end_point_id, model_name, model_id, model_inference_url, @@ -73,8 +69,7 @@ def send_deployment_stages(end_point_id, model_name, model_id, model_inference_u message_center=None): if message_center is None: return - deployment_stages_topic_prefix = "model_ops/model_device/return_deployment_stages" - deployment_stages_topic = "{}/{}".format(deployment_stages_topic_prefix, end_point_id) + deployment_stages_topic = "model_ops/model_device/return_deployment_stages" deployment_stages_payload = {"model_name": model_name, "model_id": model_id, "model_url": model_inference_url, @@ -85,7 +80,6 @@ def send_deployment_stages(end_point_id, model_name, model_id, model_inference_u "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} message_center.send_message_json(deployment_stages_topic, json.dumps(deployment_stages_payload)) - message_center.send_message_json(deployment_stages_topic_prefix, json.dumps(deployment_stages_payload)) logging.info(f"-------- Stages has been sent to mlops with stage {model_stages_index} and " f"payload {deployment_stages_payload}") diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index e3073700d2..8ce9d0e102 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -41,6 +41,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id agent_log_file_dir=ServerConstants.get_log_file_dir() ) + self.is_deployment_runner = True self.infer_host = "127.0.0.1" self.redis_addr = "local" self.redis_port = "6379" @@ -306,7 +307,7 @@ def process_deployment_result_message(self, topic=None, payload=None): return else: # This is the last worker that failed, so we should continue to "ABORTED" status - model_config_parameters = self.running_request_json[run_id_str]["parameters"] + model_config_parameters = self.request_json["parameters"] inference_port = model_config_parameters.get("server_internal_port", ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) inference_port_external = model_config_parameters.get("server_external_port", inference_port) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index eb23bf1278..b4c5b41d74 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -18,6 +18,7 @@ def __init__(self, args, agent_config=None): FedMLBaseMasterProtocolManager.__init__(self, args, agent_config=agent_config) self.message_center_name = "deploy_master_agent" + self.is_deployment_status_center = True self.topic_start_deployment = None self.topic_activate_endpoint = None @@ -215,18 +216,6 @@ def callback_start_deployment(self, topic, payload): self.subscribe_deployment_messages_from_slave_devices(request_json) - # Report stage to mlops: 
MODEL_DEPLOYMENT_STAGE1 = "Received" - FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( - run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for endpoint {}".format(run_id), - message_center=self.message_center) - - # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing" - FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( - run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], - message_center=self.message_center) - ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id) # Num diff @@ -260,6 +249,18 @@ def callback_start_deployment(self, topic, payload): if process is not None: ServerConstants.save_run_process(run_id, process.pid) + # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received" + FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( + run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"], + ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], "Received request for endpoint {}".format(run_id), + message_center=self.message_center) + + # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing" + FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( + run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"], + ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], + message_center=self.message_center) + # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner" FedMLDeployJobRunnerManager.get_instance().send_deployment_stages( run_id, model_name, model_id, "", ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"], @@ -328,6 +329,8 @@ def subscribe_deployment_messages_from_slave_devices(self, request_json): logging.info("subscribe device messages {}".format(deployment_results_topic)) + self.setup_listeners_for_edge_status(run_id, edge_id_list, self.edge_id) + def subscribe_spec_device_message(self, run_id, device_id): if device_id == self.edge_id: return diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py index f9cfdcd921..332dab2547 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -31,6 +31,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id agent_log_file_dir=ClientConstants.get_log_file_dir() ) + self.is_deployment_runner = True self.infer_host = "127.0.0.1" self.redis_addr = "local" self.redis_port = "6379" @@ -286,11 +287,8 @@ def run_impl(self, run_extend_queue_list, sender_message_center, inference_engine, model_metadata, model_config) self.status_reporter.run_id = self.run_id - self.status_reporter.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=self.run_id) - return False + raise Exception("[Worker] Failed to deploy the model.") else: # Send failed successful result back to master logging.info("Finished deployment, continue to send results to master...") diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py 
b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py index 3a0f835b6c..f9bc70452d 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_protocol_manager.py @@ -24,6 +24,7 @@ def __init__(self, args, agent_config=None): FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config) self.message_center_name = "deploy_slave_agent" + self.is_deployment_status_center = True self.topic_start_deployment = None self.topic_delete_deployment = None diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py index ba8842b30e..0cc6044d4b 100755 --- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py +++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py @@ -44,6 +44,19 @@ class GeneralConstants: MSG_MLOPS_SERVER_STATUS_FINISHED = "FINISHED" MSG_MLOPS_SERVER_STATUS_EXCEPTION = "EXCEPTION" + MSG_MODELOPS_DEPLOYMENT_STATUS_INITIALIZING = "INITIALIZING" + MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING = "DEPLOYING" + MSG_MODELOPS_DEPLOYMENT_STATUS_INFERRING = "INFERRING" + MSG_MODELOPS_DEPLOYMENT_STATUS_OVERLOAD = "OVERLOAD" + MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED = "FAILED" + MSG_MODELOPS_DEPLOYMENT_STATUS_RESCALING = "RESCALING" + MSG_MODELOPS_DEPLOYMENT_STATUS_UPDATING = "UPDATING" + MSG_MODELOPS_DEPLOYMENT_STATUS_UPDATING_FAILED = "UPDATING_FAILED" + MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING = "ABORTING" + MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED = "ABORTED" + MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED = "DEPLOYED" + MSG_MODELOPS_DEPLOYMENT_STATUS_KILLED = "KILLED" + MASTER_LOGIN_PROGRAM = "server_login.py" SLAVE_LOGIN_PROGRAM = "client_login.py" diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py index 03d3fd5d92..69b69f4d4c 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -1,5 +1,6 @@ import json import logging +import multiprocessing import os import platform import random @@ -7,6 +8,7 @@ import time import traceback import zipfile +import queue from ..comm_utils.constants import SchedulerConstants from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs from ..scheduler_entry.constants import Constants @@ -82,8 +84,7 @@ def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id "${FEDSYS.CLIENT_OBJECT_LIST}": "", "${FEDSYS.LOG_SERVER_URL}": "", } - self.download_time = time.time() - self.download_finished = False + self.is_deployment_runner = False def __repr__(self): return "<{klass} @{id:x} {attrs}>".format( @@ -162,7 +163,7 @@ def package_download_progress(self, count, blksize, filesize): self.prev_download_progress = progress_int logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) - def download_package_proc(self, package_url, local_package_file): + def download_package_proc(self, package_url, local_package_file, completed_event, info_queue): import requests headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) ' 'AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36'} @@ -188,8 +189,8 @@ def download_package_proc(self, package_url, local_package_file): written_size = f.write(chunk) total_size += 
written_size logging.info("package downloaded size %.2f KB", total_size/1024) - self.download_time = time.time() - self.download_finished = True + info_queue.put(time.time()) + completed_event.set() def retrieve_and_unzip_package(self, package_name, package_url): local_package_path = self.agent_package_download_dir @@ -202,26 +203,43 @@ def retrieve_and_unzip_package(self, package_name, package_url): ssl._create_default_https_context = ssl._create_unverified_context # Open a process to download the package so that we can avoid the request is blocked and check the timeout. - self.download_finished = False - self.download_time = time.time() from multiprocessing import Process - download_process = Process(target=self.download_package_proc, args=(package_url, local_package_file)) + completed_event = multiprocessing.Event() + info_queue = multiprocessing.Queue() + download_process = Process(target=self.download_package_proc, + args=(package_url, local_package_file, completed_event, info_queue)) download_process.start() - allowed_block_download_time = 30 + allowed_block_download_time = 60 + download_finished = False + download_time = time.time() while True: - block_time = time.time() - self.download_time + try: + queue_time = info_queue.get(block=False, timeout=3) + download_time = queue_time + except queue.Empty as e: + pass + + block_time = time.time() - download_time if block_time > allowed_block_download_time: break - if self.download_finished: + + if completed_event.is_set(): + download_finished = True break time.sleep(3) try: - if not self.download_finished: + if not download_finished: download_process.terminate() download_process.kill() except Exception as e: pass + if not download_finished: + raise Exception("Download timeout, please check if your network is stable.") + + if not os.path.exists(local_package_file): + raise Exception(f"Failed to download, the zip file is not exist at {local_package_file}.") + # Another method to async download. 
# import socket # socket.setdefaulttimeout(15) diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py index 11d6fa44fd..9bb8b7a7ec 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py @@ -224,6 +224,7 @@ def start_status_listener_center(self): def rebuild_status_center(self, status_center_queue): self.status_center = FedMLStatusCenter(message_queue=status_center_queue) + self.status_center.is_deployment_status_center = self.is_deployment_status_center if self.status_reporter is None: self.status_reporter = MLOpsMetrics() diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py index 76f811993e..4a55dbb679 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py @@ -99,6 +99,7 @@ def __init__(self, message_queue=None): self.status_message_center = None self.status_manager_instance = None self.status_runner = None + self.is_deployment_status_center = False def __repr__(self): return "<{klass} @{id:x} {attrs}>".format( diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 811ff2a2d5..01caf7db67 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -2,6 +2,7 @@ import logging import os import shutil +import time from os import listdir from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon @@ -67,6 +68,9 @@ def process_job_completed_status(self, master_id, status): # self.remove_listener_for_run_metrics(self.run_id) # self.remove_listener_for_run_logs(self.run_id) + if self.status_center.is_deployment_status_center and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: + self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) + def process_job_exception_status(self, master_id, status): # Send the exception status to slave devices. 
self.report_exception_status( @@ -302,3 +306,14 @@ def status_center_request_job_status_from_master_in_slave_agent(self, topic, pay topic_request_job_status = f"{GeneralConstants.MSG_TOPIC_REQUEST_JOB_STATUS_PREFIX}{master_id}" payload_request_job_status = {"run_id": run_id, "edge_id": edge_id} self.message_center.send_message(topic_request_job_status, json.dumps(payload_request_job_status)) + + def report_deployment_status(self, run_id, status): + deployment_status_topic = "model_ops/model_device/return_deployment_status" + deployment_status_payload = {"end_point_id": run_id, "end_point_name": "", + "model_name": "", + "model_url": "", + "model_status": status, + "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} + logging.info(f"[StatusCenter] deployment_status_payload is sent to mlops: {deployment_status_payload}") + + self.message_center.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload)) diff --git a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py index cc7c3c222b..5e530dbba7 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_job_runner.py +++ b/python/fedml/computing/scheduler/slave/base_slave_job_runner.py @@ -71,7 +71,7 @@ def run(self, process_event, completed_event, run_extend_queue_list, logging.error(f"Runner exited with errors. Exception: {e}, Traceback {traceback.format_exc()}") self.status_reporter.report_client_id_status( self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - server_id=self.server_id, run_id=self.run_id) + is_from_model=self.is_deployment_runner, server_id=self.server_id, run_id=self.run_id) finally: if self.mlops_metrics is not None: computing_ended_time = MLOpsUtils.get_ntp_time() @@ -107,7 +107,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center, self.status_reporter.report_client_id_status( self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING, - running_json=json.dumps(self.request_json), run_id=run_id) + is_from_model=self.is_deployment_runner, running_json=json.dumps(self.request_json), run_id=run_id) # get training params private_local_data_dir = data_config.get("privateLocalData", "") @@ -192,7 +192,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center, self.status_reporter.report_client_id_status( self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - server_id=self.server_id, run_id=run_id) + is_from_model=self.is_deployment_runner, server_id=self.server_id, run_id=run_id) if is_launch_task: sys_utils.log_return_info(f"job {run_id}", ret_code) @@ -225,7 +225,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center, # Send failed msg when exceptions. 
self.status_reporter.report_client_id_status( self.edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - server_id=self.server_id, run_id=run_id) + is_from_model=self.is_deployment_runner, server_id=self.server_id, run_id=run_id) @abstractmethod def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None): @@ -239,7 +239,7 @@ def reset_devices_status(self, edge_id, status): self.status_reporter.run_id = self.run_id self.status_reporter.edge_id = edge_id self.status_reporter.report_client_id_status( - edge_id, status, server_id=self.server_id, run_id=self.run_id) + edge_id, status, is_from_model=self.is_deployment_runner, server_id=self.server_id, run_id=self.run_id) def start_runner_process( self, run_id, request_json, edge_id=None, From 060eef68749c68cf1d5ad187c8c8f82f2dd78e1c Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 May 2024 01:27:53 +0800 Subject: [PATCH 050/251] [CoreEngine] update the keys. --- python/fedml/api/api_test.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/api/api_test.py b/python/fedml/api/api_test.py index 5d899bb1fd..5a01a76448 100755 --- a/python/fedml/api/api_test.py +++ b/python/fedml/api/api_test.py @@ -6,7 +6,7 @@ # Login fedml.set_env_version("test") fedml.set_local_on_premise_platform_port(18080) -error_code, error_msg = fedml.api.fedml_login(api_key="1316b93c82da40ce90113a2ed12f0b14") +error_code, error_msg = fedml.api.fedml_login(api_key="") if error_code != 0: print("API Key is invalid!") exit(1) From 66af3db9bbe06fd13f91ead915dd4e860dd83bac Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 May 2024 01:57:35 +0800 Subject: [PATCH 051/251] [CoreEngine] make the client and server runner deprecated. --- .../master/{server_runner.py => server_runner_deprecated.py} | 0 ...device_client_runner.py => device_client_runner_deprecated.py} | 0 ...device_server_runner.py => device_server_runner_deprecated.py} | 0 .../slave/{client_runner.py => client_runner_deprecated.py} | 0 4 files changed, 0 insertions(+), 0 deletions(-) rename python/fedml/computing/scheduler/master/{server_runner.py => server_runner_deprecated.py} (100%) rename python/fedml/computing/scheduler/model_scheduler/{device_client_runner.py => device_client_runner_deprecated.py} (100%) rename python/fedml/computing/scheduler/model_scheduler/{device_server_runner.py => device_server_runner_deprecated.py} (100%) rename python/fedml/computing/scheduler/slave/{client_runner.py => client_runner_deprecated.py} (100%) diff --git a/python/fedml/computing/scheduler/master/server_runner.py b/python/fedml/computing/scheduler/master/server_runner_deprecated.py similarity index 100% rename from python/fedml/computing/scheduler/master/server_runner.py rename to python/fedml/computing/scheduler/master/server_runner_deprecated.py diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py similarity index 100% rename from python/fedml/computing/scheduler/model_scheduler/device_client_runner.py rename to python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py similarity index 100% rename from python/fedml/computing/scheduler/model_scheduler/device_server_runner.py rename to 
python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py diff --git a/python/fedml/computing/scheduler/slave/client_runner.py b/python/fedml/computing/scheduler/slave/client_runner_deprecated.py similarity index 100% rename from python/fedml/computing/scheduler/slave/client_runner.py rename to python/fedml/computing/scheduler/slave/client_runner_deprecated.py From 54f8b00201dad42b04f232b3d9cb310c214a1ed2 Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 May 2024 18:12:16 +0800 Subject: [PATCH 052/251] [CoreEngine] make the cloud agent work. --- .../scheduler/master/base_master_agent.py | 6 ++--- .../master/base_master_job_runner.py | 4 ++++ .../master/base_master_job_runner_manager.py | 13 ++++++++++ .../master/base_master_protocol_manager.py | 24 ++++++++++++------- .../scheduler/master/cloud_server_manager.py | 6 ++++- .../scheduler/master/server_login.py | 2 +- .../scheduler_core/account_manager.py | 8 ++++--- 7 files changed, 47 insertions(+), 16 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_agent.py b/python/fedml/computing/scheduler/master/base_master_agent.py index 66bc35d96f..3aff523c24 100755 --- a/python/fedml/computing/scheduler/master/base_master_agent.py +++ b/python/fedml/computing/scheduler/master/base_master_agent.py @@ -17,18 +17,18 @@ def __init__(self): self.master_api_process = None self.mlops_metrics = MLOpsMetrics() self.status_reporter = None - self.enable_simulation_cloud_agent = True + self.enable_simulation_cloud_agent = False self.use_local_process_as_cloud_server = False self.protocol_mgr = None def login( self, user_id, api_key=None, device_id=None, - os_name=None, role=None + os_name=None, role=None, runner_cmd=None ): # Login account login_result = FedMLAccountManager.get_instance().login( user_id, api_key=api_key, device_id=device_id, - os_name=os_name, role=role + os_name=os_name, role=role, runner_cmd=runner_cmd ) if login_result is not None: self.agent_args = login_result diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py index 07c297c65d..1827de481d 100755 --- a/python/fedml/computing/scheduler/master/base_master_job_runner.py +++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py @@ -136,6 +136,10 @@ def run_impl( logging.info("Detect all status of Edge ids: " + str(edge_ids)) + self.status_reporter.report_server_id_status( + self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status( edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue, callback_when_edges_ready=self.send_training_request_to_edges) diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py index 694fab5f5f..b1066910c1 100755 --- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py @@ -36,6 +36,19 @@ def start_job_runner( status_center_queue=status_center_queue ) + def stop_job_runner( + self, run_id, args=None, edge_id=None, request_json=None, + run_as_cloud_agent=False + ): + super().stop_job_runner(run_id) + + if run_as_cloud_agent: + cloud_server_mgr = FedMLCloudServerManager( + args, run_id=run_id, edge_id=edge_id, 
request_json=request_json, + agent_config=args.agent_config + ) + cloud_server_mgr.stop_cloud_server() + def _start_cloud_server( self, args, run_id, request_json, edge_id=None, use_local_process_as_cloud_server=False diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index ef59431ee8..b8479ebc03 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -44,7 +44,7 @@ def __init__(self, args, agent_config=None): self.run_as_cloud_server = False self.run_as_edge_server_and_agent = False self.run_as_cloud_server_and_agent = False - self.enable_simulation_cloud_agent = True + self.enable_simulation_cloud_agent = False self.use_local_process_as_cloud_server = False self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id) self.running_request_json = dict() @@ -140,12 +140,6 @@ def callback_start_train(self, topic=None, payload=None): except Exception: pass - # Parse the message when running in the cloud server mode. - if self.run_as_cloud_server: - message_bytes = payload.encode("ascii") - base64_bytes = base64.b64decode(message_bytes) - payload = base64_bytes.decode("ascii") - # Parse the parameters # [NOTES] Example Request JSON: # https://fedml-inc.larksuite.com/wiki/ScnIwUif9iupbjkYS0LuBrd6sod#WjbEdhYrvogmlGxKTOGu98C6sSb @@ -264,6 +258,9 @@ def callback_stop_train(self, topic, payload, use_payload=None): run_id = request_json.get("runId", None) run_id = request_json.get("id", None) if run_id is None else run_id run_id_str = str(run_id) + server_id = request_json.get("serverId", None) + if server_id is None: + server_id = request_json.get("server_id", None) # Broadcast the job status to all edges self.rebuild_status_center(self.get_status_queue()) @@ -273,8 +270,14 @@ def callback_stop_train(self, topic, payload, use_payload=None): if self.running_request_json.get(run_id_str, None) is not None: self.running_request_json.pop(run_id_str) + # Send the stopping request to edges + if self.run_as_cloud_agent: + self.send_training_stop_request_to_cloud_server(server_id, payload) + # Stop the job runner - self._get_job_runner_manager().stop_job_runner(run_id) + self._get_job_runner_manager().stop_job_runner( + run_id, args=self.args, edge_id=self.edge_id, request_json=request_json, + run_as_cloud_agent=self.run_as_cloud_agent) def callback_run_logs(self, topic, payload): run_id = str(topic).split('/')[-1] @@ -498,6 +501,11 @@ def send_training_stop_request_to_specific_edge(self, edge_id, payload): logging.info("stop_train: send topic " + topic_stop_train) self.message_center.send_message(topic_stop_train, payload) + def send_training_stop_request_to_cloud_server(self, edge_id, payload): + topic_stop_train = "mlops/flserver_agent_" + str(edge_id) + "/stop_train" + logging.info("stop_train: send topic " + topic_stop_train) + self.message_center.send_message(topic_stop_train, payload) + def send_status_check_msg(self, run_id, edge_id, server_id, context=None): topic_status_check = f"server/client/request_device_info/{edge_id}" payload = {"server_id": server_id, "run_id": run_id} diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py index ed39707034..342d785afe 100755 --- a/python/fedml/computing/scheduler/master/cloud_server_manager.py +++ 
b/python/fedml/computing/scheduler/master/cloud_server_manager.py @@ -3,6 +3,8 @@ import logging import os import traceback + +import fedml from fedml.computing.scheduler.comm_utils.sys_utils import get_python_program @@ -18,10 +20,12 @@ def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_con self.edge_id = edge_id self.request_json = request_json self.agent_config = agent_config + if version is None: + version = fedml.get_env_version() self.version = version image_version = self.version if image_version == "local": - image_version = "dev" + image_version = "test" self.server_docker_base_image = "/fedml-device-image:" + image_version self.cloud_server_name = None diff --git a/python/fedml/computing/scheduler/master/server_login.py b/python/fedml/computing/scheduler/master/server_login.py index 3d8d1f6fc9..8dd0696bc8 100755 --- a/python/fedml/computing/scheduler/master/server_login.py +++ b/python/fedml/computing/scheduler/master/server_login.py @@ -39,6 +39,6 @@ def logout(): master_agent = FedMLLaunchMasterAgent() if args.type == 'login': master_agent.login(args.api_key, api_key=args.api_key, device_id=args.device_id, - os_name=args.os_name, role=args.role) + os_name=args.os_name, role=args.role, runner_cmd=args.runner_cmd) else: master_agent.logout() diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py index da04fc3989..4b4cc9fd31 100755 --- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py @@ -48,10 +48,10 @@ def __init__(self): def get_instance(): return FedMLAccountManager() - def login(self, user_id, api_key="", device_id=None, os_name=None, role=None): + def login(self, user_id, api_key="", device_id=None, os_name=None, role=None, runner_cmd=None): # Build the agent args self.build_agent_args( - user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role + user_id, api_key=api_key, device_id=device_id, os_name=os_name, role=role, runner_cmd=runner_cmd ) # Fetch configs from the MLOps config server. @@ -126,7 +126,7 @@ def login(self, user_id, api_key="", device_id=None, os_name=None, role=None): return self.agent_args - def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, role=None): + def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, role=None, runner_cmd=None): # Generate the suffix for device based on the role device_id_suffix = None is_master = False @@ -197,6 +197,7 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, # Set the unique device id self.agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub self.agent_args.unique_device_id = unique_device_id + self.agent_args.runner_cmd = runner_cmd def fill_argent_args( self, log_server_url=None, server_id=None, edge_id=None, @@ -440,6 +441,7 @@ def __init__(self, role=None, account_id=None, api_key=None, server_id=None, cur self.using_mlops = True self.server_agent_id = None self.general_edge_id = None + self.runner_cmd = None def is_cloud_server(self): return self.role == FedMLAccountManager.ROLE_CLOUD_SERVER From 75cd1781468de07756af9d194d4b77b8eba2432b Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 May 2024 18:19:29 +0800 Subject: [PATCH 053/251] [CoreEngine] update the version. 
--- python/fedml/__init__.py | 2 +- python/setup.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py index 8044387b65..6b3ac3f61b 100644 --- a/python/fedml/__init__.py +++ b/python/fedml/__init__.py @@ -34,7 +34,7 @@ _global_training_type = None _global_comm_backend = None -__version__ = "0.8.30" +__version__ = "0.8.31" # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release diff --git a/python/setup.py b/python/setup.py index cce0ddb2ca..fa425c98f7 100644 --- a/python/setup.py +++ b/python/setup.py @@ -120,7 +120,7 @@ def finalize_options(self): setup( name="fedml", - version="0.8.30", + version="0.8.31", author="FedML Team", author_email="ch@fedml.ai", description="A research and production integrated edge-cloud library for " From 90dc19bb957fb0b175d5e1f75e1bc3e91c18b700 Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 May 2024 19:32:36 +0800 Subject: [PATCH 054/251] [CoreEngine] stop the run in the cloud agent. --- .../master/base_master_job_runner_manager.py | 10 ++-- .../master/base_master_protocol_manager.py | 25 +++++++-- .../scheduler/master/cloud_server_manager.py | 53 +++++++++++-------- .../scheduler_core/general_constants.py | 5 ++ .../status_manager_protocols.py | 3 ++ 5 files changed, 64 insertions(+), 32 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py index b1066910c1..f4735227bc 100755 --- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py @@ -37,17 +37,15 @@ def start_job_runner( ) def stop_job_runner( - self, run_id, args=None, edge_id=None, request_json=None, + self, run_id, args=None, server_id=None, request_json=None, run_as_cloud_agent=False ): super().stop_job_runner(run_id) if run_as_cloud_agent: - cloud_server_mgr = FedMLCloudServerManager( - args, run_id=run_id, edge_id=edge_id, request_json=request_json, - agent_config=args.agent_config - ) - cloud_server_mgr.stop_cloud_server() + stopping_process = Process( + target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config)) + stopping_process.start() def _start_cloud_server( self, args, run_id, request_json, edge_id=None, diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index b8479ebc03..0873548a42 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -33,6 +33,7 @@ def __init__(self, args, agent_config=None): self.agent_config = agent_config self.topic_start_train = None self.topic_stop_train = None + self.topic_exit_train = None self.topic_report_status = None self.topic_ota_msg = None self.topic_response_device_info = None @@ -61,6 +62,9 @@ def generate_topics(self): # The topic for stopping training self.topic_stop_train = "mlops/flserver_agent_" + str(self.edge_id) + "/stop_train" + # The topic for exiting training + self.topic_exit_train = GeneralConstants.get_topic_exit_train(self.edge_id) + # The topic for reporting current device status.
self.topic_report_status = "mlops/report_device_status" @@ -89,6 +93,7 @@ def generate_topics(self): self.subscribed_topics.clear() self.add_subscribe_topic(self.topic_start_train) self.add_subscribe_topic(self.topic_stop_train) + self.add_subscribe_topic(self.topic_exit_train) self.add_subscribe_topic(self.topic_report_status) self.add_subscribe_topic(self.topic_ota_msg) self.add_subscribe_topic(self.topic_response_device_info) @@ -103,6 +108,7 @@ def add_protocol_handler(self): # Add the message listeners for all topics self.add_message_listener(self.topic_start_train, self.callback_start_train) self.add_message_listener(self.topic_stop_train, self.callback_stop_train) + self.add_message_listener(self.topic_exit_train, self.callback_exit_train) self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg) self.add_message_listener(self.topic_report_status, self.callback_report_current_status) self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info) @@ -270,13 +276,24 @@ def callback_stop_train(self, topic, payload, use_payload=None): if self.running_request_json.get(run_id_str, None) is not None: self.running_request_json.pop(run_id_str) - # Send the stopping request to edges - if self.run_as_cloud_agent: - self.send_training_stop_request_to_cloud_server(server_id, payload) + # Stop the job runner + self._get_job_runner_manager().stop_job_runner( + run_id, args=self.args, server_id=server_id, request_json=request_json, + run_as_cloud_agent=self.run_as_cloud_agent) + + def callback_exit_train(self, topic, payload): + # Parse the parameters. + request_json = json.loads(payload) + run_id = request_json.get("runId", None) + run_id = request_json.get("id", None) if run_id is None else run_id + run_id_str = str(run_id) + server_id = request_json.get("serverId", None) + if server_id is None: + server_id = request_json.get("server_id", None) # Stop the job runner self._get_job_runner_manager().stop_job_runner( - run_id, args=self.args, edge_id=self.edge_id, request_json=request_json, + run_id, args=self.args, server_id=server_id, request_json=request_json, run_as_cloud_agent=self.run_as_cloud_agent) def callback_run_logs(self, topic, payload): diff --git a/python/fedml/computing/scheduler/master/cloud_server_manager.py b/python/fedml/computing/scheduler/master/cloud_server_manager.py index 342d785afe..040a0f38a3 100755 --- a/python/fedml/computing/scheduler/master/cloud_server_manager.py +++ b/python/fedml/computing/scheduler/master/cloud_server_manager.py @@ -12,6 +12,7 @@ class FedMLCloudServerManager: FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-" LOCAL_RUNNER_INFO_DIR_NAME = 'runner_infos' STATUS_IDLE = "IDLE" + FEDML_SERVER_BASE_IMAGE = "/fedml-device-image:" def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_config=None, version=None): self.server_docker_image = None @@ -26,7 +27,7 @@ def __init__(self, args, run_id=None, edge_id=None, request_json=None, agent_con image_version = self.version if image_version == "local": image_version = "test" - self.server_docker_base_image = "/fedml-device-image:" + image_version + self.server_docker_base_image = FedMLCloudServerManager._get_server_base_image(image_version) self.cloud_server_name = None @staticmethod @@ -125,44 +126,52 @@ def start_cloud_server(self, packages_config): logging.info("start run with k8s: " + run_deployment_cmd) os.system(run_deployment_cmd) - def stop_cloud_server(self): - self.cloud_server_name = 
FedMLCloudServerManager.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) \ - + "-" + str(self.edge_id) - self.server_docker_image = ( - self.agent_config["docker_config"]["registry_server"] - + self.agent_config["docker_config"]["registry_dir"] - + self.server_docker_base_image + @staticmethod + def stop_cloud_server(run_id, server_id, agent_config): + cloud_server_name = FedMLCloudServerManager._get_cloud_server_name(run_id, server_id) + server_docker_image = ( + agent_config["docker_config"]["registry_server"] + + agent_config["docker_config"]["registry_dir"] + + FedMLCloudServerManager._get_server_base_image(fedml.get_env_version()) ) delete_deployment_cmd = ( "export FEDML_AGGREGATOR_NAME=" - + self.cloud_server_name + + cloud_server_name + ";export FEDML_AGGREGATOR_SVC=" - + self.cloud_server_name + + cloud_server_name + ";export FEDML_AGGREGATOR_VERSION=" - + self.version + + fedml.get_env_version() + ';export FEDML_AGGREGATOR_IMAGE_PATH="' - + self.server_docker_image + + server_docker_image + '"' + ";export FEDML_CONF_ID=" - + self.cloud_server_name + + cloud_server_name + ";export FEDML_DATA_PV_ID=" - + self.cloud_server_name + + cloud_server_name + ";export FEDML_DATA_PVC_ID=" - + self.cloud_server_name + + cloud_server_name + ";export FEDML_REGISTRY_SECRET_SUFFIX=" - + self.cloud_server_name + + cloud_server_name + ";kubectl -n fedml-devops-aggregator-" - + self.version + + fedml.get_env_version() + " delete deployment " - + self.cloud_server_name + + cloud_server_name + ";kubectl -n fedml-devops-aggregator-" - + self.version + + fedml.get_env_version() + " delete svc " - + self.cloud_server_name + + cloud_server_name + ";kubectl -n fedml-devops-aggregator-" - + self.version + + fedml.get_env_version() + " delete secret secret-" - + self.cloud_server_name + + cloud_server_name ) logging.info("stop run with k8s: " + delete_deployment_cmd) os.system(delete_deployment_cmd) + + @staticmethod + def _get_server_base_image(version): + return f"{FedMLCloudServerManager.FEDML_SERVER_BASE_IMAGE}{version}" + + @staticmethod + def _get_cloud_server_name(run_id, server_id): + return f"{FedMLCloudServerManager.FEDML_CLOUD_SERVER_PREFIX}{run_id}-{server_id}" diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py index 0cc6044d4b..347f157333 100755 --- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py +++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py @@ -206,3 +206,8 @@ def get_ip_address(request_json, infer_host=None): ip = infer_host return ip + + @staticmethod + def get_topic_exit_train(server_id): + topic_exit_train = f"status_center/flserver_agent_{server_id}/exit_train" + return topic_exit_train diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 01caf7db67..921632472b 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -67,6 +67,9 @@ def process_job_completed_status(self, master_id, status): # self.stop_cloud_server() # self.remove_listener_for_run_metrics(self.run_id) # self.remove_listener_for_run_logs(self.run_id) + payload_exit_train = {"runId": self.run_id, "serverId": master_id} + self.message_center.receive_message( + GeneralConstants.get_topic_exit_train(master_id), json.dumps(payload_exit_train)) if 
self.status_center.is_deployment_status_center and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) From 7064783779837b7bd73d68fcdc4d7ac269b4586f Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 May 2024 20:17:11 +0800 Subject: [PATCH 055/251] [CoreEngine] stop the run in the cloud agent. --- .../scheduler/master/base_master_job_runner_manager.py | 4 ++-- .../scheduler/master/base_master_protocol_manager.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py index f4735227bc..2b6c4d3b5a 100755 --- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py @@ -38,11 +38,11 @@ def start_job_runner( def stop_job_runner( self, run_id, args=None, server_id=None, request_json=None, - run_as_cloud_agent=False + run_as_cloud_agent=False, run_as_cloud_server=False ): super().stop_job_runner(run_id) - if run_as_cloud_agent: + if run_as_cloud_agent or run_as_cloud_server: stopping_process = Process( target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config)) stopping_process.start() diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index 0873548a42..88b25f33ab 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -294,7 +294,7 @@ def callback_exit_train(self, topic, payload): # Stop the job runner self._get_job_runner_manager().stop_job_runner( run_id, args=self.args, server_id=server_id, request_json=request_json, - run_as_cloud_agent=self.run_as_cloud_agent) + run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server) def callback_run_logs(self, topic, payload): run_id = str(topic).split('/')[-1] From 162f7598cdd84a19d3c6d230f27985b229aaaa7b Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 15 May 2024 20:57:47 +0800 Subject: [PATCH 056/251] [CoreEngine] make the cloud server id work. 
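
Cloud servers must come up under the exact device id that the cloud agent registered for them, so this patch drops the role suffix for ROLE_CLOUD_SERVER and uses the raw current_device_id as the unique id. A minimal sketch of the resulting scheme (the helper and the suffix strings below are illustrative, not the actual FedMLAccountManager API):

    def build_unique_device_id(current_device_id, os_name, docker_tag="",
                               device_id_suffix="", is_cloud_server=False):
        # Cloud servers keep the raw device id; every other role appends
        # "@<os><docker_tag><role_suffix>".
        if is_cloud_server:
            return current_device_id
        return f"{current_device_id}@{os_name}{docker_tag}{device_id_suffix}"

    assert build_unique_device_id("0xabc", "Linux", "", ".Edge.Server") \
        == "0xabc@Linux.Edge.Server"
    assert build_unique_device_id("0xabc", "Linux", is_cloud_server=True) == "0xabc"
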
--- .../computing/scheduler/scheduler_core/account_manager.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/account_manager.py b/python/fedml/computing/scheduler/scheduler_core/account_manager.py index 4b4cc9fd31..3491e102f6 100755 --- a/python/fedml/computing/scheduler/scheduler_core/account_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/account_manager.py @@ -138,7 +138,7 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_CLOUD_AGENT is_master = True elif role == FedMLAccountManager.ROLE_CLOUD_SERVER: - device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_CLOUD_SERVER + device_id_suffix = "" is_master = True elif role == FedMLAccountManager.ROLE_EDGE_DEVICE: device_id_suffix = FedMLAccountManager.DEVICE_ID_SUFFIX_EDGE_DEVICE @@ -193,6 +193,8 @@ def build_agent_args(self, user_id, api_key=None, device_id=None, os_name=None, docker_tag = FedMLAccountManager.DEVICE_ID_DOCKER_HUB_TAG if is_from_fedml_docker_hub else docker_tag unique_device_id = f"{self.agent_args.current_device_id}@{self.agent_args.os_name}" \ f"{docker_tag}{device_id_suffix}" + if role == FedMLAccountManager.ROLE_CLOUD_SERVER: + unique_device_id = self.agent_args.current_device_id # Set the unique device id self.agent_args.is_from_docker = is_from_docker or is_from_fedml_docker_hub From 496732af205ca526da3f6c5fbf0c3d9ca7e77fa3 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 02:47:13 +0800 Subject: [PATCH 057/251] [CoreEngine] make the deployment work. --- .../master/base_master_job_runner_manager.py | 11 +++++++++++ .../scheduler/master/base_master_protocol_manager.py | 2 +- .../scheduler/model_scheduler/master_job_runner.py | 3 +-- .../model_scheduler/master_protocol_manager.py | 5 ++++- 4 files changed, 17 insertions(+), 4 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py index 2b6c4d3b5a..6831c9d034 100755 --- a/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_job_runner_manager.py @@ -47,6 +47,17 @@ def stop_job_runner( target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config)) stopping_process.start() + def complete_job_runner( + self, run_id, args=None, server_id=None, request_json=None, + run_as_cloud_agent=False, run_as_cloud_server=False + ): + super().complete_job_runner(run_id) + + if run_as_cloud_agent or run_as_cloud_server: + stopping_process = Process( + target=FedMLCloudServerManager.stop_cloud_server, args=(run_id, server_id, args.agent_config)) + stopping_process.start() + def _start_cloud_server( self, args, run_id, request_json, edge_id=None, use_local_process_as_cloud_server=False diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index 88b25f33ab..53a0aee151 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -292,7 +292,7 @@ def callback_exit_train(self, topic, payload): server_id = request_json.get("server_id", None) # Stop the job runner - self._get_job_runner_manager().stop_job_runner( + self._get_job_runner_manager().complete_job_runner( run_id, 
args=self.args, server_id=server_id, request_json=request_json, run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 8ce9d0e102..6149e60939 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -361,8 +361,7 @@ def process_deployment_result_message(self, topic=None, payload=None): # Update the global deployment result mapping self.slave_deployment_results_map[str(device_id)] = model_status - logging.info("callback_deployment_result_message: topic {}, payload {}, result mapping {}.".format( - topic, payload, self.slave_deployment_results_map)) + logging.info("callback_deployment_result_message: topic {}, payload {}.".format(topic, payload)) request_json = self.request_json if request_json is None: diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index b4c5b41d74..e7cf150040 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -100,9 +100,12 @@ def print_connected_info(self): pass def callback_deployment_result_message(self, topic=None, payload=None): - logging.info(f"Received deployment result: {self}") + logging.info(f"Received deployment result") FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload) + def callback_exit_train(self, topic, payload): + pass + def callback_delete_deployment(self, topic, payload): logging.info("[Master] callback_delete_deployment") # Parse payload as the model message object. From 48c0a4c96c5ab8c0ae08fc25391c0b57feb6a925 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 08:27:59 +0800 Subject: [PATCH 058/251] [CoreEngine] refactor the complete job callback. 
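
The status center now loops a dedicated complete-job message back through the message center instead of overloading the exit-train topic, and the master protocol manager consumes it in callback_complete_job. A hedged sketch of the round trip (the topic and payload helpers mirror the GeneralConstants additions in the diff below; the message-center call is shown as a comment):

    import json

    def get_topic_complete_job(server_id):
        return f"status_center/master_agent_{server_id}/complete_job"

    def get_payload_complete_job(run_id, server_id):
        return {"runId": run_id, "serverId": server_id}

    # Status-manager side: loop the completion back through the message center.
    topic = get_topic_complete_job(server_id=257)
    payload = json.dumps(get_payload_complete_job(run_id=1001, server_id=257))
    # self.message_center.receive_message(topic, payload)

    # Protocol-manager side: callback_complete_job parses the payload the
    # same defensive way callback_stop_train does.
    request_json = json.loads(payload)
    run_id = request_json.get("runId", None)
    run_id = request_json.get("id", None) if run_id is None else run_id
    server_id = request_json.get("serverId", None)
    if server_id is None:
        server_id = request_json.get("server_id", None)
    assert (run_id, server_id) == (1001, 257)
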
--- .../master/base_master_protocol_manager.py | 14 +++++------ .../master_protocol_manager.py | 2 +- .../scheduler_core/general_constants.py | 11 ++++++--- .../status_manager_protocols.py | 4 ++-- .../scheduler_core/task_protocol_manager.py | 24 ------------------- 5 files changed, 18 insertions(+), 37 deletions(-) delete mode 100755 python/fedml/computing/scheduler/scheduler_core/task_protocol_manager.py diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index 53a0aee151..f678389489 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -33,7 +33,7 @@ def __init__(self, args, agent_config=None): self.agent_config = agent_config self.topic_start_train = None self.topic_stop_train = None - self.topic_exit_train = None + self.topic_complete_job = None self.topic_report_status = None self.topic_ota_msg = None self.topic_response_device_info = None @@ -62,8 +62,8 @@ def generate_topics(self): # The topic for stopping training self.topic_stop_train = "mlops/flserver_agent_" + str(self.edge_id) + "/stop_train" - # The topic for exiting training - self.topic_exit_train = GeneralConstants.get_topic_exit_train(self.edge_id) + # The topic for completing the job + self.topic_complete_job = GeneralConstants.get_topic_complete_job(self.edge_id) # The topic for reporting current device status. self.topic_report_status = "mlops/report_device_status" @@ -93,7 +93,7 @@ def generate_topics(self): self.subscribed_topics.clear() self.add_subscribe_topic(self.topic_start_train) self.add_subscribe_topic(self.topic_stop_train) - self.add_subscribe_topic(self.topic_exit_train) + self.add_subscribe_topic(self.topic_complete_job) self.add_subscribe_topic(self.topic_report_status) self.add_subscribe_topic(self.topic_ota_msg) self.add_subscribe_topic(self.topic_response_device_info) @@ -108,7 +108,7 @@ def add_protocol_handler(self): # Add the message listeners for all topics self.add_message_listener(self.topic_start_train, self.callback_start_train) self.add_message_listener(self.topic_stop_train, self.callback_stop_train) - self.add_message_listener(self.topic_exit_train, self.callback_exit_train) + self.add_message_listener(self.topic_complete_job, self.callback_complete_job) self.add_message_listener(self.topic_ota_msg, FedMLBaseMasterProtocolManager.callback_server_ota_msg) self.add_message_listener(self.topic_report_status, self.callback_report_current_status) self.add_message_listener(self.topic_response_device_info, self.callback_response_device_info) @@ -281,7 +281,7 @@ def callback_stop_train(self, topic, payload, use_payload=None): run_id, args=self.args, server_id=server_id, request_json=request_json, run_as_cloud_agent=self.run_as_cloud_agent) - def callback_exit_train(self, topic, payload): + def callback_complete_job(self, topic, payload): # Parse the parameters.
request_json = json.loads(payload) run_id = request_json.get("runId", None) @@ -291,7 +291,7 @@ def callback_exit_train(self, topic, payload): if server_id is None: server_id = request_json.get("server_id", None) - # Stop the job runner + # Complete the job runner self._get_job_runner_manager().complete_job_runner( run_id, args=self.args, server_id=server_id, request_json=request_json, run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index e7cf150040..8a578cb7d2 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -103,7 +103,7 @@ def callback_deployment_result_message(self, topic=None, payload=None): logging.info(f"Received deployment result") FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload) - def callback_exit_train(self, topic, payload): + def callback_complete_job(self, topic, payload): pass def callback_delete_deployment(self, topic, payload): diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py index 347f157333..68c1a8e09d 100755 --- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py +++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py @@ -208,6 +208,11 @@ def get_ip_address(request_json, infer_host=None): return ip @staticmethod - def get_topic_exit_train(server_id): - topic_exit_train = f"status_center/flserver_agent_{server_id}/exit_train" - return topic_exit_train + def get_topic_complete_job(server_id): + topic_complete_job = f"status_center/master_agent_{server_id}/complete_job" + return topic_complete_job + + @staticmethod + def get_payload_complete_job(run_id, server_id): + payload_complete_job = {"runId": run_id, "serverId": server_id} + return payload_complete_job diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 921632472b..e5dd312c80 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -67,9 +67,9 @@ def process_job_completed_status(self, master_id, status): # self.stop_cloud_server() # self.remove_listener_for_run_metrics(self.run_id) # self.remove_listener_for_run_logs(self.run_id) - payload_exit_train = {"runId": self.run_id, "serverId": master_id} self.message_center.receive_message( - GeneralConstants.get_topic_exit_train(master_id), json.dumps(payload_exit_train)) + GeneralConstants.get_topic_complete_job(master_id), + json.dumps(GeneralConstants.get_payload_complete_job(self.run_id, master_id))) if self.status_center.is_deployment_status_center and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) diff --git a/python/fedml/computing/scheduler/scheduler_core/task_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/task_protocol_manager.py deleted file mode 100755 index ddf4bb9b6e..0000000000 --- a/python/fedml/computing/scheduler/scheduler_core/task_protocol_manager.py +++ /dev/null @@ -1,24 +0,0 @@ - - -class 
TaskProtocolManager(object): - def __init__(self): - pass - - def log_metrics(self): - # Build the message for logging metrics - - # Send the message to MQTT server - - pass - - def log_model(self): - pass - - def log_artifacts_log(self): - pass - - def log_artifacts(self): - pass - - - From 525479399feae51cbfc9bdf66ce20e843694f10e Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 08:41:33 +0800 Subject: [PATCH 059/251] [CoreEngine] refactor the complete job callback. --- .../scheduler/master/base_master_protocol_manager.py | 8 ++++---- .../computing/scheduler/master/master_protocol_manager.py | 7 +++++++ .../scheduler/model_scheduler/master_protocol_manager.py | 3 --- 3 files changed, 11 insertions(+), 7 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index f678389489..67507182a9 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -291,10 +291,10 @@ def callback_complete_job(self, topic, payload): if server_id is None: server_id = request_json.get("server_id", None) - # Complete the job runner - self._get_job_runner_manager().complete_job_runner( - run_id, args=self.args, server_id=server_id, request_json=request_json, - run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server) + self._process_job_complete_status(run_id, server_id, request_json) + + def _process_job_complete_status(self, run_id, server_id, complete_payload): + pass def callback_run_logs(self, topic, payload): run_id = str(topic).split('/')[-1] diff --git a/python/fedml/computing/scheduler/master/master_protocol_manager.py b/python/fedml/computing/scheduler/master/master_protocol_manager.py index 5eef5914e7..ca9621e41d 100755 --- a/python/fedml/computing/scheduler/master/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/master_protocol_manager.py @@ -34,3 +34,10 @@ def _init_extra_items(self): # Override def print_connected_info(self): super().print_connected_info() + + # Override + def _process_job_complete_status(self, run_id, server_id, complete_payload): + # Complete the job runner + self._get_job_runner_manager().complete_job_runner( + run_id, args=self.args, server_id=server_id, request_json=complete_payload, + run_as_cloud_agent=self.run_as_cloud_agent, run_as_cloud_server=self.run_as_cloud_server) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index 8a578cb7d2..01165ff82e 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -103,9 +103,6 @@ def callback_deployment_result_message(self, topic=None, payload=None): logging.info(f"Received deployment result") FedMLDeployJobRunnerManager.get_instance().save_deployment_result(topic, payload) - def callback_complete_job(self, topic, payload): - pass - def callback_delete_deployment(self, topic, payload): logging.info("[Master] callback_delete_deployment") # Parse payload as the model message object. From 86852c7cd1ff65c3540e07d0f965c84382ebd294 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 09:16:27 +0800 Subject: [PATCH 060/251] [CoreEngine] fixed the monitor issue. 
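
monitor_replicas_number() concatenated the backend prefix and the replica-info path with no separator, producing an invalid URL whenever the prefix has no trailing slash; the fix inserts the missing "/". A slightly more defensive join, offered as an alternative sketch rather than what this patch does, would normalize both sides:

    def join_url(prefix, path):
        # Tolerates a trailing slash on the prefix and a leading one on the path.
        return f"{prefix.rstrip('/')}/{path.lstrip('/')}"

    assert join_url("https://open.fedml.ai", "fedmlModelServer/api/v1/endpoint/replica-info") \
        == "https://open.fedml.ai/fedmlModelServer/api/v1/endpoint/replica-info"
    assert join_url("https://open.fedml.ai/", "/fedmlModelServer/api/v1/endpoint/replica-info") \
        == "https://open.fedml.ai/fedmlModelServer/api/v1/endpoint/replica-info"
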
--- python/fedml/computing/scheduler/comm_utils/job_monitor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py index 9bee76e780..8ae6e1c744 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py +++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py @@ -223,7 +223,7 @@ def monitor_replicas_number(): curr_version = fedml.get_env_version() num_replica_url_path = "fedmlModelServer/api/v1/endpoint/replica-info" mlops_prefix = fedml._get_backend_service() - url = f"{mlops_prefix}{num_replica_url_path}" + url = f"{mlops_prefix}/{num_replica_url_path}" cached_token = FedMLModelCache.get_instance().get_end_point_token_with_eid(endpoint_id) if cached_token is None: From 349493cefb4cf64605fec2c348874435339a1f30 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 09:45:10 +0800 Subject: [PATCH 061/251] [CoreEngine] update the serve example. --- python/examples/launch/serve_mnist/fedml_model_config.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/examples/launch/serve_mnist/fedml_model_config.yaml b/python/examples/launch/serve_mnist/fedml_model_config.yaml index 48254ccca4..f212dbb81d 100644 --- a/python/examples/launch/serve_mnist/fedml_model_config.yaml +++ b/python/examples/launch/serve_mnist/fedml_model_config.yaml @@ -1,6 +1,8 @@ workspace: "./" entry_point: "mnist_serve_main.py" +auto_detect_public_ip: true + data_cache_dir: "" bootstrap: "" From ba453d1b9f0259ca68cb640f97790e19ff1b3ebe Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 10:01:35 +0800 Subject: [PATCH 062/251] [CoreEngine] update the model master runner. --- .../model_scheduler/device_model_inference.py | 56 +++++++++---------- .../model_scheduler/master_job_runner.py | 16 ++---- 2 files changed, 33 insertions(+), 39 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index b8d85edd31..eb3088f327 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -26,35 +26,35 @@ pass -# class Settings(BaseSettings): -# redis_addr: str -# redis_port: str -# redis_password: str -# end_point_name: str -# model_name: str -# model_version: str -# model_infer_url: str -# version: str -# use_mqtt_inference: bool -# use_worker_gateway: bool -# ext_info: str +class Settings(BaseSettings): + redis_addr: str + redis_port: str + redis_password: str + end_point_name: str + model_name: str + model_version: str + model_infer_url: str + version: str + use_mqtt_inference: bool + use_worker_gateway: bool + ext_info: str + + +settings = Settings() + +# class settings: +# redis_addr = "127.0.0.1" +# redis_port = 6379 +# redis_password = "fedml_default" +# end_point_name = "" +# model_name = "" +# model_version = "" +# model_infer_url = "127.0.0.1" +# version = "dev" +# use_mqtt_inference = False +# use_worker_gateway = False +# ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75" # -# -# settings = Settings() - -class settings: - redis_addr = "127.0.0.1" - redis_port = 6379 - redis_password = "fedml_default" - end_point_name = "" - model_name = "" - model_version = "" - model_infer_url = "127.0.0.1" - version = "dev" 
- use_mqtt_inference = False - use_worker_gateway = False - ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75" - api = FastAPI() diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 6149e60939..cecb9de7bc 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -115,7 +115,6 @@ def run_impl( # start unified inference server self.start_device_inference_gateway( - run_id, end_point_name, model_id, model_name, model_version, agent_config=self.agent_config, inference_port=inference_port) # start inference monitor server @@ -464,20 +463,16 @@ def process_deployment_result_message(self, topic=None, payload=None): @staticmethod def start_device_inference_gateway( - run_id, end_point_name, model_id, - model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, + inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default" ): # start unified inference server - running_model_name = ServerConstants.get_running_model_name(end_point_name, - model_name, model_version, run_id, model_id) python_program = get_python_program() master_port = os.getenv("FEDML_MASTER_PORT", None) if master_port is not None: inference_port = int(master_port) if not ServerConstants.is_running_on_k8s(): - logging.info(f"start the model inference gateway, end point {run_id}, " - f"model name {model_name} at port {inference_port}...") + logging.info(f"start the model inference gateway...") use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False") use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False") @@ -501,8 +496,8 @@ def start_device_inference_gateway( "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " "--log-level critical".format( - redis_addr, redis_port, redis_password, end_point_name, - model_name, model_version, "", fedml.get_env_version(), use_mqtt_inference, + redis_addr, str(redis_port), redis_password, "", + "", "", "", fedml.get_env_version(), use_mqtt_inference, use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), fedml_base_dir), should_capture_stdout=False, should_capture_stderr=False) @@ -570,8 +565,7 @@ def recover_inference_and_monitor(): pass FedMLDeployMasterJobRunner.start_device_inference_gateway( - run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port, - agent_config=agent_config) + inference_port=inference_port, agent_config=agent_config) FedMLDeployMasterJobRunner.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) From 14429adfd510b9b95c99bc4c7318c723accd2cb4 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 10:07:14 +0800 Subject: [PATCH 063/251] [CoreEngine] update the model master runner. 
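
recover_inference_and_monitor() now fetches the MQTT config once, up front, and tolerates a config-server failure instead of aborting the whole recovery pass; the gateway start also passes (inference_port, agent_config) in the signature's declared order. A minimal sketch of the guard (MLOpsConfigs and fetch_all_configs come from the diff; the import path and the meaning of the three ignored return values are assumptions):

    from fedml.core.mlops.mlops_configs import MLOpsConfigs  # path assumed

    agent_config = dict()
    try:
        # fetch_all_configs() returns four configs; only the MQTT one is kept.
        agent_config["mqtt_config"], _, _, _ = MLOpsConfigs.fetch_all_configs()
    except Exception:
        # Leave agent_config empty; the per-endpoint recovery below still runs.
        pass
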
--- .../model_scheduler/master_job_runner.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index cecb9de7bc..4d43d7c5c3 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -115,7 +115,7 @@ def run_impl( # start unified inference server self.start_device_inference_gateway( - agent_config=self.agent_config, inference_port=inference_port) + inference_port=inference_port, agent_config=self.agent_config) # start inference monitor server self.stop_device_inference_monitor( @@ -540,6 +540,14 @@ def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, def recover_inference_and_monitor(): # noinspection PyBroadException try: + agent_config = dict() + try: + agent_config["mqtt_config"], _, _, _ = MLOpsConfigs.fetch_all_configs() + except Exception as e: + pass + + FedMLDeployMasterJobRunner.start_device_inference_gateway(agent_config=agent_config) + history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs() for job in history_jobs.job_list: if job.running_json is None: @@ -558,15 +566,6 @@ def recover_inference_and_monitor(): if not is_activated: continue - agent_config = dict() - try: - agent_config["mqtt_config"], _, _, _ = MLOpsConfigs.fetch_all_configs() - except Exception as e: - pass - - FedMLDeployMasterJobRunner.start_device_inference_gateway( - inference_port=inference_port, agent_config=agent_config) - FedMLDeployMasterJobRunner.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) FedMLDeployMasterJobRunner.start_device_inference_monitor( From aa1df9922a4f50d9e6b2c372919163eacaca73aa Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 13:18:32 +0800 Subject: [PATCH 064/251] [CoreEngine] make the handshaking protocol work. --- .../slave/base_slave_protocol_manager.py | 15 +++++ .../scheduler/slave/slave_protocol_manager.py | 56 +------------------ 2 files changed, 16 insertions(+), 55 deletions(-) diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index 4ff931e6fd..1384e9906a 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -48,6 +48,7 @@ def __init__(self, args, agent_config=None): self.topic_report_status = None self.topic_ota_msg = None self.topic_request_device_info = None + self.topic_request_device_info_from_mlops = None self.topic_client_logout = None self.topic_response_job_status = None self.topic_report_device_status_in_job = None @@ -87,6 +88,9 @@ def generate_topics(self): # The topic for requesting device info from the client. self.topic_request_device_info = "server/client/request_device_info/" + str(self.edge_id) + # The topic for requesting device info from mlops. + self.topic_request_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}" + # The topic for requesting device info from MLOps. 
self.topic_client_logout = "mlops/client/logout/" + str(self.edge_id) @@ -114,6 +118,7 @@ def generate_topics(self): self.add_subscribe_topic(self.topic_report_status) self.add_subscribe_topic(self.topic_ota_msg) self.add_subscribe_topic(self.topic_request_device_info) + self.add_subscribe_topic(self.topic_request_device_info_from_mlops) self.add_subscribe_topic(self.topic_client_logout) self.add_subscribe_topic(self.topic_response_job_status) self.add_subscribe_topic(self.topic_report_device_status_in_job) @@ -132,6 +137,7 @@ def add_protocol_handler(self): self.add_message_listener(self.topic_ota_msg, FedMLBaseSlaveProtocolManager.callback_client_ota_msg) self.add_message_listener(self.topic_report_status, self.callback_report_current_status) self.add_message_listener(self.topic_request_device_info, self.callback_report_device_info) + self.add_message_listener(self.topic_request_device_info_from_mlops, self.callback_request_device_info_from_mlops) self.add_message_listener(self.topic_client_logout, self.callback_client_logout) self.add_message_listener(self.topic_response_job_status, self.callback_response_job_status) self.add_message_listener(self.topic_report_device_status_in_job, self.callback_response_device_status_in_job) @@ -402,6 +408,15 @@ def callback_report_device_info(self, topic, payload): response_payload["context"] = context self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id) + def callback_request_device_info_from_mlops(self, topic, payload): + self.response_device_info_to_mlops(topic, payload) + + def response_device_info_to_mlops(self, topic, payload): + response_topic = f"deploy/slave_agent/mlops/response_device_info" + response_payload = {"run_id": self.run_id, "slave_agent_device_id": self.edge_id, + "fedml_version": fedml.__version__} + self.message_center.send_message(response_topic, json.dumps(response_payload)) + def callback_client_logout(self, topic, payload): payload_json = json.loads(payload) secret = payload_json.get("auth", None) diff --git a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py index ef8dac8730..a1067a0d96 100755 --- a/python/fedml/computing/scheduler/slave/slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/slave_protocol_manager.py @@ -13,19 +13,11 @@ class FedMLLaunchSlaveProtocolManager(FedMLBaseSlaveProtocolManager): def __init__(self, args, agent_config=None): FedMLBaseSlaveProtocolManager.__init__(self, args, agent_config=agent_config) - self.topic_request_deploy_slave_device_info_from_mlops = None - self.topic_request_deploy_master_device_info_from_mlops = None - self.topic_request_edge_device_info_from_mlops = None # Override def generate_topics(self): super().generate_topics() - # The topic for requesting device info from mlops. - self.topic_request_edge_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}" - - self.add_subscribe_topic(self.topic_request_edge_device_info_from_mlops) - # Override def add_protocol_handler(self): super().add_protocol_handler() @@ -106,54 +98,8 @@ def _init_extra_items(self): os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server_id) os.environ["FEDML_DEPLOY_WORKER_IDS"] = str(self.model_device_client_edge_id_list) - # Subscribe handshaking messages from MLOps. 
- self.subscribe_handshaking_messages_from_mlops() - # Start the monitor process self.args = copy.deepcopy(in_args) self.mlops_metrics.stop_device_realtime_perf() self.mlops_metrics.report_device_realtime_perf(self.args, self.args.agent_config["mqtt_config"]) - pass - - def callback_response_device_info_to_mlops(self, topic, payload): - payload_json = json.loads(payload) - server_id = payload_json.get("server_id", 0) - run_id = payload_json.get("run_id", 0) - listen_edge_id = str(topic).split("/")[-1] - context = payload_json.get("context", None) - response_topic = f"deploy/slave_agent/mlops/response_device_info" - if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \ - self.model_device_server_id is not None: - device_info_json = { - "edge_id": listen_edge_id, - "fedml_version": fedml.__version__, - "user_id": self.args.user - } - salve_device_ids = list() - for model_client_edge_id in self.model_device_client_edge_id_list: - salve_device_ids.append(model_client_edge_id) - response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0], - "slave_device_id_list": salve_device_ids, - "master_device_id": self.model_device_server_id, - "run_id": run_id, "edge_id": listen_edge_id, - "edge_info": device_info_json} - if context is not None: - response_payload["context"] = context - self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id) - - def subscribe_handshaking_messages_from_mlops(self): - # The topic for requesting deployment master device info from mlops. - self.topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}" - - # The topic for requesting deployment slave device info from mlops. - self.topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}" - - self.add_subscribe_topic(self.topic_request_deploy_master_device_info_from_mlops) - self.add_subscribe_topic(self.topic_request_deploy_slave_device_info_from_mlops) - - self.add_message_listener( - self.topic_request_edge_device_info_from_mlops, self.callback_response_device_info_to_mlops) - self.add_message_listener( - self.topic_request_deploy_master_device_info_from_mlops, self.callback_response_device_info_to_mlops) - self.add_message_listener( - self.topic_request_deploy_slave_device_info_from_mlops, self.callback_response_device_info_to_mlops) \ No newline at end of file + pass \ No newline at end of file From 94f408f186a2c4d889eb73ee445655e25eccdfd4 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 13:56:27 +0800 Subject: [PATCH 065/251] [CoreEngine] make the handshaking protocol work. 
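
MLOps correlates the device-info responses by edge id, so both the master and slave agents now include "edge_id" in the payload alongside the role-specific key. A small sketch of the two payload shapes (field names are taken from the diff; the run_id/edge_id values are placeholders):

    import json

    def slave_response(run_id, edge_id, version):
        return {"run_id": run_id, "slave_agent_device_id": edge_id,
                "fedml_version": version, "edge_id": edge_id}

    def master_response(run_id, edge_id, version):
        return {"run_id": run_id, "master_agent_device_id": edge_id,
                "fedml_version": version, "edge_id": edge_id}

    # Published on "deploy/slave_agent/mlops/response_device_info" and
    # "deploy/master_agent/mlops/response_device_info" respectively.
    print(json.dumps(slave_response(0, 42, "0.8.31")))
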
--- .../computing/scheduler/master/base_master_protocol_manager.py | 2 +- .../computing/scheduler/slave/base_slave_protocol_manager.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index 67507182a9..cee91578dd 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -448,7 +448,7 @@ def response_device_info_to_mlops(self, topic, payload): response_topic = f"deploy/master_agent/mlops/response_device_info" if self.mlops_metrics is not None: response_payload = {"run_id": self.run_id, "master_agent_device_id": self.edge_id, - "fedml_version": fedml.__version__} + "fedml_version": fedml.__version__, "edge_id": self.edge_id} self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload)) def init_job_task(self, request_json): diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index 1384e9906a..aa69d4482d 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -414,7 +414,7 @@ def callback_request_device_info_from_mlops(self, topic, payload): def response_device_info_to_mlops(self, topic, payload): response_topic = f"deploy/slave_agent/mlops/response_device_info" response_payload = {"run_id": self.run_id, "slave_agent_device_id": self.edge_id, - "fedml_version": fedml.__version__} + "fedml_version": fedml.__version__, "edge_id": self.edge_id} self.message_center.send_message(response_topic, json.dumps(response_payload)) def callback_client_logout(self, topic, payload): From 0726f66c6a3a3c3ba7263983741552803a6bb5b2 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 18:36:24 +0800 Subject: [PATCH 066/251] [CoreEngine] update the model master runner. --- .../computing/scheduler/model_scheduler/master_job_runner.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 4d43d7c5c3..5991151e94 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -114,13 +114,13 @@ def run_impl( message_center=self.message_center) # start unified inference server - self.start_device_inference_gateway( + self.inference_gateway_process = self.start_device_inference_gateway( inference_port=inference_port, agent_config=self.agent_config) # start inference monitor server self.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) - self.start_device_inference_monitor( + self.monitor_process = self.start_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) # Changed the status to "IDLE" From 28984d54a8b3cb470438be98931bae50f7939e6b Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 18:57:20 +0800 Subject: [PATCH 067/251] [CoreEngine] update the model master runner. 
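
run_impl() goes back to invoking the gateway and monitor starters on the class: they are stateless @staticmethod helpers, and keeping the returned process handles on the runner instance (as patch 66 briefly did) bought nothing because recovery and tear-down locate those processes per endpoint. A toy sketch of the calling convention (class name and port value are illustrative):

    class DeployMasterRunner:
        @staticmethod
        def start_device_inference_gateway(inference_port=2203):
            # Spawn-and-forget; no handle is retained on the instance.
            print(f"gateway starting on port {inference_port}")

        def run_impl(self):
            DeployMasterRunner.start_device_inference_gateway()

    DeployMasterRunner().run_impl()
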
--- .../scheduler/model_scheduler/master_job_runner.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 5991151e94..b0a15ed8ba 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -114,13 +114,13 @@ def run_impl( message_center=self.message_center) # start unified inference server - self.inference_gateway_process = self.start_device_inference_gateway( + FedMLDeployMasterJobRunner.start_device_inference_gateway( inference_port=inference_port, agent_config=self.agent_config) # start inference monitor server - self.stop_device_inference_monitor( + FedMLDeployMasterJobRunner.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) - self.monitor_process = self.start_device_inference_monitor( + FedMLDeployMasterJobRunner.start_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) # Changed the status to "IDLE" @@ -546,8 +546,6 @@ def recover_inference_and_monitor(): except Exception as e: pass - FedMLDeployMasterJobRunner.start_device_inference_gateway(agent_config=agent_config) - history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs() for job in history_jobs.job_list: if job.running_json is None: @@ -566,6 +564,9 @@ def recover_inference_and_monitor(): if not is_activated: continue + FedMLDeployMasterJobRunner.start_device_inference_gateway( + inference_port=inference_port, agent_config=agent_config) + FedMLDeployMasterJobRunner.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) FedMLDeployMasterJobRunner.start_device_inference_monitor( From 310d43cbcde6d6fea8249b18ecf82e061816ce84 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 19:18:10 +0800 Subject: [PATCH 068/251] [CoreEngine] update the model master runner. 
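
The gateway is now launched from a spawned child process whose body shells out to uvicorn with the settings passed as environment variables, so the caller returns immediately and the env vars feed the pydantic Settings model re-enabled in patch 62. A hedged sketch of that launch pattern (the module path, port, and Redis values stand in for the real inference_gw_cmd and ServerConstants defaults):

    import os
    from multiprocessing import Process

    INFERENCE_APP = "fedml.computing.scheduler.model_scheduler.device_model_inference:api"

    def gateway_entry(inference_port):
        # POSIX-style env-var prefix, mirroring the os.system call in the diff.
        os.system(
            'REDIS_ADDR="localhost" REDIS_PORT="6379" REDIS_PASSWORD="fedml_default" '
            f'python -m uvicorn {INFERENCE_APP} --host 0.0.0.0 '
            f'--port {inference_port} --log-level critical')

    if __name__ == "__main__":
        Process(target=gateway_entry, args=(2203,)).start()
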
--- .../model_scheduler/master_job_runner.py | 101 ++++++++++++------ 1 file changed, 71 insertions(+), 30 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index b0a15ed8ba..9b21237878 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -25,7 +25,6 @@ class FedMLDeployMasterJobRunner(FedMLBaseMasterJobRunner, FedMLDeployJobRunnerMsgSender, ABC): - default_redis_addr = "local" default_redis_port = "6379" default_redis_password = "fedml_default" @@ -54,7 +53,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id self.deployment_result_queue = Queue() # Override - def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None,): + def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None, ): return FedMLDeployMasterJobRunner( args, run_id=run_id, request_json=request_json, agent_config=self.agent_config, edge_id=edge_id ) @@ -65,10 +64,10 @@ def _generate_extend_queue_list(self): # Override def run_impl( - self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue, - run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue, - run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None, - status_center_queue=None + self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue, + run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue, + run_extend_queue_list=None, sender_message_queue=None, listener_message_queue=None, + status_center_queue=None ): # Parse the model parameters. 
run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ @@ -331,7 +330,7 @@ def process_deployment_result_message(self, topic=None, payload=None): elif run_operation == "UPDATE": # Overwrite the json with the rollback version diff rollback_version_diff = self.replica_controller.rollback_get_replica_version_diff( - device_id_trigger=device_id, replica_no_trigger=replica_no) + device_id_trigger=device_id, replica_no_trigger=replica_no) # Change the target version to the start version self.replica_controller.rollback_setback_target_replica_version() @@ -465,6 +464,15 @@ def process_deployment_result_message(self, topic=None, payload=None): def start_device_inference_gateway( inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default" + ): + from multiprocessing import Process + Process(target=FedMLDeployMasterJobRunner.start_device_inference_gateway_entry, + args=(inference_port, agent_config, redis_addr, redis_port, redis_password)).start() + + @staticmethod + def start_device_inference_gateway_entry( + inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, + agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default" ): # start unified inference server python_program = get_python_program() @@ -489,20 +497,32 @@ def start_device_inference_gateway( agent_config["mqtt_config"]["MQTT_USER"] + connect_str + agent_config["mqtt_config"]["MQTT_PWD"] + connect_str + str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT") - inference_gateway_process = ServerConstants.exec_console_with_script( - "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " - "END_POINT_NAME=\"{}\" " - "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " - "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - redis_addr, str(redis_port), redis_password, "", - "", "", "", fedml.get_env_version(), use_mqtt_inference, - use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), - fedml_base_dir), - should_capture_stdout=False, should_capture_stderr=False) - - return inference_gateway_process + python_program = get_python_program() + os.system("REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " + "END_POINT_NAME=\"{}\" " + "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " + "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " + "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " + "--log-level critical".format( + redis_addr, str(redis_port), redis_password, "", + "", "", "", fedml.get_env_version(), use_mqtt_inference, + use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), + fedml_base_dir)) + + # inference_gateway_process = ServerConstants.exec_console_with_script( + # "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " + # "END_POINT_NAME=\"{}\" " + # "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " + # "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " + # "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " + # "--log-level critical".format( + # redis_addr, str(redis_port), redis_password, "", + # "", "", "", fedml.get_env_version(), use_mqtt_inference, + # 
use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), + # fedml_base_dir), + # should_capture_stdout=False, should_capture_stderr=False) + # + # return inference_gateway_process return None @@ -510,6 +530,16 @@ def start_device_inference_gateway( def start_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True, redis_addr="localhost", redis_port=6379, redis_password="fedml_default" + ): + from multiprocessing import Process + Process(target=FedMLDeployMasterJobRunner.start_device_inference_monitor_entry, + args=(run_id, end_point_name, model_id, model_name, model_version, check_stopped_event, + redis_addr, redis_port, redis_password)).start() + + @staticmethod + def start_device_inference_monitor_entry( + run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True, + redis_addr="localhost", redis_port=6379, redis_password="fedml_default" ): # start inference monitor server # Will report the qps related metrics to the MLOps @@ -520,14 +550,25 @@ def start_device_inference_monitor( python_program = get_python_program() running_model_name = ServerConstants.get_running_model_name(end_point_name, model_name, model_version, run_id, model_id) - monitor_process = ServerConstants.exec_console_with_shell_script_list( - [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str, - "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name, - "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr, - "-rp", str(redis_port), "-rpw", redis_password], - should_capture_stdout=False, should_capture_stderr=False - ) - return monitor_process + + os.system(f"{python_program} {monitor_file} -v {fedml.get_env_version()} -ep {run_id_str} " + f"-epn {end_point_name} -mi {model_id} -mn {model_name} -mv \"{model_version}\" " + f"-iu infer_url -ra {redis_addr} -rp {redis_port} -rpw redis_password") + + # from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics + # monitor_center = FedMLModelMetrics( + # run_id_str, end_point_name, model_id, model_name, model_version, + # "infer_url", redis_addr, redis_port, redis_password, version=fedml.get_env_version()) + # monitor_center.start_monitoring_metrics_center() + + # monitor_process = ServerConstants.exec_console_with_shell_script_list( + # [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str, + # "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name, + # "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr, + # "-rp", str(redis_port), "-rpw", redis_password], + # should_capture_stdout=False, should_capture_stderr=False + # ) + # return monitor_process @staticmethod def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version): @@ -779,7 +820,7 @@ def parse_model_run_params(running_json): model_version = model_config["model_version"] model_config_parameters = running_json.get("parameters", {}) - inference_port = model_config_parameters.get("server_internal_port", # Internal port is for the gateway + inference_port = model_config_parameters.get("server_internal_port", # Internal port is for the gateway ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) inference_port_external = model_config_parameters.get("server_external_port", inference_port) From f378d075d1d612f78a8340ef13eadf5566d03c68 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 19:37:50 +0800 Subject: [PATCH 069/251] [CoreEngine] update the 
model master runner. --- .../model_scheduler/device_model_inference.py | 56 +++++++++---------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index eb3088f327..b8d85edd31 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -26,35 +26,35 @@ pass -class Settings(BaseSettings): - redis_addr: str - redis_port: str - redis_password: str - end_point_name: str - model_name: str - model_version: str - model_infer_url: str - version: str - use_mqtt_inference: bool - use_worker_gateway: bool - ext_info: str - - -settings = Settings() - -# class settings: -# redis_addr = "127.0.0.1" -# redis_port = 6379 -# redis_password = "fedml_default" -# end_point_name = "" -# model_name = "" -# model_version = "" -# model_infer_url = "127.0.0.1" -# version = "dev" -# use_mqtt_inference = False -# use_worker_gateway = False -# ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75" +# class Settings(BaseSettings): +# redis_addr: str +# redis_port: str +# redis_password: str +# end_point_name: str +# model_name: str +# model_version: str +# model_infer_url: str +# version: str +# use_mqtt_inference: bool +# use_worker_gateway: bool +# ext_info: str # +# +# settings = Settings() + +class settings: + redis_addr = "127.0.0.1" + redis_port = 6379 + redis_password = "fedml_default" + end_point_name = "" + model_name = "" + model_version = "" + model_infer_url = "127.0.0.1" + version = "dev" + use_mqtt_inference = False + use_worker_gateway = False + ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75" + api = FastAPI() From 871cba9792972cf6bd8956d3cac1fd09b4b2f67f Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 19:40:51 +0800 Subject: [PATCH 070/251] [CoreEngine] update the model master runner. --- .../computing/scheduler/model_scheduler/master_job_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 9b21237878..2c48a4277e 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -553,7 +553,7 @@ def start_device_inference_monitor_entry( os.system(f"{python_program} {monitor_file} -v {fedml.get_env_version()} -ep {run_id_str} " f"-epn {end_point_name} -mi {model_id} -mn {model_name} -mv \"{model_version}\" " - f"-iu infer_url -ra {redis_addr} -rp {redis_port} -rpw redis_password") + f"-iu infer_url -ra {redis_addr} -rp {redis_port} -rpw {redis_password}") # from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics # monitor_center = FedMLModelMetrics( From 9c9b20f0cfcf4f99572f025d14729b5c789e0cdf Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 20:14:01 +0800 Subject: [PATCH 071/251] [CoreEngine] not kill the subprocess when exiting the model master runner. 
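This commit returns to launching the gateway and monitor through `ServerConstants.exec_console_with_script` and, per the title, leaves those children alive when the runner exits. The helper is FedML-internal; assuming it behaves like a detached shell launch with output discarded, a rough stand-in would be:

```python
import subprocess


def exec_console_with_script(script: str) -> subprocess.Popen:
    # Assumption about the FedML helper, not its actual implementation.
    # shell=True so env assignments such as REDIS_ADDR="..." prefixed to the
    # command line are honored (POSIX shell); discarding output mirrors
    # should_capture_stdout=False / should_capture_stderr=False.
    return subprocess.Popen(
        script, shell=True,
        stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL,
    )


proc = exec_console_with_script('REDIS_PORT="6379" python3 -c "import time; time.sleep(5)"')
proc.terminate()
```

Returning the `Popen` handle is what lets the caller decide later whether to keep or stop the child, rather than killing it implicitly on exit.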
--- .../master/base_master_job_runner.py | 5 +- .../scheduler/master/server_constants.py | 5 +- .../device_server_constants.py | 5 +- .../model_scheduler/master_job_runner.py | 90 ++++++------------- 4 files changed, 37 insertions(+), 68 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py index 1827de481d..18aa8a6eed 100755 --- a/python/fedml/computing/scheduler/master/base_master_job_runner.py +++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py @@ -111,10 +111,13 @@ def run( if self.mlops_metrics is not None: self.mlops_metrics.stop_sys_perf() time.sleep(3) - ServerConstants.cleanup_run_process(self.run_id) + self.cleanup_runner_process(self.run_id) ServerConstants.cleanup_learning_process(self.run_id) ServerConstants.cleanup_bootstrap_process(self.run_id) + def cleanup_runner_process(self, run_id): + ServerConstants.cleanup_run_process(run_id) + @debug @abstractmethod def run_impl( diff --git a/python/fedml/computing/scheduler/master/server_constants.py b/python/fedml/computing/scheduler/master/server_constants.py index 058c57e199..b835ba1bde 100644 --- a/python/fedml/computing/scheduler/master/server_constants.py +++ b/python/fedml/computing/scheduler/master/server_constants.py @@ -268,9 +268,10 @@ def get_dataset_metadata_url(): return get_dataset_metadata_url @staticmethod - def cleanup_run_process(run_id): + def cleanup_run_process(run_id, not_kill_subprocess=False): RunProcessUtils.cleanup_run_process( - run_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME) + run_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME, + not_kill_subprocess=not_kill_subprocess) @staticmethod def save_run_process(run_id, process_id): diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py index 86c7aac992..6b5b335863 100644 --- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py @@ -295,9 +295,10 @@ def get_public_ip(): return ip @staticmethod - def cleanup_run_process(run_id): + def cleanup_run_process(run_id, not_kill_subprocess=False): RunProcessUtils.cleanup_run_process( - run_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME) + run_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME, + not_kill_subprocess=not_kill_subprocess) @staticmethod def save_run_process(run_id, process_id): diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 2c48a4277e..eef03d53f2 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -460,17 +460,11 @@ def process_deployment_result_message(self, topic=None, payload=None): time.sleep(3) self.trigger_completed_event() - @staticmethod - def start_device_inference_gateway( - inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, - agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default" - ): - from multiprocessing import Process - Process(target=FedMLDeployMasterJobRunner.start_device_inference_gateway_entry, - args=(inference_port, agent_config, redis_addr, redis_port, 
redis_password)).start() + def cleanup_runner_process(self, run_id): + ServerConstants.cleanup_run_process(run_id, not_kill_subprocess=True) @staticmethod - def start_device_inference_gateway_entry( + def start_device_inference_gateway( inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default" ): @@ -498,31 +492,22 @@ def start_device_inference_gateway_entry( agent_config["mqtt_config"]["MQTT_PWD"] + connect_str + str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT") python_program = get_python_program() - os.system("REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " - "END_POINT_NAME=\"{}\" " - "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " - "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - redis_addr, str(redis_port), redis_password, "", - "", "", "", fedml.get_env_version(), use_mqtt_inference, - use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), - fedml_base_dir)) - - # inference_gateway_process = ServerConstants.exec_console_with_script( - # "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " - # "END_POINT_NAME=\"{}\" " - # "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " - # "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " - # "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - # "--log-level critical".format( - # redis_addr, str(redis_port), redis_password, "", - # "", "", "", fedml.get_env_version(), use_mqtt_inference, - # use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), - # fedml_base_dir), - # should_capture_stdout=False, should_capture_stderr=False) - # - # return inference_gateway_process + inference_gateway_process = ServerConstants.exec_console_with_script( + "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " + "END_POINT_NAME=\"{}\" " + "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " + "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " + "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " + "--log-level critical".format( + redis_addr, str(redis_port), redis_password, "", + "", "", "", fedml.get_env_version(), use_mqtt_inference, + use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), + fedml_base_dir), + should_capture_stdout=False, should_capture_stderr=False) + + return inference_gateway_process + else: + return inference_gateway_pids[0] return None @@ -530,16 +515,6 @@ def start_device_inference_gateway_entry( def start_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True, redis_addr="localhost", redis_port=6379, redis_password="fedml_default" - ): - from multiprocessing import Process - Process(target=FedMLDeployMasterJobRunner.start_device_inference_monitor_entry, - args=(run_id, end_point_name, model_id, model_name, model_version, check_stopped_event, - redis_addr, redis_port, redis_password)).start() - - @staticmethod - def start_device_inference_monitor_entry( - run_id, end_point_name, model_id, model_name, model_version, check_stopped_event=True, - redis_addr="localhost", redis_port=6379, redis_password="fedml_default" ): # start inference monitor server # 
Will report the qps related metrics to the MLOps @@ -550,25 +525,14 @@ def start_device_inference_monitor_entry( python_program = get_python_program() running_model_name = ServerConstants.get_running_model_name(end_point_name, model_name, model_version, run_id, model_id) - - os.system(f"{python_program} {monitor_file} -v {fedml.get_env_version()} -ep {run_id_str} " - f"-epn {end_point_name} -mi {model_id} -mn {model_name} -mv \"{model_version}\" " - f"-iu infer_url -ra {redis_addr} -rp {redis_port} -rpw {redis_password}") - - # from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics - # monitor_center = FedMLModelMetrics( - # run_id_str, end_point_name, model_id, model_name, model_version, - # "infer_url", redis_addr, redis_port, redis_password, version=fedml.get_env_version()) - # monitor_center.start_monitoring_metrics_center() - - # monitor_process = ServerConstants.exec_console_with_shell_script_list( - # [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str, - # "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name, - # "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr, - # "-rp", str(redis_port), "-rpw", redis_password], - # should_capture_stdout=False, should_capture_stderr=False - # ) - # return monitor_process + monitor_process = ServerConstants.exec_console_with_shell_script_list( + [python_program, monitor_file, "-v", fedml.get_env_version(), "-ep", run_id_str, + "-epn", str(end_point_name), "-mi", str(model_id), "-mn", model_name, + "-mv", model_version, "-iu", "infer_url", "-ra", redis_addr, + "-rp", str(redis_port), "-rpw", redis_password], + should_capture_stdout=False, should_capture_stderr=False + ) + return monitor_process @staticmethod def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version): From 17b053cf7243860791616e75abcab647528daac9 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 16 May 2024 20:19:05 +0800 Subject: [PATCH 072/251] [CoreEngine] not kill the subprocess when exiting the model master runner. 
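The change below threads a `not_kill_subprocess` flag through `RunProcessUtils.cleanup_run_process`: the recorded runner process is still terminated, but its children (the freshly detached gateway and monitor) are spared. A usage sketch; the run id and directories are placeholders:

```python
from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils

RunProcessUtils.cleanup_run_process(
    run_id=1234,                   # placeholder run id
    data_dir="/tmp/fedml_data",    # placeholder data directory
    info_dir="runner_infos",       # placeholder info directory
    not_kill_subprocess=True,      # terminate the runner, keep its children
)
```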
--- .../scheduler/comm_utils/run_process_utils.py | 19 +++++++++++--------
 1 file changed, 11 insertions(+), 8 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
index e64e708fb5..05cc342e36 100644
--- a/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/run_process_utils.py
@@ -14,8 +14,10 @@ def get_run_process_prefix(prefix, run_id):
         return f"{prefix}-run@{run_id}@pid@"
 
     @staticmethod
-    def cleanup_run_process(run_id, data_dir, info_dir,
-                            info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_RUNNER_PROCESS):
+    def cleanup_run_process(
+            run_id, data_dir, info_dir,
+            info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_RUNNER_PROCESS, not_kill_subprocess=False
+    ):
         try:
             local_pkg_data_dir = data_dir
             run_process_dir = os.path.join(local_pkg_data_dir, info_dir)
@@ -43,12 +45,13 @@ def cleanup_run_process(run_id, data_dir, info_dir,
 
             try:
                 process = psutil.Process(int(process_id))
-                child_processes = process.children(recursive=True)
-                for sub_process in child_processes:
-                    if platform.system() == 'Windows':
-                        os.system("taskkill /PID {} /T /F".format(sub_process.pid))
-                    else:
-                        os.kill(sub_process.pid, signal.SIGKILL)
+                if not not_kill_subprocess:
+                    child_processes = process.children(recursive=True)
+                    for sub_process in child_processes:
+                        if platform.system() == 'Windows':
+                            os.system("taskkill /PID {} /T /F".format(sub_process.pid))
+                        else:
+                            os.kill(sub_process.pid, signal.SIGKILL)
 
                 if process is not None:
                     if platform.system() == 'Windows':

From b8ec6163cd9df168d315f8f019e01d5b78cd2e9a Mon Sep 17 00:00:00 2001
From: Alex
Date: Fri, 17 May 2024 21:34:25 +0800
Subject: [PATCH 073/251] [CoreEngine] fixed the issue where the GPU id was
 released in improper cases.
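Two things change in the diff below: the job monitor stops releasing GPU ids the moment a run's processes exit (the timeout check alone now decides failure), and per-job status state, including `_status_transition`, moves from the shared status center into per-run `FedMLStatusManager` instances. The transition rule, distilled; the short strings stand in for the MSG_MLOPS_* constants:

```python
TERMINAL_JOB_STATUSES = {"FAILED", "FINISHED"}
TERMINAL_CLIENT_STATUSES = {"FAILED", "FINISHED", "KILLED"}


def status_transition(entire_job_status, client_status):
    # Once the job as a whole is terminal, a late client report survives only
    # if it is itself terminal; anything else is coerced to KILLED.
    if entire_job_status in TERMINAL_JOB_STATUSES \
            and client_status not in TERMINAL_CLIENT_STATUSES:
        return "KILLED"
    return client_status


assert status_transition("FINISHED", "RUNNING") == "KILLED"
assert status_transition("FINISHED", "FAILED") == "FAILED"
assert status_transition(None, "RUNNING") == "RUNNING"
```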
--- .../scheduler/comm_utils/job_monitor.py | 23 ++-- .../scheduler/scheduler_core/status_center.py | 76 ++----------- .../status_manager_protocols.py | 101 ++++++++++++++---- 3 files changed, 101 insertions(+), 99 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py index 8ae6e1c744..bada84d96e 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py +++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py @@ -354,15 +354,15 @@ def monitor_slave_run_process_status(self): # Check if all processes of the specific run are exited # FIXME: Proactively release the gpu ids when the run processes have not even started yet as the docker # image is being pulled - run_process_list = client_constants.ClientConstants.get_learning_process_list(job.job_id) - all_run_processes_exited = True if len(run_process_list) <= 0 else False - if all_run_processes_exited: - if not self.released_runs.get(str(job.job_id), False): - self.released_runs[str(job.job_id)] = True - # Release the gpu ids - print( - f"[run/device][{job.job_id}/{job.edge_id}] Release gpu resource when run processes has exited on monioring slave runs periodically.") - JobRunnerUtils.get_instance().release_gpu_ids(job.job_id, job.edge_id) + # run_process_list = client_constants.ClientConstants.get_learning_process_list(job.job_id) + # all_run_processes_exited = True if len(run_process_list) <= 0 else False + # if all_run_processes_exited: + # if not self.released_runs.get(str(job.job_id), False): + # self.released_runs[str(job.job_id)] = True + # # Release the gpu ids + # print( + # f"[run/device][{job.job_id}/{job.edge_id}] Release gpu resource when run processes has exited on monioring slave runs periodically.") + # JobRunnerUtils.get_instance().release_gpu_ids(job.job_id, job.edge_id) # Get the timeout threshold timeout_threshold = None @@ -381,8 +381,9 @@ def monitor_slave_run_process_status(self): # If the run processes have exited but run status is not completed and # timeout is out of the range, then release gpu ids and report failed status to the master agent. 
- if all_run_processes_exited and not SchedulerConstants.is_run_completed(job.status) and \ - timeout_threshold is not None and timeout > timeout_threshold: + # if all_run_processes_exited and not SchedulerConstants.is_run_completed(job.status) and \ + # timeout_threshold is not None and timeout > timeout_threshold: + if timeout_threshold is not None and timeout > timeout_threshold: # Report failed status to the master agent mlops.log_training_failed_status( run_id=job.job_id, edge_id=job.edge_id, enable_broadcast=True) diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py index 4a55dbb679..c0e1b6633a 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py @@ -1,8 +1,6 @@ import logging import time -from ..slave.client_constants import ClientConstants -from ..master.server_constants import ServerConstants from enum import Enum, unique import multiprocessing from multiprocessing import Process, Queue @@ -11,7 +9,6 @@ from .message_center import FedMLMessageCenter import traceback from .status_manager_protocols import FedMLStatusManager -from .compute_cache_manager import ComputeCacheManager @unique @@ -87,11 +84,6 @@ class FedMLStatusCenter(object): def __init__(self, message_queue=None): self.status_queue = message_queue - self.job_status_in_slave = dict() - self.entire_job_status = None - self.job_status_in_master = dict() - self.slave_devices_status = dict() - self.master_devices_status = dict() self.status_center_process = None self.status_event = None self.status_sender_message_center_queue = None @@ -108,50 +100,6 @@ def __repr__(self): attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), ) - def add_job_status_in_slave(self, device_id, status): - self.job_status_in_slave[device_id] = self._status_transition(status) - - def add_job_status_in_master(self, device_id, status): - self.job_status_in_master[device_id] = self._status_transition(status) - - def set_entire_job_status(self, status): - self.entire_job_status = status - - def add_slave_device_status(self, device_id, status): - self.slave_devices_status[device_id] = self._status_transition(status) - - def add_master_device_status(self, device_id, status): - self.master_devices_status[device_id] = self._status_transition(status) - - def get_job_status_in_slave(self, device_id): - return self.job_status_in_slave.get(device_id, None) - - def get_job_status_in_master(self, device_id): - return self.job_status_in_master.get(device_id, None) - - def get_entire_job_status(self): - return self.entire_job_status - - def get_slave_device_status(self, device_id): - return self.slave_devices_status.get(device_id, None) - - def get_master_device_status(self, device_id): - return self.master_devices_status.get(device_id, None) - - def _status_transition(self, status): - transition_status = status - if self.entire_job_status is not None: - if self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ - self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED: - transition_status = status - else: - transition_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED - - return transition_status - def 
get_status_runner(self): return None @@ -205,16 +153,6 @@ def rebuild_message_center(self, message_center_queue): def rebuild_status_center(self, status_queue): pass - @staticmethod - def save_job_status(run_id, status): - ComputeCacheManager.get_instance().set_redis_params() - ComputeCacheManager.get_instance().get_status_cache().save_job_status(run_id, status) - - @staticmethod - def save_device_status_in_job(run_id, device_id, status): - ComputeCacheManager.get_instance().set_redis_params() - ComputeCacheManager.get_instance().get_status_cache().save_device_status_in_job(run_id, device_id, status) - def run_status_dispatcher(self, status_event, status_queue, sender_message_center_queue, listener_message_center_queue): @@ -272,6 +210,10 @@ def run_status_dispatcher(self, status_event, status_queue, else: status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id + # if the job status is completed then continue + if status_manager_instances[status_entity.run_id].is_job_completed(): + continue + # Process the master and slave status. if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_MASTER_STATUS_PREFIX): # Process the job status @@ -279,7 +221,12 @@ def run_status_dispatcher(self, status_event, status_queue, message_entity.topic, message_entity.payload) # Save the job status - FedMLStatusCenter.save_job_status(status_entity.run_id, self.get_entire_job_status()) + status_manager_instances[status_entity.run_id].save_job_status() + + # Popup the status manager instance when the job status is completed + if status_manager_instances[status_entity.run_id].is_job_completed(): + status_manager_instances.pop(status_entity.run_id) + continue elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX): # Process the slave device status @@ -287,8 +234,7 @@ def run_status_dispatcher(self, status_event, status_queue, message_entity.topic, message_entity.payload) # Save the device status in job - FedMLStatusCenter.save_device_status_in_job(status_entity.run_id, status_entity.edge_id, - self.get_job_status_in_slave(status_entity.edge_id)) + status_manager_instances[status_entity.run_id].save_device_status_in_job(status_entity.edge_id) except Exception as e: if message_entity is not None: diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index e5dd312c80..272423f147 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -12,6 +12,7 @@ from ..master.server_data_interface import FedMLServerDataInterface from .message_common import LogArgs from .general_constants import GeneralConstants +from ..scheduler_core.compute_cache_manager import ComputeCacheManager class FedMLStatusManager(object): @@ -33,6 +34,15 @@ def __init__(self, run_id=None, edge_id=None, server_id=None, self.log_args = LogArgs(role="server", edge_id=self.edge_id, server_id=self.server_id, log_file_dir=ServerConstants.get_log_file_dir()) + self.job_status_in_slave = dict() + self.entire_job_status = None + self.job_status_in_master = dict() + self.slave_devices_status = dict() + self.master_devices_status = dict() + self.completed_job_status_list = [ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, + ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, + ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED] + def __repr__(self): return "<{klass} @{id:x} {attrs}>".format( 
klass=self.__class__.__name__, @@ -40,6 +50,65 @@ def __repr__(self): attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), ) + def add_job_status_in_slave(self, device_id, status): + self.job_status_in_slave[device_id] = self._status_transition(status) + + def add_job_status_in_master(self, device_id, status): + self.job_status_in_master[device_id] = self._status_transition(status) + + def set_entire_job_status(self, status): + self.entire_job_status = status + + def add_slave_device_status(self, device_id, status): + self.slave_devices_status[device_id] = self._status_transition(status) + + def add_master_device_status(self, run_id, device_id, status): + self.master_devices_status[device_id] = self._status_transition(status) + + def get_job_status_in_slave(self, device_id): + return self.job_status_in_slave.get(device_id, None) + + def get_job_status_in_master(self, device_id): + return self.job_status_in_master.get(device_id, None) + + def get_entire_job_status(self): + return self.entire_job_status + + def get_slave_device_status(self, device_id): + return self.slave_devices_status.get(device_id, None) + + def get_master_device_status(self, device_id): + return self.master_devices_status.get(device_id, None) + + def is_job_completed(self): + if self.entire_job_status and self.entire_job_status in self.completed_job_status_list: + return True + return False + + def _status_transition(self, status): + transition_status = status + if self.entire_job_status is not None: + if self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ + self.entire_job_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: + if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \ + status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ + status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED: + transition_status = status + else: + transition_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED + + return transition_status + + def save_job_status(self): + ComputeCacheManager.get_instance().set_redis_params() + ComputeCacheManager.get_instance().get_status_cache().save_job_status( + self.run_id, self.get_entire_job_status()) + + def save_device_status_in_job(self, device_id): + ComputeCacheManager.get_instance().set_redis_params() + ComputeCacheManager.get_instance().get_status_cache().save_device_status_in_job( + self.run_id, device_id, self.get_job_status_in_slave(device_id)) + def process_job_completed_status(self, master_id, status): # Stop the system performance monitor try: @@ -75,10 +144,8 @@ def process_job_completed_status(self, master_id, status): self.report_deployment_status(self.run_id, GeneralConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) def process_job_exception_status(self, master_id, status): - # Send the exception status to slave devices. 
- self.report_exception_status( - self.edge_id_list, run_id=self.run_id, server_id=master_id, - status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) + # Report exception job status + self.report_exception_status(status) # Save the job status to local storage FedMLServerDataInterface.get_instance().save_job_status(self.run_id, master_id, status, status) @@ -113,9 +180,9 @@ def status_center_process_master_status(self, topic, payload): def process_job_status_consensus(self, run_id, master_id, status): # Set the master status in the job and entire job status - self.status_center.set_entire_job_status(status) - self.status_center.add_job_status_in_master(master_id, status) - status = self.status_center.get_entire_job_status() + self.set_entire_job_status(status) + self.add_job_status_in_master(master_id, status) + status = self.get_entire_job_status() # Set the device status based on the job status edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {}) @@ -152,8 +219,8 @@ def get_device_consensus_status_in_job(job_status, device_status): return None def get_device_consensus_status_in_current_device(self, edge_id, status): - self.status_center.add_job_status_in_slave(edge_id, status) - consensus_status = self.status_center.get_job_status_in_slave(edge_id) + self.add_job_status_in_slave(edge_id, status) + consensus_status = self.get_job_status_in_slave(edge_id) consensus_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED \ if consensus_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else consensus_status return consensus_status @@ -275,25 +342,13 @@ def report_server_status(self, run_id, edge_id, server_id, status): self.status_reporter.report_server_id_status( run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id, update_db=False) - def report_exception_status( - self, edge_id_list, run_id=0, server_id=None, status=None, payload=None): - if payload is None: - payload_obj = {"runId": run_id, "edgeids": edge_id_list} - if server_id is not None: - payload_obj["serverId"] = server_id - else: - payload_obj = json.loads(payload) - payload_obj["run_status"] = ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION if status is None else status - topic_exception = "flserver_agent/" + str(self.edge_id) + "/stop_train" - self.message_reporter.send_message(topic_exception, json.dumps(payload_obj)) + def report_exception_status(self, status): + self.status_reporter.report_job_status(self.run_id, status) def status_center_process_slave_status_to_master_in_slave_agent(self, topic, payload): # Forward the status message to the sender queue of message center. self.message_center.send_message(topic, payload) - # Post the status message to the listener queue of message center - #self.message_center.receive_message(GeneralConstants.MSG_TOPIC_REPORT_DEVICE_STATUS_IN_JOB, payload) - def status_center_process_slave_status_to_mlops_in_slave_agent(self, topic, payload): # Forward the status message to message center. self.message_center.send_message(topic, payload) From 8975a30028b966e899872c9d53f2b3aaa17afeda Mon Sep 17 00:00:00 2001 From: Alex Date: Fri, 17 May 2024 22:47:16 +0800 Subject: [PATCH 074/251] [CoreEngine] change the training timeout. 
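For scale, the one-line constant change below is worth spelling out:

```python
OLD_TRAIN_RUNNING_TIMEOUT = 60 * 60 * 12          # 12 hours
NEW_TRAIN_RUNNING_TIMEOUT = 60 * 60 * 24 * 2000   # 2000 days

print(OLD_TRAIN_RUNNING_TIMEOUT / 3600)   # 12.0 hours
print(NEW_TRAIN_RUNNING_TIMEOUT / 86400)  # 2000.0 days
```

At roughly five and a half years, the running-status timeout is effectively disabled for long training runs.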
--- python/fedml/computing/scheduler/comm_utils/constants.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/comm_utils/constants.py b/python/fedml/computing/scheduler/comm_utils/constants.py index f89d5640ce..f3fcd4ed5a 100644 --- a/python/fedml/computing/scheduler/comm_utils/constants.py +++ b/python/fedml/computing/scheduler/comm_utils/constants.py @@ -83,7 +83,7 @@ class SchedulerConstants: TRAIN_PROVISIONING_TIMEOUT = 60 * 25 TRAIN_STARTING_TIMEOUT = 60 * 15 TRAIN_STOPPING_TIMEOUT = 60 * 5 - TRAIN_RUNNING_TIMEOUT = 60 * 60 * 12 + TRAIN_RUNNING_TIMEOUT = 60 * 60 * 24 * 2000 TRAIN_INIT_TIMEOUT = 60 * 5 PUBLIC_REDIS_PORT = 6379 From c26eaff6bb7ca0fb5bd1dad5bde9ad5410d23253 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Fri, 17 May 2024 13:37:00 -0700 Subject: [PATCH 075/251] Add timestamp in status payload --- python/fedml/core/mlops/mlops_metrics.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py index afa96f6870..2ae415fab2 100644 --- a/python/fedml/core/mlops/mlops_metrics.py +++ b/python/fedml/core/mlops/mlops_metrics.py @@ -7,7 +7,7 @@ import requests import fedml -from . import MLOpsConfigs +from . import MLOpsConfigs, MLOpsUtils from .mlops_device_perfs import MLOpsDevicePerfStats from .mlops_job_perfs import MLOpsJobPerfStats from ...computing.scheduler.master.server_constants import ServerConstants @@ -221,6 +221,7 @@ def common_report_server_training_status(self, run_id, status, role=None, edge_i if role is None: role = "normal" msg = { + "timestamp": MLOpsUtils.get_ntp_time(), "run_id": run_id, "edge_id": edge_id, "status": status, From 7b576cbdc56fb5a6d4946a34d86a042eb41b2cc5 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Fri, 17 May 2024 14:24:57 -0700 Subject: [PATCH 076/251] Fix Import --- python/fedml/core/mlops/mlops_metrics.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py index 2ae415fab2..3746f498b4 100644 --- a/python/fedml/core/mlops/mlops_metrics.py +++ b/python/fedml/core/mlops/mlops_metrics.py @@ -7,7 +7,8 @@ import requests import fedml -from . 
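The import being repaired here is the one introduced for the timestamp field: pulling `MLOpsUtils` through the package `__init__` only works once that `__init__` has finished executing, so binding directly to the defining submodules sidesteps import-order problems inside the package. Conceptually, `get_ntp_time` should yield wall-clock time corrected by a cached NTP offset, so reports from skew-prone devices stay comparable; a minimal stand-in under that assumption:

```python
import time

_ntp_offset_ms = 0.0  # in the real agent this offset would come from an NTP sync


def get_ntp_time() -> int:
    # Milliseconds since the epoch, shifted by the cached NTP offset.
    return int(time.time() * 1000 + _ntp_offset_ms)


status_msg = {
    "timestamp": get_ntp_time(),  # the field added in the previous commit
    "run_id": 1234,
    "edge_id": 5678,
    "status": "RUNNING",
}
print(status_msg)
```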
import MLOpsConfigs, MLOpsUtils +from .mlops_utils import MLOpsUtils +from .mlops_configs import MLOpsConfigs from .mlops_device_perfs import MLOpsDevicePerfStats from .mlops_job_perfs import MLOpsJobPerfStats from ...computing.scheduler.master.server_constants import ServerConstants From 3f40511877a060e8d25149af5aab9ed0f351ce0d Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Fri, 17 May 2024 17:46:59 -0700 Subject: [PATCH 077/251] Fix run logs cli command --- python/fedml/api/modules/run.py | 11 ++++++----- python/fedml/cli/modules/run.py | 12 ++++++------ .../scheduler/scheduler_entry/run_manager.py | 9 ++++----- 3 files changed, 16 insertions(+), 16 deletions(-) diff --git a/python/fedml/api/modules/run.py b/python/fedml/api/modules/run.py index 120a964316..cf50ce24b4 100644 --- a/python/fedml/api/modules/run.py +++ b/python/fedml/api/modules/run.py @@ -51,7 +51,7 @@ def start(platform: str, create_run_result: FedMLRunStartedModel, device_server: run_start_result = FedMLRunManager.get_instance().start_run(platform=platform, create_run_result=create_run_result, device_server=device_server, device_edges=device_edges, - api_key=api_key, + api_key=get_api_key(), feature_entry_point=feature_entry_point) return run_start_result @@ -79,7 +79,7 @@ def status(run_name: Optional[str], run_id: str, platform: str, api_key: str) -> _authenticate_and_validate_platform(api_key, platform) run_status = None - run_list_obj = list_run(run_name=run_name, run_id=run_id, platform=platform, api_key=api_key) + run_list_obj = list_run(run_name=run_name, run_id=run_id, platform=platform, api_key=get_api_key()) if run_list_obj is not None: if len(run_list_obj.run_list) > 1: @@ -93,12 +93,13 @@ def status(run_name: Optional[str], run_id: str, platform: str, api_key: str) -> # input: run_id, page_num, page_size, need_all_logs, platform, api_key # return RunLogResult(run_status, total_log_lines, total_log_pages, log_line_list, run_logs) def logs(run_id: str, page_num: int, page_size: int, need_all_logs: bool, platform: str, api_key: str) -> RunLogResult: - _authenticate_and_validate_platform(api_key, platform) + api_key = authenticate(api_key) + validate_platform(platform) if run_id is None: raise Exception("Please specify run id.") - _, run_status = status(run_name=None, run_id=run_id, platform=platform, api_key=get_api_key()) + _, run_status = status(run_name=None, run_id=run_id, platform=platform, api_key=api_key) total_log_nums, total_log_pages, log_line_list, run_logs = 0, 0, list(), None @@ -110,7 +111,7 @@ def logs(run_id: str, page_num: int, page_size: int, need_all_logs: bool, platfo user_api_key=api_key) if run_logs is not None: - total_log_pages, total_log_nums = run_logs.total_num, run_logs.total_pages + total_log_pages, total_log_nums = run_logs.total_pages, run_logs.total_num _parse_logs(log_line_list, run_logs) return RunLogResult(run_status=run_status, total_log_lines=total_log_nums, total_log_pages=total_log_pages, diff --git a/python/fedml/cli/modules/run.py b/python/fedml/cli/modules/run.py index b4e8a947fd..f2c24b445a 100644 --- a/python/fedml/cli/modules/run.py +++ b/python/fedml/cli/modules/run.py @@ -184,21 +184,21 @@ def status(platform, run_name, run_id, api_key, version): "--page_num", "-pn", type=int, - default=0, + default=1, help="request page num for logs. 
--need_all_logs should be set to False if you want to use this option.", ) @click.option( "--page_size", "-ps", type=int, - default=0, + default=10, help="request page size for logs, --need_all_logs should be set to False if you want to use this option.", ) @click.option( "--need_all_logs", "-a", type=bool, - default=True, + default=False, help="boolean value representing if all logs are needed. Default to True", ) def logs(platform, run_id, api_key, version, page_num, page_size, need_all_logs): @@ -217,8 +217,8 @@ def logs(platform, run_id, api_key, version, page_num, page_size, need_all_logs) return # Show run log summary info - log_head_table = PrettyTable(['Run ID', 'Total Log Lines', 'Log URL']) - log_head_table.add_row([run_id, run_log_result.total_log_lines, run_logs.log_full_url]) + log_head_table = PrettyTable(['Run ID', 'Printed Log Lines', 'Total Log Lines', 'Log URL']) + log_head_table.add_row([run_id, len(run_log_result.log_line_list), run_logs.total_num, run_logs.log_full_url]) click.echo("\nLogs summary info is as follows.") print(log_head_table) @@ -234,7 +234,7 @@ def logs(platform, run_id, api_key, version, page_num, page_size, need_all_logs) if len(run_log_result.log_line_list) > 0: click.echo("\nAll logs is as follows.") for log_line in run_log_result.log_line_list: - click.echo(log_line.rstrip('\n')) + click.echo(log_line) def _print_run_table(run_list_obj): diff --git a/python/fedml/computing/scheduler/scheduler_entry/run_manager.py b/python/fedml/computing/scheduler/scheduler_entry/run_manager.py index 84fe109054..b91935e7b2 100755 --- a/python/fedml/computing/scheduler/scheduler_entry/run_manager.py +++ b/python/fedml/computing/scheduler/scheduler_entry/run_manager.py @@ -162,10 +162,10 @@ def __init__(self, run_log_list_json): self.log_devices = list() for log_dev in log_devices_json: self.log_devices.append(FedMLRunLogDeviceModel(log_dev)) - self.total_num = run_log_list_json.get("total_num", 0) - self.total_pages = run_log_list_json.get("total_pages", 0) - self.current_page = run_log_list_json.get("current_page", 0) - self.log_lines = run_log_list_json.get("logs", []) + self.total_num = run_log_list_json.get("totalSize", 0) + self.total_pages = run_log_list_json.get("totalPages", 0) + self.current_page = run_log_list_json.get("pageNum", 0) + self.log_lines = run_log_list_json.get("logList", []) class FedMLRunLogDeviceModel(object): @@ -277,7 +277,6 @@ def get_run_logs(self, run_id: str, page_num: int, page_size: int, user_api_key: run_log_list_result = None run_logs_json = { "apiKey": user_api_key, - "edgeId": "-1", "pageNum": page_num, "pageSize": page_size, "runId": run_id, From 8c13ed2ad04b94bb291124c5c70b91c78cd50900 Mon Sep 17 00:00:00 2001 From: Alex Date: Mon, 20 May 2024 17:38:46 +0800 Subject: [PATCH 078/251] [CoreEngine] make the server status work. 
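Besides threading `server_id` through the status entity, the diff below keys the manager map by `str(run_id)` and caps it at `ALLOWED_MAX_JOB_STATUS_CACHE_NUM`, evicting a single completed run when full. The eviction rule in miniature (illustrative names):

```python
ALLOWED_MAX_JOB_STATUS_CACHE_NUM = 3  # small cap for demonstration


def get_or_create_manager(instances, run_id, is_completed):
    run_id_str = str(run_id)
    if run_id_str in instances:
        return instances[run_id_str]
    if len(instances) >= ALLOWED_MAX_JOB_STATUS_CACHE_NUM:
        for key, mgr in list(instances.items()):
            if is_completed(mgr):
                instances.pop(key)  # evict one completed run, then stop scanning
                break
    instances[run_id_str] = {"run_id": int(run_id), "status": None}
    return instances[run_id_str]


instances = {}
for rid in range(5):
    get_or_create_manager(instances, rid,
                          is_completed=lambda m: m["status"] == "FINISHED")
print(len(instances))  # 5: with nothing completed, the map exceeds the cap
```

Note that when no cached run is completed, nothing is evicted and the map grows past the cap; that matches the diff, which only sheds finished jobs.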
--- .../master_protocol_manager.py | 6 ++ .../scheduler_core/message_common.py | 2 + .../scheduler/scheduler_core/status_center.py | 62 ++++++++++++------- .../status_manager_protocols.py | 4 +- 4 files changed, 48 insertions(+), 26 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index 01165ff82e..09c2dd5d17 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -11,6 +11,7 @@ from .master_job_runner_manager import FedMLDeployJobRunnerManager from ..scheduler_core.general_constants import GeneralConstants from ..scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol +from ..scheduler_core.compute_cache_manager import ComputeCacheManager class FedMLDeployMasterProtocolManager(FedMLBaseMasterProtocolManager): @@ -135,6 +136,11 @@ def callback_delete_deployment(self, topic, payload): model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_id, model_msg_object.model_name, model_msg_object.model_version) + # Report the launch job status with killed status. + launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(self.run_id) + if launch_job_id is not None: + self.status_reporter.report_server_id_status(launch_job_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED) + def callback_start_deployment(self, topic, payload): # noinspection PyBroadException try: diff --git a/python/fedml/computing/scheduler/scheduler_core/message_common.py b/python/fedml/computing/scheduler/scheduler_core/message_common.py index 24449af3b5..13b99ff39d 100755 --- a/python/fedml/computing/scheduler/scheduler_core/message_common.py +++ b/python/fedml/computing/scheduler/scheduler_core/message_common.py @@ -49,6 +49,7 @@ def __init__(self, topic=None, payload=None, status_msg_body: dict = None): self.payload = payload self.run_id = None self.edge_id = None + self.server_id = None self.status = None if status_msg_body is not None: self.from_message_body(status_msg_body=status_msg_body) @@ -61,6 +62,7 @@ def from_message_body(self, status_msg_body: dict = None): self.run_id = payload_json.get("run_id", None) self.run_id = payload_json.get("runId", None) if self.run_id is None else self.run_id self.edge_id = payload_json.get("edge_id", None) + self.server_id = payload_json.get("server_id", None) self.status = payload_json.get("status", None) def get_message_body(self): diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py index c0e1b6633a..fc1c726b5f 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py @@ -81,6 +81,7 @@ class FedMLStatusCenter(object): TOPIC_SLAVE_JOB_LAUNCH_SUFFIX = "/start_train" TOPIC_SLAVE_JOB_STOP_PREFIX = "flserver_agent/" TOPIC_SLAVE_JOB_STOP_SUFFIX = "/stop_train" + ALLOWED_MAX_JOB_STATUS_CACHE_NUM = 1000 def __init__(self, message_queue=None): self.status_queue = message_queue @@ -203,38 +204,43 @@ def run_status_dispatcher(self, status_event, status_queue, status_entity = FedMLStatusEntity(status_msg_body=message_body) # Generate status manager instance - if status_manager_instances.get(status_entity.run_id) is None: - status_manager_instances[status_entity.run_id] = FedMLStatusManager( - 
run_id=status_entity.run_id, edge_id=status_entity.edge_id, status_center=self, + run_id_str = str(status_entity.run_id) + run_id_int = int(status_entity.run_id) + if status_manager_instances.get(run_id_str) is None: + if len(status_manager_instances.keys()) >= FedMLStatusCenter.ALLOWED_MAX_JOB_STATUS_CACHE_NUM: + for iter_run_id, iter_status_mgr in status_manager_instances.items(): + if iter_status_mgr.is_job_completed(): + status_manager_instances.pop(iter_run_id) + break + status_manager_instances[run_id_str] = FedMLStatusManager( + run_id=run_id_int, edge_id=status_entity.edge_id, + server_id=status_entity.server_id, status_center=self, message_center=message_center) else: - status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id + status_manager_instances[run_id_str].edge_id = status_entity.edge_id + if status_entity.server_id is None and status_entity.server_id != 0: + status_manager_instances[run_id_str].server_id = status_entity.server_id # if the job status is completed then continue - if status_manager_instances[status_entity.run_id].is_job_completed(): + if status_manager_instances[run_id_str].is_job_completed(): continue # Process the master and slave status. if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_MASTER_STATUS_PREFIX): # Process the job status - status_manager_instances[status_entity.run_id].status_center_process_master_status( + status_manager_instances[run_id_str].status_center_process_master_status( message_entity.topic, message_entity.payload) # Save the job status - status_manager_instances[status_entity.run_id].save_job_status() - - # Popup the status manager instance when the job status is completed - if status_manager_instances[status_entity.run_id].is_job_completed(): - status_manager_instances.pop(status_entity.run_id) - continue + status_manager_instances[run_id_str].save_job_status() elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX): # Process the slave device status - status_manager_instances[status_entity.run_id].status_center_process_slave_status( + status_manager_instances[run_id_str].status_center_process_slave_status( message_entity.topic, message_entity.payload) # Save the device status in job - status_manager_instances[status_entity.run_id].save_device_status_in_job(status_entity.edge_id) + status_manager_instances[run_id_str].save_device_status_in_job(status_entity.edge_id) except Exception as e: if message_entity is not None: @@ -295,40 +301,48 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue, status_entity = FedMLStatusEntity(status_msg_body=message_body) # Generate status manager instance - if status_manager_instances.get(status_entity.run_id) is None: - status_manager_instances[status_entity.run_id] = FedMLStatusManager( - run_id=status_entity.run_id, edge_id=status_entity.edge_id, status_center=self, + run_id_str = str(status_entity.run_id) + run_id_int = int(status_entity.run_id) + if status_manager_instances.get(run_id_str) is None: + if len(status_manager_instances.keys()) >= FedMLStatusCenter.ALLOWED_MAX_JOB_STATUS_CACHE_NUM: + for iter_run_id, iter_status_mgr in status_manager_instances.items(): + if iter_status_mgr.is_job_completed(): + status_manager_instances.pop(iter_run_id) + break + + status_manager_instances[run_id_str] = FedMLStatusManager( + run_id=run_id_int, edge_id=status_entity.edge_id, status_center=self, message_center=message_center) else: - status_manager_instances[status_entity.run_id].edge_id = status_entity.edge_id + 
status_manager_instances[run_id_str].edge_id = status_entity.edge_id # Process the slave status if message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_PREFIX): # Report the slave status to master - status_manager_instances[status_entity.run_id]. \ + status_manager_instances[run_id_str]. \ status_center_process_slave_status_to_master_in_slave_agent( message_entity.topic, message_entity.payload ) elif message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_STATUS_TO_MLOPS_PREFIX): # Report slave status to mlops (Active/IDLE message) - status_manager_instances[status_entity.run_id]. \ + status_manager_instances[run_id_str]. \ status_center_process_slave_status_to_mlops_in_slave_agent( message_entity.topic, message_entity.payload ) elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_PREFIX) and message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_SUFFIX)): # Async request the job status from master when launching the job - job_launch_message_map[status_entity.run_id] = {"topic": message_entity.topic, + job_launch_message_map[run_id_str] = {"topic": message_entity.topic, "payload": message_entity.payload} - # status_manager_instances[status_entity.run_id]. \ + # status_manager_instances[run_id_str]. \ # status_center_request_job_status_from_master_in_slave_agent( # message_entity.topic, message_entity.payload # ) elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_PREFIX) and message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_STOP_SUFFIX)): # Cleanup when stopped the job - if job_launch_message_map.get(status_entity.run_id, None) is not None: - job_launch_message_map.pop(status_entity.run_id) + if job_launch_message_map.get(run_id_str, None) is not None: + job_launch_message_map.pop(run_id_str) except Exception as e: if message_entity is not None: diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 272423f147..31afed463b 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -294,7 +294,7 @@ def process_device_status(self, run_id, edge_id, status): fault_tolerance_rate=fault_tolerance_rate) if status_to_report is not None: logging.info(f"Run completed when processing edge status, will report status {status_to_report}") - self.report_server_status(run_id, edge_id, server_id, status_to_report) + self.report_server_status(run_id, server_id, server_id, status_to_report) def calculate_server_status( self, run_id, total_edge_nums, number_of_failed_edges, number_of_finished_edges, @@ -340,7 +340,7 @@ def parse_fault_tolerance_params(self, run_id): def report_server_status(self, run_id, edge_id, server_id, status): self.status_reporter.report_server_id_status( - run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=edge_id, update_db=False) + run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=server_id, update_db=False) def report_exception_status(self, status): self.status_reporter.report_job_status(self.run_id, status) From 98eff5585390047ff99e7c268abb92cc9cfd7f11 Mon Sep 17 00:00:00 2001 From: Alex Date: Mon, 20 May 2024 17:40:26 +0800 Subject: [PATCH 079/251] [CoreEngine] make the server status work. 
--- .../fedml/computing/scheduler/scheduler_core/status_center.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py index fc1c726b5f..65258af2d9 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py @@ -309,7 +309,7 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue, if iter_status_mgr.is_job_completed(): status_manager_instances.pop(iter_run_id) break - + status_manager_instances[run_id_str] = FedMLStatusManager( run_id=run_id_int, edge_id=status_entity.edge_id, status_center=self, message_center=message_center) From f037b32d4aca2142152aec3f03aa0ef1855616c8 Mon Sep 17 00:00:00 2001 From: Alex Date: Mon, 20 May 2024 19:59:01 +0800 Subject: [PATCH 080/251] [CoreEngine] make the job stopping work. --- .../master/base_master_job_runner.py | 12 +++++------- .../master/base_master_protocol_manager.py | 19 +++++++++++-------- .../scheduler_base_job_runner_manager.py | 4 ++++ .../status_manager_protocols.py | 2 +- .../slave/base_slave_protocol_manager.py | 15 +++++---------- python/fedml/core/mlops/mlops_metrics.py | 6 ++++-- 6 files changed, 30 insertions(+), 28 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_job_runner.py b/python/fedml/computing/scheduler/master/base_master_job_runner.py index 18aa8a6eed..9ebab258bb 100755 --- a/python/fedml/computing/scheduler/master/base_master_job_runner.py +++ b/python/fedml/computing/scheduler/master/base_master_job_runner.py @@ -285,6 +285,10 @@ def run_server_job_impl(self, process_event, completed_event, self.args.run_id = self.run_id MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) + self.status_reporter.report_server_id_status( + run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id, + server_id=self.edge_id, server_agent_id=self.edge_id) + # get training params private_local_data_dir = data_config.get("privateLocalData", "") is_using_local_data = 0 @@ -562,7 +566,7 @@ def detect_edges_status( return True, active_edge_info_dict, inactivate_edges def report_exception_status(self, run_id): - self.status_reporter.report_job_status(run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION) + self.mlops_metrics.report_job_status(run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION) def callback_run_logs(self, topic, payload): run_id = str(topic).split('/')[-1] @@ -618,12 +622,6 @@ def send_training_request_to_edges(self, request_json, active_edge_info_dict=Non f"request GPU count {request_num_gpus}" logging.error(err_info) - # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the - # status from running to failed. 
- self.mlops_metrics.report_server_training_status( - run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id - ) - self.status_reporter.report_server_id_status( run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, server_id=self.edge_id, server_agent_id=self.server_agent_id) diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index cee91578dd..46a6448269 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -267,19 +267,22 @@ def callback_stop_train(self, topic, payload, use_payload=None): server_id = request_json.get("serverId", None) if server_id is None: server_id = request_json.get("server_id", None) + edge_ids = request_json.get("edgeids", None) - # Broadcast the job status to all edges - self.rebuild_status_center(self.get_status_queue()) - self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED) + # Stop the job runner + self._get_job_runner_manager().stop_job_runner( + run_id, args=self.args, server_id=server_id, request_json=request_json, + run_as_cloud_agent=self.run_as_cloud_agent) # Cleanup the cached object if self.running_request_json.get(run_id_str, None) is not None: self.running_request_json.pop(run_id_str) - # Stop the job runner - self._get_job_runner_manager().stop_job_runner( - run_id, args=self.args, server_id=server_id, request_json=request_json, - run_as_cloud_agent=self.run_as_cloud_agent) + # Reset all edge status and server status + for iter_edge_id in edge_ids: + self.generate_status_report(run_id, iter_edge_id, server_agent_id=server_id).\ + report_client_id_status(iter_edge_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED, + run_id=run_id, server_id=server_id) def callback_complete_job(self, topic, payload): # Parse the parameters. 
@@ -536,7 +539,7 @@ def send_status_msg_to_edges(self, edge_id_list, run_id, server_id, context=None self.send_status_check_msg(run_id, edge_id, self.edge_id, context=context) def report_exception_status(self, run_id): - self.status_reporter.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION) + self.mlops_metrics.report_job_status(run_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION) @staticmethod def get_start_train_topic_with_edge_id(edge_id): diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py index 77768da6c0..dcc4045699 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner_manager.py @@ -39,6 +39,10 @@ def stop_job_runner(self, run_id): if self.job_runners.get(run_id_str, None) is not None: self.job_runners[run_id_str].trigger_stop_event() + def stop_all_job_runner(self): + for run_id, job_runner in self.job_runners.items(): + job_runner.trigger_stop_event() + def complete_job_runner(self, run_id): run_id_str = str(run_id) if self.job_runners.get(run_id_str, None) is not None: diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 31afed463b..96f5e4920f 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -343,7 +343,7 @@ def report_server_status(self, run_id, edge_id, server_id, status): run_id, status, edge_id=edge_id, server_id=server_id, server_agent_id=server_id, update_db=False) def report_exception_status(self, status): - self.status_reporter.report_job_status(self.run_id, status) + self.message_reporter.report_job_status(self.run_id, status) def status_center_process_slave_status_to_master_in_slave_agent(self, topic, payload): # Forward the status message to the sender queue of message center. 
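The slave-side diff below leans on the new stop_all_job_runner helper added
above; a self-contained sketch of that runner bookkeeping (stub classes,
simplified from the scheduler_base_job_runner_manager change):

    class StubJobRunner:
        def __init__(self):
            self.stopped = False

        def trigger_stop_event(self):
            # The real runner sets a multiprocessing stop event here.
            self.stopped = True

    class StubJobRunnerManager:
        def __init__(self):
            self.job_runners = {}  # keyed by str(run_id), as in the tree

        def stop_job_runner(self, run_id):
            runner = self.job_runners.get(str(run_id))
            if runner is not None:
                runner.trigger_stop_event()

        def stop_all_job_runner(self):
            # Used by the logout handler below to stop every live runner.
            for _, runner in self.job_runners.items():
                runner.trigger_stop_event()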
diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index aa69d4482d..a6d43936d2 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -57,7 +57,6 @@ def __init__(self, args, agent_config=None): self.fl_topic_request_device_info = None self.communication_mgr = None self.subscribed_topics = list() - self.job_runners = dict() self.ota_upgrade = FedMLOtaUpgrade(edge_id=args.edge_id) self.running_request_json = dict() self.start_request_json = None @@ -423,8 +422,7 @@ def callback_client_logout(self, topic, payload): if secret is None or str(secret) != "246b1be6-0eeb-4b17-b118-7d74de1975d4": return logging.info("Received the logout request.") - for runner in self.job_runners: - runner.trigger_stop_event() + self._get_job_runner_manager().stop_all_job_runner() self.disable_client_login = True time.sleep(3) os.system("fedml logout") @@ -451,7 +449,7 @@ def callback_response_job_status(self, topic, payload): # process the status logging.info("process status in the job status callback.") - self.process_status(run_id, job_status, edge_id) + self.process_status(run_id, job_status, edge_id, master_id=master_agent) def callback_broadcasted_job_status(self, topic, payload): # Parse the parameters @@ -489,15 +487,14 @@ def generate_protocol_manager(self): return message_status_runner - def process_status(self, run_id, status, edge_id): + def process_status(self, run_id, status, edge_id, master_id=None): run_id_str = str(run_id) # Process the completed status if status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \ status == GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED: - if self.job_runners.get(run_id_str, None) is not None: - self.job_runners[run_id_str].trigger_completed_event() + self._get_job_runner_manager().complete_job_runner(run_id) # Stop the sys perf process # noinspection PyBoardException @@ -584,9 +581,7 @@ def get_all_run_process_list_map(self): return run_process_dict def stop_job(self, run_id): - run_id_str = str(run_id) - if self.job_runners.get(run_id_str, None) is not None: - self.job_runners[run_id_str].trigger_stop_event() + self._get_job_runner_manager().stop_job_runner(run_id) @staticmethod def get_start_train_topic_with_edge_id(edge_id): diff --git a/python/fedml/core/mlops/mlops_metrics.py b/python/fedml/core/mlops/mlops_metrics.py index afa96f6870..c27a683759 100644 --- a/python/fedml/core/mlops/mlops_metrics.py +++ b/python/fedml/core/mlops/mlops_metrics.py @@ -185,9 +185,11 @@ def report_server_training_status(self, run_id, status, edge_id=0, role=None, from ...computing.scheduler.master.server_data_interface import FedMLServerDataInterface FedMLServerDataInterface.get_instance().save_job(run_id, self.edge_id, status, running_json) - def report_job_status(self, run_id, status): + def report_job_status(self, run_id, status, master_id=None): topic_name = f"master_agent/slave_agent/job_status/{run_id}" - payload = {"run_id": run_id, "status": status} + payload = {"run_id": run_id, "status": status, "fedml_version": fedml.__version__} + if master_id is not None: + payload["master_agent"] = master_id message_json = json.dumps(payload) self.send_message(topic_name, message_json) From a0664d87f54cb4d2e3faca683f4562bbdb8f5976 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Mon, 20 May 2024 22:55:59 +0000 
Subject: [PATCH 081/251] Add support for GPU Utilization

---
 .../comm_utils/gpu_utils/qualcomm_utils.py    | 20 ++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
index 9c7ea21ea9..88114cf2ad 100644
--- a/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
+++ b/python/fedml/computing/scheduler/comm_utils/gpu_utils/qualcomm_utils.py
@@ -43,14 +43,16 @@ def get_gpu_cards() -> List[GPUCard]:

     @staticmethod
     def get_available_gpu_card_ids(order: str, limit: int, max_load: float, max_memory: float) -> List[int]:
-
-        if order != "memory":
+        gpu_cards: List[GPUCard] = QualcommNPUtil.get_gpu_cards()
+        gpu_cards = list(filter(lambda card: (card.memoryUtil < max_memory and card.load < max_load), gpu_cards))
+        if order == 'memory':
+            gpu_cards.sort(key=lambda card: float('inf') if math.isnan(card.memoryUtil) else card.memoryUtil, reverse=False)
+        elif order == 'load':
+            gpu_cards.sort(key=lambda card: float('inf') if math.isnan(card.load) else card.load, reverse=False)
+        else:
             raise NotImplementedError(f"Qualcomm utils doesn't have support to compute availability based on {order}. "
-                                      f"Supported criteria: [memory]")
+                                      f"Supported criteria: [memory, load]")

-        gpu_cards: List[GPUCard] = QualcommNPUtil.get_gpu_cards()
-        gpu_cards = list(filter(lambda card: card.memoryUtil < max_memory, gpu_cards))
-        gpu_cards.sort(key=lambda card: float('inf') if math.isnan(card.memoryUtil) else card.memoryUtil, reverse=False)
         gpu_cards = gpu_cards[0:min(limit, len(gpu_cards))]
         return list(map(lambda card: card.id, gpu_cards))

@@ -75,11 +77,14 @@ def get_docker_gpu_ids_by_container_name(container_name: str, docker_client: Doc
     @staticmethod
     def __convert(npu) -> GPUCard:
-        # TODO (alaydshah): Add support for load, memoryUtil, temperature
+        # TODO (alaydshah): Add support for temperature
         memory_total = npu.devData.resourceInfo.dramTotal / 1024
         memory_free = npu.devData.resourceInfo.dramFree / 1024
         memory_used = memory_total - memory_free
         memory_utilized = float(memory_used) / float(memory_total)
+        nsp_free = npu.devData.resourceInfo.nspFree
+        nsp_total = npu.devData.resourceInfo.nspTotal
+        load = (nsp_total - nsp_free) / nsp_total

         return GPUCard(
             id=npu.qid,
@@ -91,6 +96,7 @@ def __convert(npu) -> GPUCard:
             memoryFree=memory_free,
             memoryUsed=memory_used,
             memoryUtil=memory_utilized,
+            load=load,
         )

     @staticmethod

From ae06bf312bf7feb1467a0d481557f2bfa95bc1a4 Mon Sep 17 00:00:00 2001
From: Alay Shah
Date: Tue, 21 May 2024 01:52:39 -0700
Subject: [PATCH 082/251] Add 10 minutes TTL Cache for config fetch

---
 .../computing/scheduler/model_scheduler/modelops_configs.py | 2 ++
 python/fedml/core/mlops/mlops_configs.py | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/python/fedml/computing/scheduler/model_scheduler/modelops_configs.py b/python/fedml/computing/scheduler/model_scheduler/modelops_configs.py
index e988c29a8a..719f3825c4 100644
--- a/python/fedml/computing/scheduler/model_scheduler/modelops_configs.py
+++ b/python/fedml/computing/scheduler/model_scheduler/modelops_configs.py
@@ -4,6 +4,7 @@
 import certifi
 import requests
+import cachetools.func

 import fedml
 from fedml.core.mlops.mlops_utils import MLOpsUtils
@@ -32,6 +33,7 @@ def get_instance(args):
         return ModelOpsConfigs._config_instance

     @staticmethod
+    @cachetools.func.ttl_cache(ttl=600)
     def get_request_params():
         url = fedml._get_backend_service()
         url = 
"{}/fedmlOpsServer/configs/fetch".format(url) diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py index c8c6422d6c..6c25c38128 100644 --- a/python/fedml/core/mlops/mlops_configs.py +++ b/python/fedml/core/mlops/mlops_configs.py @@ -4,6 +4,7 @@ import certifi import requests +import cachetools.func import fedml from fedml.core.mlops.mlops_utils import MLOpsUtils @@ -41,6 +42,7 @@ def __init__(self): pass @staticmethod + @cachetools.func.ttl_cache(ttl=600) def get_request_params(): url = fedml._get_backend_service() url = f"{url}/fedmlOpsServer/configs/fetch" From 3214f4a569e63a260000a14267e6e1f1147b7791 Mon Sep 17 00:00:00 2001 From: Alex Date: Tue, 21 May 2024 20:38:01 +0800 Subject: [PATCH 083/251] [CoreEngine] fixed the issue that the endpoint status is aborted when the deployment is fresh and failed. --- .../master/base_master_protocol_manager.py | 9 +++-- .../scheduler/master/deploy_job_launcher.py | 12 +++++- .../model_scheduler/master_job_runner.py | 6 ++- .../master_protocol_manager.py | 10 ++++- .../scheduler/scheduler_core/status_center.py | 5 ++- .../slave/base_slave_protocol_manager.py | 40 ------------------- python/fedml/core/mlops/mlops_runtime_log.py | 2 + 7 files changed, 35 insertions(+), 49 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index 46a6448269..c95d73f4bf 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -284,6 +284,10 @@ def callback_stop_train(self, topic, payload, use_payload=None): report_client_id_status(iter_edge_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED, run_id=run_id, server_id=server_id) + # To be compatible to the previous version of edge devices, we just send the stopping train message to edges. + # Currently, the latest version of edge devices don't need to process the stopping train message. + self.send_training_stop_request_to_edges(edge_ids, payload=payload, run_id=run_id) + def callback_complete_job(self, topic, payload): # Parse the parameters. 
request_json = json.loads(payload) @@ -508,13 +512,12 @@ def send_training_stop_request_to_edges( self, edge_id_list, payload=None, run_id=0): if payload is None: payload_obj = {"runId": run_id, "edgeids": edge_id_list} - else: - payload_obj = json.loads(payload) + payload = json.dumps(payload_obj) for edge_id in edge_id_list: topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train" logging.info("stop_train: send topic " + topic_stop_train) - self.message_center.send_message(topic_stop_train, json.dumps(payload_obj)) + self.message_center.send_message(topic_stop_train, payload) def send_training_stop_request_to_specific_edge(self, edge_id, payload): topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train" diff --git a/python/fedml/computing/scheduler/master/deploy_job_launcher.py b/python/fedml/computing/scheduler/master/deploy_job_launcher.py index e4af2a20be..359c6e641f 100755 --- a/python/fedml/computing/scheduler/master/deploy_job_launcher.py +++ b/python/fedml/computing/scheduler/master/deploy_job_launcher.py @@ -3,6 +3,7 @@ from fedml.computing.scheduler.model_scheduler import device_client_constants from fedml.computing.scheduler.model_scheduler.device_model_cards import FedMLModelCards from fedml.computing.scheduler.scheduler_entry.constants import Constants +from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager class FedMLDeployJobLauncher: @@ -40,6 +41,8 @@ def deploy_model(serving_devices, request_json, run_id): "", random_list[1], None, in_model_id=model_id, in_model_version=model_version, endpoint_name=endpoint_name, endpoint_id=endpoint_id, run_id=run_id) + return endpoint_id + return None def check_model_device_ready_and_deploy(self, request_json, run_id, master_device_id, slave_device_id, run_edge_ids=None): @@ -87,4 +90,11 @@ def check_model_device_ready_and_deploy(self, request_json, run_id, master_devic serving_devices.extend(device_slave_ids) # Start to deploy the model - FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id) + endpoint_id = FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id) + + # Save the relationship between run id and endpoint + ComputeCacheManager.get_instance().set_redis_params() + ComputeCacheManager.get_instance().get_gpu_cache().set_endpoint_run_id_map( + endpoint_id, run_id) + + diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index eef03d53f2..bf9cee3279 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -51,6 +51,7 @@ def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id self.deployed_replica_payload = None self.slave_deployment_results_map = dict() self.deployment_result_queue = Queue() + self.is_fresh_endpoint = True # Override def _generate_job_runner_instance(self, args, run_id=None, request_json=None, agent_config=None, edge_id=None, ): @@ -75,6 +76,7 @@ def run_impl( inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \ FedMLDeployMasterJobRunner.parse_model_run_params(self.request_json) self.run_id = run_id + self.is_fresh_endpoint = self.request_json.get("is_fresh_endpoint", True) # Print request parameters. 
logging.info("model deployment request: {}".format(self.request_json)) @@ -246,7 +248,7 @@ def process_deployment_result_message(self, topic=None, payload=None): f"{self.request_json}") return - logging.info(f"End point {end_point_id}; Device {device_id}; replica {replica_no}; " + logging.info(f"Endpoint {end_point_id}; Device {device_id}; replica {replica_no}; " f"run_operation {run_operation} model status {model_status}.") # OPTIONAL DEBUG PARAMS @@ -280,7 +282,7 @@ def process_deployment_result_message(self, topic=None, payload=None): logging.error(f"Unsupported model status {model_status}.") # Avoid endless loop, if the rollback also failed, we should report the failure to the MLOps - if self.replica_controller.under_rollback: + if self.replica_controller.under_rollback or self.is_fresh_endpoint: self.send_deployment_status( end_point_id, end_point_name, payload_json["model_name"], "", ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index 09c2dd5d17..d21ab44c6e 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -109,6 +109,10 @@ def callback_delete_deployment(self, topic, payload): # Parse payload as the model message object. model_msg_object = FedMLModelMsgObject(topic, payload) + # Get the launch job id + ComputeCacheManager.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(self.run_id) + # Delete SQLite records FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) FedMLModelDatabase.get_instance().delete_deployment_result( @@ -137,7 +141,6 @@ def callback_delete_deployment(self, topic, payload): model_msg_object.model_name, model_msg_object.model_version) # Report the launch job status with killed status. 
- launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(self.run_id) if launch_job_id is not None: self.status_reporter.report_server_id_status(launch_job_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED) @@ -180,6 +183,11 @@ def callback_start_deployment(self, topic, payload): # Set redis config FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) + # Query if the endpoint exists + endpoint_device_info = FedMLModelCache.get_instance(self.redis_addr, self.redis_port).get_end_point_device_info( + request_json["end_point_id"]) + request_json["is_fresh_endpoint"] = True if endpoint_device_info is None else False + # Save the user setting (about replica number) of this run to Redis, if existed, update it FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_user_setting_replica_num( end_point_id=run_id, end_point_name=end_point_name, model_name=model_name, model_version=model_version, diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py index 65258af2d9..4ababbc826 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py @@ -331,9 +331,10 @@ def run_status_dispatcher_in_slave(self, status_event, status_queue, ) elif (message_entity.topic.startswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_PREFIX) and message_entity.topic.endswith(FedMLStatusCenter.TOPIC_SLAVE_JOB_LAUNCH_SUFFIX)): + pass # Async request the job status from master when launching the job - job_launch_message_map[run_id_str] = {"topic": message_entity.topic, - "payload": message_entity.payload} + # job_launch_message_map[run_id_str] = {"topic": message_entity.topic, + # "payload": message_entity.payload} # status_manager_instances[run_id_str]. 
\ # status_center_request_job_status_from_master_in_slave_agent( # message_entity.topic, message_entity.payload diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index a6d43936d2..de97684061 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -10,7 +10,6 @@ from ..comm_utils.constants import SchedulerConstants from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs from ..comm_utils.run_process_utils import RunProcessUtils -from ....core.mlops import MLOpsMetrics from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog from ....core.mlops.mlops_configs import MLOpsConfigs from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon @@ -44,7 +43,6 @@ def __init__(self, args, agent_config=None): self.unique_device_id = args.unique_device_id self.agent_config = agent_config self.topic_start_train = None - self.topic_stop_train = None self.topic_report_status = None self.topic_ota_msg = None self.topic_request_device_info = None @@ -53,7 +51,6 @@ def __init__(self, args, agent_config=None): self.topic_response_job_status = None self.topic_report_device_status_in_job = None self.fl_topic_start_train = None - self.fl_topic_stop_train = None self.fl_topic_request_device_info = None self.communication_mgr = None self.subscribed_topics = list() @@ -75,9 +72,6 @@ def generate_topics(self): # The topic for stopping training self.topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train" - # The topi for stopping training - self.topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train" - # The topic for reporting current device status. self.topic_report_status = "mlops/report_device_status" @@ -107,13 +101,11 @@ def generate_topics(self): if self.general_edge_id is not None: self.fl_topic_start_train = "flserver_agent/" + str(self.general_edge_id) + "/start_train" - self.fl_topic_stop_train = "flserver_agent/" + str(self.general_edge_id) + "/stop_train" self.fl_topic_request_device_info = "server/client/request_device_info/" + str(self.general_edge_id) # Subscribe topics for starting train, stopping train and fetching client status. 
self.subscribed_topics.clear() self.add_subscribe_topic(self.topic_start_train) - self.add_subscribe_topic(self.topic_stop_train) self.add_subscribe_topic(self.topic_report_status) self.add_subscribe_topic(self.topic_ota_msg) self.add_subscribe_topic(self.topic_request_device_info) @@ -123,7 +115,6 @@ def generate_topics(self): self.add_subscribe_topic(self.topic_report_device_status_in_job) if self.general_edge_id is not None: self.add_subscribe_topic(self.fl_topic_start_train) - self.add_subscribe_topic(self.fl_topic_stop_train) self.add_subscribe_topic(self.fl_topic_request_device_info) @abstractmethod @@ -132,7 +123,6 @@ def add_protocol_handler(self): # self.add_message_listener(self.topic_start_train, self.callback_start_train) # Add the message listeners for all topics self.add_message_listener(self.topic_start_train, self.callback_start_train) - self.add_message_listener(self.topic_stop_train, self.callback_stop_train) self.add_message_listener(self.topic_ota_msg, FedMLBaseSlaveProtocolManager.callback_client_ota_msg) self.add_message_listener(self.topic_report_status, self.callback_report_current_status) self.add_message_listener(self.topic_request_device_info, self.callback_report_device_info) @@ -141,7 +131,6 @@ def add_protocol_handler(self): self.add_message_listener(self.topic_response_job_status, self.callback_response_job_status) self.add_message_listener(self.topic_report_device_status_in_job, self.callback_response_device_status_in_job) self.add_message_listener(self.fl_topic_start_train, self.callback_start_train) - self.add_message_listener(self.fl_topic_stop_train, self.callback_stop_train) self.add_message_listener(self.fl_topic_request_device_info, self.callback_report_device_info) @abstractmethod @@ -295,27 +284,6 @@ def callback_start_train(self, topic, payload): # Register the job launch message into the status center self.register_job_launch_message(topic, payload) - def callback_stop_train(self, topic, payload): - # Parse the parameters. - edge_id = str(topic).split("/")[-2] - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json.get("runId", None) - run_id = request_json.get("id", None) if run_id is None else run_id - run_status = request_json.get("run_status", GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED) - - # logging.info("Stop run with multiprocessing...") - # Stop client with multiprocessing mode - run_id_str = str(run_id) - self._get_job_runner_manager().cleanup_containers_and_release_gpus( - run_id, edge_id, SchedulerConstants.JOB_TASK_TYPE_TRAIN) - self.sync_run_stop_status(run_status=run_status) - - # Register the job stopping message into the status center - self.register_job_stop_message(topic, payload) - def callback_report_current_status(self, topic, payload): logging.info( f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" @@ -561,14 +529,6 @@ def remove_listener_job_status(self, run_id): self.remove_message_listener(topic_job_status_from_master) self.unsubscribe_msg(topic_job_status_from_master) - def sync_run_stop_status(self, run_status=GeneralConstants.MSG_MLOPS_CLIENT_STATUS_KILLED): - try: - self.status_reporter.report_client_id_status( - self.edge_id, run_status, server_id=self.server_id, run_id=self.run_id) - except Exception as e: - logging.error(f"Failed to sync run stop status with Exception {e}. 
Traceback: {traceback.format_exc()}") - pass - def get_all_run_process_list_map(self): run_process_dict = dict() all_runner_pid_dict = self._get_job_runner_manager().get_all_runner_pid_map() diff --git a/python/fedml/core/mlops/mlops_runtime_log.py b/python/fedml/core/mlops/mlops_runtime_log.py index 0bc4dc6b6c..0fc5db3d23 100644 --- a/python/fedml/core/mlops/mlops_runtime_log.py +++ b/python/fedml/core/mlops/mlops_runtime_log.py @@ -143,6 +143,8 @@ def __init__(self, args): self.should_write_log_file = args.using_mlops else: self.should_write_log_file = False + if not hasattr(args, "log_file_dir"): + setattr(args, "log_file_dir", "./logs") self.log_file_dir = args.log_file_dir self.log_file = None self.run_id = args.run_id From bcf988144acbccd998d4facefa379f51e847d111 Mon Sep 17 00:00:00 2001 From: Alex Date: Tue, 21 May 2024 20:58:04 +0800 Subject: [PATCH 084/251] [CoreEngine] save the relationship between endpoint and run id. --- .../computing/scheduler/master/deploy_job_launcher.py | 7 +------ .../scheduler/slave/base_slave_protocol_manager.py | 6 ++++++ 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/python/fedml/computing/scheduler/master/deploy_job_launcher.py b/python/fedml/computing/scheduler/master/deploy_job_launcher.py index 359c6e641f..50e4517547 100755 --- a/python/fedml/computing/scheduler/master/deploy_job_launcher.py +++ b/python/fedml/computing/scheduler/master/deploy_job_launcher.py @@ -3,7 +3,6 @@ from fedml.computing.scheduler.model_scheduler import device_client_constants from fedml.computing.scheduler.model_scheduler.device_model_cards import FedMLModelCards from fedml.computing.scheduler.scheduler_entry.constants import Constants -from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager class FedMLDeployJobLauncher: @@ -90,11 +89,7 @@ def check_model_device_ready_and_deploy(self, request_json, run_id, master_devic serving_devices.extend(device_slave_ids) # Start to deploy the model - endpoint_id = FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id) + FedMLDeployJobLauncher.deploy_model(serving_devices, request_json, run_id=run_id) - # Save the relationship between run id and endpoint - ComputeCacheManager.get_instance().set_redis_params() - ComputeCacheManager.get_instance().get_gpu_cache().set_endpoint_run_id_map( - endpoint_id, run_id) diff --git a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py index de97684061..447bd05cd9 100755 --- a/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py +++ b/python/fedml/computing/scheduler/slave/base_slave_protocol_manager.py @@ -257,6 +257,12 @@ def callback_start_train(self, topic, payload): model_master_device_id=model_master_device_id, model_slave_device_id=model_slave_device_id) else: + # Save the relationship between run id and endpoint + ComputeCacheManager.get_instance().set_redis_params() + ComputeCacheManager.get_instance().get_gpu_cache().set_endpoint_run_id_map( + endpoint_id, run_id) + + # Report the run status with finished status and return self.generate_status_report(run_id, edge_id, server_agent_id=server_agent_id).report_client_id_status( edge_id, GeneralConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, run_id=run_id) return From 1e69f68712a6439344126cc6a5b0dc82de59e179 Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 22 May 2024 01:24:20 +0800 Subject: [PATCH 085/251] [CoreEngine] report the killed status when deleting the 
deployment. --- .../scheduler/model_scheduler/master_protocol_manager.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index d21ab44c6e..b65d1bc8de 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -111,7 +111,7 @@ def callback_delete_deployment(self, topic, payload): # Get the launch job id ComputeCacheManager.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(self.run_id) + launch_job_id = ComputeCacheManager.get_instance().get_gpu_cache().get_endpoint_run_id_map(model_msg_object.run_id) # Delete SQLite records FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) @@ -142,7 +142,9 @@ def callback_delete_deployment(self, topic, payload): # Report the launch job status with killed status. if launch_job_id is not None: - self.status_reporter.report_server_id_status(launch_job_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED) + self.generate_status_report(model_msg_object.run_id, self.edge_id, server_agent_id=self.edge_id).\ + report_server_id_status(launch_job_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED, + server_id=self.edge_id, server_agent_id=self.edge_id) def callback_start_deployment(self, topic, payload): # noinspection PyBroadException From 2306ee366d8ffe574b1da14092f7271a76b1441c Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 22 May 2024 02:11:10 +0800 Subject: [PATCH 086/251] [CoreEngine] make the server status work. --- .../fedml/computing/scheduler/scheduler_core/status_center.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py index 4ababbc826..fa43fd649a 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py @@ -218,7 +218,7 @@ def run_status_dispatcher(self, status_event, status_queue, message_center=message_center) else: status_manager_instances[run_id_str].edge_id = status_entity.edge_id - if status_entity.server_id is None and status_entity.server_id != 0: + if status_entity.server_id is not None and status_entity.server_id != 0: status_manager_instances[run_id_str].server_id = status_entity.server_id # if the job status is completed then continue From cd84d8209144e07d18e729c0d93287029a567e74 Mon Sep 17 00:00:00 2001 From: Alex Date: Wed, 22 May 2024 02:19:23 +0800 Subject: [PATCH 087/251] [CoreEngine] make the server status work. 
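The guard fixed here is easy to invert because server_id can arrive as None,
as the integer 0, or as the string "0" depending on which agent reported the
status. Expressed as a helper, the intended check is (a sketch; the dispatcher
inlines the condition, and the follow-up patch below tightens it to the string
comparison):

    def is_valid_server_id(server_id) -> bool:
        # None, 0, and "0" all mean "no real server id reported yet".
        return server_id is not None and str(server_id) != "0"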
--- .../fedml/computing/scheduler/scheduler_core/status_center.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/status_center.py b/python/fedml/computing/scheduler/scheduler_core/status_center.py index fa43fd649a..97c2115e76 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_center.py @@ -218,7 +218,7 @@ def run_status_dispatcher(self, status_event, status_queue, message_center=message_center) else: status_manager_instances[run_id_str].edge_id = status_entity.edge_id - if status_entity.server_id is not None and status_entity.server_id != 0: + if status_entity.server_id is not None and str(status_entity.server_id) != "0": status_manager_instances[run_id_str].server_id = status_entity.server_id # if the job status is completed then continue From f7ab709a39af6981a3ae4d8985a060f92b937fdb Mon Sep 17 00:00:00 2001 From: Alay Dilipbhai Shah Date: Tue, 21 May 2024 16:29:09 -0700 Subject: [PATCH 088/251] Update setup.py --- python/setup.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/python/setup.py b/python/setup.py index fa425c98f7..0e314de29c 100644 --- a/python/setup.py +++ b/python/setup.py @@ -64,6 +64,8 @@ def finalize_options(self): 'uvicorn', 'wandb==0.13.2', 'wget', + # Need to pin this version due to breaking change released in python docker sdk + 'requests<2.32', ] requirements_extra_mpi = [ From 649e42fa3f259cc892ca257b822aa844627da39d Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Tue, 21 May 2024 22:25:18 -0700 Subject: [PATCH 089/251] Remove Docker Client Timeout --- .../fedml/computing/scheduler/comm_utils/container_utils.py | 4 ++-- python/fedml/computing/scheduler/comm_utils/job_utils.py | 2 +- .../scheduler/model_scheduler/device_model_deployment.py | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/container_utils.py b/python/fedml/computing/scheduler/comm_utils/container_utils.py index f86e9fe1a2..2f5fa31fb5 100644 --- a/python/fedml/computing/scheduler/comm_utils/container_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/container_utils.py @@ -26,7 +26,7 @@ def get_instance(): def get_docker_client(self): try: - client = docker.from_env(timeout=5, version="auto") + client = docker.from_env() except Exception: logging.error("Failed to connect to the docker daemon, please ensure that you have " "installed Docker Desktop or Docker Engine, and the docker is running") @@ -180,7 +180,7 @@ def get_container_rank_same_model(prefix: str): running_model_name = hash("model_endpoint_id_{}_name_{}_model_id_{}_name_{}_ver_{}") """ try: - client = docker.from_env(timeout=5, version="auto") + client = docker.from_env() except Exception: logging.error("Failed to connect to the docker daemon, please ensure that you have " "installed Docker Desktop or Docker Engine, and the docker is running") diff --git a/python/fedml/computing/scheduler/comm_utils/job_utils.py b/python/fedml/computing/scheduler/comm_utils/job_utils.py index 08ce44d1dd..5b9a2c812a 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/job_utils.py @@ -570,7 +570,7 @@ def get_run_container_name(run_id: int) -> str: @staticmethod def get_docker_client(docker_args: DockerArgs) -> DockerClient: try: - client = docker.from_env(timeout=5, version="auto") + client = docker.from_env() if docker_args.username != "" and docker_args.registry 
!= "": client.login(username=docker_args.username, password=docker_args.password, registry=docker_args.registry) except Exception as e: diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py index f54965b599..1876373d25 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_deployment.py @@ -210,7 +210,7 @@ def start_deployment(end_point_id, end_point_name, model_id, model_version, infer_host = "127.0.0.1" try: - client = docker.from_env(timeout=5, version="auto") + client = docker.from_env() if enable_custom_image and docker_registry_user_name != "" and docker_registry_user_password != "" \ and docker_registry != "": client.login(username=docker_registry_user_name, password=docker_registry_user_password, @@ -467,7 +467,7 @@ def log_deployment_result(end_point_id, model_id, cmd_container_name, cmd_type, logging.info(f"Attempt: {deploy_attempt} / {deploy_attempt_threshold} ...") try: - client = docker.from_env(timeout=5, version="auto") + client = docker.from_env() except Exception: logging.error("Failed to connect to the docker daemon, please ensure that you have " "installed Docker Desktop or Docker Engine, and the docker is running") From 8d9c8ed2876eee9dc14ff6f1ac30573c6a600c38 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 23 May 2024 02:21:44 +0800 Subject: [PATCH 090/251] [CoreEngine] change the edge status in the status center. --- .../status_manager_protocols.py | 23 +++++++++---------- 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py index 96f5e4920f..e045458db5 100755 --- a/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py +++ b/python/fedml/computing/scheduler/scheduler_core/status_manager_protocols.py @@ -23,7 +23,7 @@ def __init__(self, run_id=None, edge_id=None, server_id=None, self.edge_id = edge_id self.server_id = server_id self.edge_id_list = edge_id_list - self.client_agent_active_list = dict() + self.edge_status_dict = None self.running_scheduler_contract = running_scheduler_contract if running_scheduler_contract is not None else dict() self.message_reporter = MLOpsMetrics() self.message_reporter.set_messenger(message_center) @@ -163,6 +163,8 @@ def status_center_process_master_status(self, topic, payload): status = request_json["status"] edge_id = request_json["edge_id"] server_id = request_json.get("server_id", None) + if server_id is None or str(server_id) == "0": + server_id = self.server_id run_id_str = str(run_id) # Process the job status @@ -185,8 +187,7 @@ def process_job_status_consensus(self, run_id, master_id, status): status = self.get_entire_job_status() # Set the device status based on the job status - edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {}) - for edge_id_item, edge_status_item in edge_id_status_dict.items(): + for edge_id_item, edge_status_item in self.edge_status_dict.items(): if edge_id_item == "server": continue @@ -233,18 +234,17 @@ def status_center_process_slave_status(self, topic, payload): init_edge_id_list = payload_json.get("init_all_edge_id_list", None) init_server_id = payload_json.get("init_server_id", None) - active_item_dict = self.client_agent_active_list.get(f"{run_id}", None) - if active_item_dict is 
None: - self.client_agent_active_list[f"{run_id}"] = dict() + if self.edge_status_dict is None: + self.edge_status_dict = dict() if init_edge_id_list is not None: - self.client_agent_active_list[f"{run_id}"][f"server"] = init_server_id + self.edge_status_dict[f"server"] = init_server_id for edge_id_item in init_edge_id_list: - self.client_agent_active_list[f"{run_id}"][f"{edge_id_item}"] = \ + self.edge_status_dict[f"{edge_id_item}"] = \ ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE if run_id is not None and edge_id is not None: - self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = status + self.edge_status_dict[f"{edge_id}"] = status self.process_device_status(run_id, edge_id, status) @@ -252,12 +252,11 @@ def process_device_status(self, run_id, edge_id, status): number_of_failed_edges = 0 number_of_finished_edges = 0 number_of_killed_edges = 0 - edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {}) - server_id = edge_id_status_dict.get("server", 0) + server_id = self.edge_status_dict.get("server", 0) enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id) running_edges_list = list() edge_nums = 0 - for edge_id_item, status_item in edge_id_status_dict.items(): + for edge_id_item, status_item in self.edge_status_dict.items(): if edge_id_item == "server": continue From 1162f6c18b266796ac958d766ab4790ce50a0b88 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 23 May 2024 15:13:23 +0800 Subject: [PATCH 091/251] [CoreEngine] forward the stopping request to the cloud server. --- .../master/base_master_protocol_manager.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py index c95d73f4bf..1c4cbba4f4 100755 --- a/python/fedml/computing/scheduler/master/base_master_protocol_manager.py +++ b/python/fedml/computing/scheduler/master/base_master_protocol_manager.py @@ -264,23 +264,26 @@ def callback_stop_train(self, topic, payload, use_payload=None): run_id = request_json.get("runId", None) run_id = request_json.get("id", None) if run_id is None else run_id run_id_str = str(run_id) + edge_ids = request_json.get("edgeids", None) server_id = request_json.get("serverId", None) if server_id is None: server_id = request_json.get("server_id", None) - edge_ids = request_json.get("edgeids", None) - - # Stop the job runner - self._get_job_runner_manager().stop_job_runner( - run_id, args=self.args, server_id=server_id, request_json=request_json, - run_as_cloud_agent=self.run_as_cloud_agent) + server_agent_id = server_id # Cleanup the cached object if self.running_request_json.get(run_id_str, None) is not None: self.running_request_json.pop(run_id_str) + # If it is the cloud agent, then forward the stopping request to the corresponding cloud server. 
+ if self.run_as_cloud_agent: + server_agent_id = self.edge_id + topic_stop_train_to_cloud_server = f"mlops/flserver_agent_{server_id}/stop_train" + self.message_center.send_message(topic_stop_train_to_cloud_server, payload) + return + # Reset all edge status and server status for iter_edge_id in edge_ids: - self.generate_status_report(run_id, iter_edge_id, server_agent_id=server_id).\ + self.generate_status_report(run_id, iter_edge_id, server_agent_id=server_agent_id).\ report_client_id_status(iter_edge_id, GeneralConstants.MSG_MLOPS_SERVER_STATUS_KILLED, run_id=run_id, server_id=server_id) From 92b7e162b66fa09b22a6af2dcfc22acf46ddf5cb Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Tue, 28 May 2024 17:46:52 +0000 Subject: [PATCH 092/251] [Deploy] Try to convert the gpu_topology value type to int. --- .../model_scheduler/device_replica_controller.py | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py b/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py index 667d57c4f4..ea19efb8b6 100644 --- a/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_replica_controller.py @@ -67,7 +67,9 @@ def __init__(self, master_id, request_json: dict): def calc_total_gpu_num(self): total_gpu_num = 0 for device_id, gpu_num in self.devices_avail_gpus.items(): - total_gpu_num += gpu_num + if type(gpu_num) is not int: + logging.warning(f"The value in gpu_topology should be int, but got {type(gpu_num)}. Try to convert it.") + total_gpu_num += int(gpu_num) return total_gpu_num def init_id_replica_num(self): @@ -77,6 +79,11 @@ def init_id_replica_num(self): """ id_replica_num = {} for id, avail_num in self.devices_avail_gpus.items(): + if type(avail_num) is not int: + logging.warning(f"The value in gpu_topology should be int, " + f"but got {type(avail_num)}. Try to convert it.") + avail_num = int(avail_num) + if avail_num % self.gpu_per_replica != 0: raise ValueError("The number of gpus for each device should be divisible by gpu_per_replica") id_replica_num[str(id)] = avail_num // self.gpu_per_replica From 0a6eba9f6dfda93ed0df500e63b8b64bad5df44d Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Tue, 28 May 2024 22:48:38 +0000 Subject: [PATCH 093/251] [Deploy] Fix version diff function. 
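The bug was a copy-paste: the version-diff helper delegated to the num-diff
helper. For reference, the worker consumes this version diff as a per-device,
per-replica mapping; an illustrative payload (device id and version strings
are made up, while the "new_version" key is what the worker-side rollback code
in patch 096 reads):

    # Illustrative shape of request_json["replica_version_diff"]:
    replica_version_diff = {
        "705": {                          # device (edge) id, keyed as a string
            "1": {"new_version": "v2"},   # one entry per replica number
            "2": {"new_version": "v2"},
        }
    }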
---
 .../scheduler/model_scheduler/master_job_runner_manager.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
index 0bfc205b34..c761cd6d8f 100755
--- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
+++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py
@@ -64,4 +64,4 @@ def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json):

     @staticmethod
     def generate_request_json_with_replica_version_diff(run_id, edge_id, request_json):
-        return FedMLDeployMasterJobRunner.generate_request_json_with_replica_num_diff(run_id, edge_id, request_json)
+        return FedMLDeployMasterJobRunner.generate_request_json_with_replica_version_diff(run_id, edge_id, request_json)

From e8844d389b1aa7809df3ea3d8255c6ce83501e7b Mon Sep 17 00:00:00 2001
From: Raphael Jin
Date: Tue, 28 May 2024 22:50:46 -0400
Subject: [PATCH 094/251] [Deploy] Fix timezone issue using pandas

---
 .../scheduler/model_scheduler/autoscaler/autoscaler.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index bb2b59e7d9..eb9f08b0eb 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -50,11 +50,11 @@ def filter_by_timestamp(cls,
         filtered = metrics
         if before_now_minutes:
             less_than_ts = \
-                str(pd.Timestamp.now() - pd.Timedelta(minutes=before_now_minutes))
+                str(pd.Timestamp.utcnow().replace(tzinfo=None) - pd.Timedelta(minutes=before_now_minutes))
             filtered = metrics.query("'{}' <= {}".format(less_than_ts, "timestamp"))
         if before_now_seconds:
             less_than_ts = \
-                str(pd.Timestamp.now() - pd.Timedelta(seconds=before_now_seconds))
+                str(pd.Timestamp.utcnow().replace(tzinfo=None) - pd.Timedelta(seconds=before_now_seconds))
             filtered = metrics.query("'{}' <= {}".format(less_than_ts, "timestamp"))
         return filtered

@@ -151,6 +151,7 @@ def scale_operation_query_concurrency(cls,
             # Otherwise, we proceed as normal.
             queries_num = period_data.shape[0]
+            logging.info(f"Detected {queries_num} requests in the last {concurrent_query_policy.window_size_secs} seconds")

         try:
             # QSR: Queries per Second per Replica: (Number of Queries / Number of Current Replicas) / Window Size

From b58720cd93952f8ea95366af59cf160e882706aa Mon Sep 17 00:00:00 2001
From: Alex
Date: Wed, 29 May 2024 19:15:37 +0800
Subject: [PATCH 095/251] [CoreEngine] In order to make the inference logs work, we save the container inference logs to a single dir.
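All endpoint container logs now land under one subdirectory of the client log
root instead of being scattered into per-run server log directories. A sketch
of the resulting layout (the base path is illustrative; the subdirectory
constant is the one added in this patch):

    import os

    ENDPOINT_CONTAINER_LOG_SUBDIR = "monitor_endpoint_logs"

    def endpoint_log_dir(base_log_dir: str) -> str:
        # Keeping every endpoint container log in one place lets the log
        # uploader find them regardless of which run produced them.
        return os.path.join(base_log_dir, ENDPOINT_CONTAINER_LOG_SUBDIR)

    # e.g. endpoint_log_dir("/home/user/.fedml/fedml-client/fedml/logs")
    # -> "/home/user/.fedml/fedml-client/fedml/logs/monitor_endpoint_logs"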
--- .../computing/scheduler/comm_utils/job_monitor.py | 11 ++++++++--- .../scheduler_core/scheduler_base_job_runner.py | 8 +++++++- python/fedml/core/mlops/mlops_utils.py | 3 ++- 3 files changed, 17 insertions(+), 5 deletions(-) diff --git a/python/fedml/computing/scheduler/comm_utils/job_monitor.py b/python/fedml/computing/scheduler/comm_utils/job_monitor.py index bada84d96e..a7d5214a02 100644 --- a/python/fedml/computing/scheduler/comm_utils/job_monitor.py +++ b/python/fedml/computing/scheduler/comm_utils/job_monitor.py @@ -48,6 +48,7 @@ class JobMonitor(Singleton): ENDPOINT_CONTAINER_LOG_PREFIX = "endpoint" TIME_INTERVAL_FOR_INFERENCE_ON_GATEWAY = 60 * 10 + ENDPOINT_CONTAINER_LOG_SUBDIR = "monitor_endpoint_logs" def __init__(self): if not hasattr(self, "endpoint_unavailable_counter"): @@ -1055,8 +1056,11 @@ def monitor_endpoint_logs(self): model_version = model_config.get("model_version", None) endpoint_name = endpoint_json.get("end_point_name", None) + log_file_dir = os.path.join( + device_client_constants.ClientConstants.get_log_file_dir(), + JobMonitor.ENDPOINT_CONTAINER_LOG_SUBDIR) log_file_path, program_prefix = MLOpsLoggingUtils.build_log_file_path_with_run_params( - job.job_id, int(job.edge_id), device_server_constants.ServerConstants.get_log_file_dir(), is_server=True, + job.job_id, int(job.edge_id), log_file_dir, is_server=False, log_file_prefix=JobMonitor.ENDPOINT_CONTAINER_LOG_PREFIX, ) @@ -1130,8 +1134,9 @@ def monitor_endpoint_logs(self): nano_second_str = container_time.split(".")[1][:9] t_datetime_obj = isoparse(container_time) - if t_sec_offset is not None: - t_datetime_obj = t_datetime_obj + datetime.timedelta(seconds=t_sec_offset) + # ISSUE: this will cause the timestamp is not correct. + #if t_sec_offset is not None: + # t_datetime_obj = t_datetime_obj + datetime.timedelta(seconds=t_sec_offset) except Exception as e: logging.error(f"Exception when parsing the container log time {e}") t_datetime_obj = datetime.datetime.now() diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py index 69b69f4d4c..648ab18cf1 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -12,7 +12,7 @@ from ..comm_utils.constants import SchedulerConstants from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs from ..scheduler_entry.constants import Constants -from ....core.mlops import MLOpsMetrics +from ....core.mlops import MLOpsMetrics, MLOpsRuntimeLogDaemon from ....core.mlops.mlops_device_perfs import MLOpsDevicePerfStats from ..comm_utils.yaml_utils import load_yaml_config from .general_constants import GeneralConstants @@ -449,10 +449,16 @@ def trigger_stop_event(self): if self.run_process_event is not None: self.run_process_event.set() + time.sleep(1) + MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) + def trigger_completed_event(self): if self.run_process_completed_event is not None: self.run_process_completed_event.set() + time.sleep(1) + MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) + def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_full_path, dynamic_args_config, fedml_config_object): run_config = self.request_json["run_config"] diff --git a/python/fedml/core/mlops/mlops_utils.py b/python/fedml/core/mlops/mlops_utils.py index 
1d6db23d02..8bde9e4299 100644 --- a/python/fedml/core/mlops/mlops_utils.py +++ b/python/fedml/core/mlops/mlops_utils.py @@ -150,10 +150,11 @@ def get_edge_id_from_args(args): else: edge_id = 0 else: - if getattr(args, "client_id", None) is not None: + if getattr(args, "edge_id", None) is not None: edge_id = args.edge_id else: edge_id = 0 + return edge_id @staticmethod From 76250750358b57e572e78bb07fa03927bc460003 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Thu, 30 May 2024 07:29:38 +0000 Subject: [PATCH 096/251] [Deploy] Avoid re-download the same model serving package. --- .../model_scheduler/worker_job_runner.py | 116 ++++++++---------- 1 file changed, 53 insertions(+), 63 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py index 332dab2547..831064b591 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -146,10 +146,10 @@ def run_impl(self, run_extend_queue_list, sender_message_center, logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.") self.replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json) if self.replica_handler is not None: - logging.info(f"=================Worker replica Handler ======================" - f"Reconcile with num diff {self.replica_handler.replica_num_diff} " - f"and version diff {self.replica_handler.replica_version_diff}." - f"=============================================================") + logging.info("\n================= Worker replica Handler ======================\n" + f"Reconcile with num diff {self.replica_handler.replica_num_diff}\n" + f"and version diff {self.replica_handler.replica_version_diff}\n" + "===============================================================\n") else: logging.error(f"[Worker] Replica handler is None.") return False @@ -178,39 +178,13 @@ def run_impl(self, run_extend_queue_list, sender_message_center, logging.info("[Worker] No need to reconcile.") return True - logging.info( - f"================Worker Reconcile Operations ======================\n" - f" op: {op}; op num: {op_num}.\n" - f"==================================================================\n") - - # If not rollback, download package from MLOps; otherwise, use the backup package - if op != "rollback": - logging.info("Download and unzip model to local...") - unzip_package_path, _, _ = \ - self.update_local_fedml_config(run_id, model_config, model_config_parameters) - if unzip_package_path is None: - logging.info("Failed to update local fedml config.") - self.check_runner_stop_event() - self.status_reporter.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=run_id) - return False - - if not os.path.exists(unzip_package_path): - logging.info("Failed to unzip file.") - self.check_runner_stop_event() - self.status_reporter.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=run_id) - return False - else: - logging.info("Try to use backup package to rollback...") - # Find folder under "~/.fedml/fedml-model-client/fedml/model_packages \ - # /${end_point_id}_${end_point_name}_${model_name}_${model_version}" - backup_folder_full_path = None - models_root_dir = ClientConstants.get_model_package_dir() + logging.info("\n================ Worker Reconcile 
Operations ======================\n" + f" op: {op}; op num: {op_num}.\n" + "===================================================================\n") + if op == "rollback": # Find the version (notified by master) to rollback + logging.info("Try to use backup package to rollback...") version_diff_dict = self.request_json["replica_version_diff"][str(self.edge_id)] version_rollback_to = None for replica_no, rollback_ops in version_diff_dict.items(): @@ -222,39 +196,38 @@ def run_impl(self, run_extend_queue_list, sender_message_center, return False model_version = version_rollback_to - # Format the version to match the folder name - model_version_formatted = version_rollback_to.replace(" ", "-") - model_version_formatted = model_version_formatted.replace(":", "-") - - last_run_folder_sub_fd = f"{run_id}_{end_point_name}_{model_name}_{model_version_formatted}" - for folder in os.listdir(models_root_dir): - if last_run_folder_sub_fd in folder: - backup_folder_full_path = os.path.join(models_root_dir, folder) - break - if backup_folder_full_path is None: - logging.error(f"No backup folder found for run_id: {self.run_id} edge_id: {self.edge_id} " - f"under {models_root_dir} with sub folder {last_run_folder_sub_fd}, rollback failed.") - return False + # Construct the parent folder name for the package + model_version_formatted = model_version.replace(" ", "-") + model_version_formatted = model_version_formatted.replace(":", "-") + models_root_dir = ClientConstants.get_model_package_dir() + parent_fd = f"{run_id}_{end_point_name}_{model_name}_{model_version_formatted}" - # Inside backup folder, find unzipped package with prefix unzip_fedml_run - unzip_package_path_parent = None - for folder in os.listdir(backup_folder_full_path): - if folder.startswith("unzip_fedml_run"): - unzip_package_path_parent = os.path.join(backup_folder_full_path, folder) - break - - # Inside unzip folder, find the unzipped package, should be the only one - unzip_package_path = None - for folder in os.listdir(unzip_package_path_parent): - if os.path.isdir(os.path.join(unzip_package_path_parent, folder)): - unzip_package_path = os.path.join(unzip_package_path_parent, folder) - break + # Check if the package is already downloaded + unzip_package_path = "" + if os.path.exists(os.path.join(models_root_dir, parent_fd)): + unzip_package_path = self.find_previous_downloaded_pkg(os.path.join(models_root_dir, parent_fd)) + # Download the package if not found + if unzip_package_path == "": + logging.info("Download and unzip model to local...") + unzip_package_path, _, _ = \ + self.update_local_fedml_config(run_id, model_config, model_config_parameters) if unzip_package_path is None: - logging.error(f"No unzipped package found for run_id: {self.run_id} edge_id: {self.edge_id} " - f"under {backup_folder_full_path}, rollback failed.") + logging.info("Failed to update local fedml config.") + self.check_runner_stop_event() + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + is_from_model=True, run_id=run_id) return False + if not os.path.exists(unzip_package_path): + logging.info("Failed to unzip file.") + self.check_runner_stop_event() + self.status_reporter.report_client_id_status( + self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, + is_from_model=True, run_id=run_id) + return False + self.check_runner_stop_event() running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ @@ -535,3 +508,20 @@ def build_dynamic_args(self, 
run_id, run_config, package_conf_object, base_dir): # Override def build_dynamic_constrain_variables(self, run_id, run_config): pass + + @staticmethod + def find_previous_downloaded_pkg(parent_dir) -> str: + unzip_fd = "" + res = "" + + for folder in os.listdir(parent_dir): + if folder.startswith("unzip_fedml_run"): + unzip_fd = os.path.join(parent_dir, folder) + break + + for folder in os.listdir(unzip_fd): + if os.path.isdir(os.path.join(unzip_fd, folder)): + res = os.path.join(unzip_fd, folder) + break + + return res From 9d8b0df6ae63c22f4ef95d330ff4b154db68b500 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Thu, 30 May 2024 12:22:59 -0700 Subject: [PATCH 097/251] Add inference gateway logs --- .../model_scheduler/device_model_inference.py | 18 ++++- .../model_scheduler/master_job_runner.py | 77 +++++++++++-------- .../master_job_runner_manager.py | 4 +- .../master_protocol_manager.py | 2 +- 4 files changed, 65 insertions(+), 36 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index b8d85edd31..1b6d71ebb7 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -15,6 +15,7 @@ from fedml.computing.scheduler.model_scheduler.device_mqtt_inference_protocol import FedMLMqttInference from fedml.computing.scheduler.model_scheduler.device_http_proxy_inference_protocol import FedMLHttpProxyInference from fedml.computing.scheduler.comm_utils import sys_utils +from fedml.core.mlops import MLOpsRuntimeLog, MLOpsRuntimeLogDaemon try: from pydantic import BaseSettings @@ -56,6 +57,8 @@ class settings: ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75" +logging_args = None + api = FastAPI() @@ -229,7 +232,8 @@ def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_ model_name = "" if in_end_point_name is not None: end_point_name = in_end_point_name - model_name = redis_key[len(f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG}-{end_point_id}-{in_end_point_name}-"):] + model_name = redis_key[ + len(f"{FedMLModelCache.FEDML_MODEL_DEPLOYMENT_STATUS_TAG}-{end_point_id}-{in_end_point_name}-"):] else: # e.g. FEDML_MODEL_DEPLOYMENT_STATUS--1234-dummy_endpoint_name-dummy_model_name try: @@ -366,8 +370,20 @@ def logging_inference_request(request, response): logging.info("failed to log inference request and response to file.") +def set_logging_args(args=None): + global logging_args + logging_args = args + if logging_args is not None: + # Force run id to 0, as the gateway is shared by all the runs. 
+ setattr(args, "run_id", "0") + MLOpsRuntimeLog.get_instance(args).init_logs(log_level=logging.INFO) + MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(args.run_id, args.edge_id) + logging.info("start the log processor") + + if __name__ == "__main__": import uvicorn + port = 2203 logging.basicConfig(level=logging.INFO) uvicorn.run(api, host="0.0.0.0", port=port, log_level="info") diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index bf9cee3279..5680b2ac6d 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -1,6 +1,7 @@ import copy import json import logging +import multiprocessing import os import time import queue @@ -9,6 +10,7 @@ from multiprocessing import Queue import fedml +import uvicorn from fedml.core.mlops import MLOpsRuntimeLog, MLOpsConfigs from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter from .device_client_constants import ClientConstants @@ -22,6 +24,7 @@ from ..master.base_master_job_runner import FedMLBaseMasterJobRunner from .device_replica_controller import FedMLDeviceReplicaController from .job_runner_msg_sender import FedMLDeployJobRunnerMsgSender +from .device_model_inference import set_logging_args class FedMLDeployMasterJobRunner(FedMLBaseMasterJobRunner, FedMLDeployJobRunnerMsgSender, ABC): @@ -63,6 +66,11 @@ def _generate_job_runner_instance(self, args, run_id=None, request_json=None, ag def _generate_extend_queue_list(self): return [self.deployment_result_queue] + @staticmethod + def start_inference_gateway_server(inference_gw_cmd, port, args): + set_logging_args(args) + uvicorn.run(inference_gw_cmd, host="0.0.0.0", port=port, log_level="info") + # Override def run_impl( self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue, @@ -116,7 +124,7 @@ def run_impl( # start unified inference server FedMLDeployMasterJobRunner.start_device_inference_gateway( - inference_port=inference_port, agent_config=self.agent_config) + args=self.args, inference_port=inference_port, agent_config=self.agent_config) # start inference monitor server FedMLDeployMasterJobRunner.stop_device_inference_monitor( @@ -462,13 +470,14 @@ def process_deployment_result_message(self, topic=None, payload=None): time.sleep(3) self.trigger_completed_event() + def cleanup_runner_process(self, run_id): ServerConstants.cleanup_run_process(run_id, not_kill_subprocess=True) @staticmethod def start_device_inference_gateway( - inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, - agent_config=None, redis_addr="localhost", redis_port=6379, redis_password="fedml_default" + args, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, agent_config=None, + redis_addr="localhost", redis_port=6379, redis_password="fedml_default" ): # start unified inference server python_program = get_python_program() @@ -477,35 +486,39 @@ def start_device_inference_gateway( inference_port = int(master_port) if not ServerConstants.is_running_on_k8s(): logging.info(f"start the model inference gateway...") - use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False") - use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False - use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False") - use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False + # use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False") 
+ # use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False + # use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False") + # use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api" inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd) if inference_gateway_pids is None or len(inference_gateway_pids) <= 0: - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - connect_str = "@FEDML@" - ext_info = sys_utils.random1( - agent_config["mqtt_config"]["BROKER_HOST"] + connect_str + - str(agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str + - agent_config["mqtt_config"]["MQTT_USER"] + connect_str + - agent_config["mqtt_config"]["MQTT_PWD"] + connect_str + - str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT") - python_program = get_python_program() - inference_gateway_process = ServerConstants.exec_console_with_script( - "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " - "END_POINT_NAME=\"{}\" " - "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " - "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - redis_addr, str(redis_port), redis_password, "", - "", "", "", fedml.get_env_version(), use_mqtt_inference, - use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), - fedml_base_dir), - should_capture_stdout=False, should_capture_stderr=False) + # cur_dir = os.path.dirname(__file__) + # fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) + # connect_str = "@FEDML@" + # ext_info = sys_utils.random1( + # agent_config["mqtt_config"]["BROKER_HOST"] + connect_str + + # str(agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str + + # agent_config["mqtt_config"]["MQTT_USER"] + connect_str + + # agent_config["mqtt_config"]["MQTT_PWD"] + connect_str + + # str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT") + # python_program = get_python_program() + inference_gateway_process = multiprocessing.Process( + target=FedMLDeployMasterJobRunner.start_inference_gateway_server, args=(inference_gw_cmd, + inference_port, args) + ) + # inference_gateway_process = ServerConstants.exec_console_with_script( + # "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " + # "END_POINT_NAME=\"{}\" " + # "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " + # "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " + # "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " + # "--log-level critical".format( + # redis_addr, str(redis_port), redis_password, "", + # "", "", "", fedml.get_env_version(), use_mqtt_inference, + # use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), + # fedml_base_dir), + # should_capture_stdout=False, should_capture_stderr=False) return inference_gateway_process else: @@ -544,7 +557,7 @@ def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_id, model_name, model_version) @staticmethod - def recover_inference_and_monitor(): + def recover_inference_and_monitor(args): # noinspection PyBroadException try: agent_config = dict() @@ -571,8 +584,7 @@ def recover_inference_and_monitor(): if 
not is_activated: continue - FedMLDeployMasterJobRunner.start_device_inference_gateway( - inference_port=inference_port, agent_config=agent_config) + FedMLDeployMasterJobRunner.start_device_inference_gateway(args=args, inference_port=inference_port, agent_config=agent_config) FedMLDeployMasterJobRunner.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) @@ -807,3 +819,4 @@ def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir): # Override def build_dynamic_constrain_variables(self, run_id, run_config): pass + diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py index 0bfc205b34..7c700bb10f 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py @@ -55,8 +55,8 @@ def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_ run_id, end_point_name, model_id, model_name, model_version) @staticmethod - def recover_inference_and_monitor(): - FedMLDeployMasterJobRunner.recover_inference_and_monitor() + def recover_inference_and_monitor(args): + FedMLDeployMasterJobRunner.recover_inference_and_monitor(args=args) @staticmethod def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json): diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index b65d1bc8de..8f77f609d0 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -82,7 +82,7 @@ def _init_extra_items(self): except Exception as e: pass - FedMLDeployJobRunnerManager.recover_inference_and_monitor() + FedMLDeployJobRunnerManager.recover_inference_and_monitor(args = self.args) # Override def _process_connection_ready(self): From 3fb45aac083d2ec7e341251ade6e0a18db986686 Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Mon, 3 Jun 2024 19:00:55 +0000 Subject: [PATCH 098/251] Make Inference Gateway Daemon Process --- .../computing/scheduler/model_scheduler/master_job_runner.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 5680b2ac6d..4d5974237d 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -90,7 +90,7 @@ def run_impl( logging.info("model deployment request: {}".format(self.request_json)) logging.info("send deployment stages...") - # Generate the replica controller object. + # Generate the replica controller object self.replica_controller = FedMLDeviceReplicaController(self.edge_id, self.request_json) # Start the process to report system performance(cpu,memory,etc.) 
to MLOps @@ -519,6 +519,8 @@ def start_device_inference_gateway( # use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), # fedml_base_dir), # should_capture_stdout=False, should_capture_stderr=False) + inference_gateway_process.daemon = True + inference_gateway_process.start() return inference_gateway_process else: From 8595e0f6a7693ba6cd043731d8bcf4c648288214 Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Mon, 3 Jun 2024 18:25:05 -0400 Subject: [PATCH 099/251] Add fail-fast and per-request timeout enforcement policies. --- .../debug/inference_timeout/config.yaml | 10 ++ .../debug/inference_timeout/src/serve_main.py | 32 ++++ .../scheduler/comm_utils/constants.py | 2 - .../device_client_constants.py | 1 + .../device_http_inference_protocol.py | 9 +- .../model_scheduler/device_model_cache.py | 102 +++++++----- .../model_scheduler/device_model_inference.py | 153 +++++++++++++----- .../model_scheduler/master_job_runner.py | 4 +- .../master_protocol_manager.py | 6 +- .../customized_job_example/train_job.yaml | 4 +- 10 files changed, 226 insertions(+), 97 deletions(-) create mode 100644 python/examples/deploy/debug/inference_timeout/config.yaml create mode 100644 python/examples/deploy/debug/inference_timeout/src/serve_main.py diff --git a/python/examples/deploy/debug/inference_timeout/config.yaml b/python/examples/deploy/debug/inference_timeout/config.yaml new file mode 100644 index 0000000000..f6d2566e00 --- /dev/null +++ b/python/examples/deploy/debug/inference_timeout/config.yaml @@ -0,0 +1,10 @@ +workspace: "./src" +entry_point: "serve_main.py" +bootstrap: | + echo "Bootstrap start..." + sleep 5 + echo "Bootstrap finished" +auto_detect_public_ip: true +use_gpu: true + +request_timeout_sec: 10 diff --git a/python/examples/deploy/debug/inference_timeout/src/serve_main.py b/python/examples/deploy/debug/inference_timeout/src/serve_main.py new file mode 100644 index 0000000000..5884e41f85 --- /dev/null +++ b/python/examples/deploy/debug/inference_timeout/src/serve_main.py @@ -0,0 +1,32 @@ +from fedml.serving import FedMLPredictor +from fedml.serving import FedMLInferenceRunner +import uuid +import torch + +# Calculate the number of elements +num_elements = 1_073_741_824 // 4 # using integer division for whole elements + + +class DummyPredictor(FedMLPredictor): + def __init__(self): + super().__init__() + # Create a tensor with these many elements + tensor = torch.empty(num_elements, dtype=torch.float32) + + # Move the tensor to GPU + tensor_gpu = tensor.cuda() + + # for debug + with open("/tmp/dummy_gpu_occupier.txt", "w") as f: + f.write("GPU is occupied") + + self.worker_id = uuid.uuid4() + + def predict(self, request): + return {f"AlohaV0From{self.worker_id}": request} + + +if __name__ == "__main__": + predictor = DummyPredictor() + fedml_inference_runner = FedMLInferenceRunner(predictor) + fedml_inference_runner.run() diff --git a/python/fedml/computing/scheduler/comm_utils/constants.py b/python/fedml/computing/scheduler/comm_utils/constants.py index f3fcd4ed5a..22cb31de45 100644 --- a/python/fedml/computing/scheduler/comm_utils/constants.py +++ b/python/fedml/computing/scheduler/comm_utils/constants.py @@ -78,8 +78,6 @@ class SchedulerConstants: ENDPOINT_INFERENCE_READY_TIMEOUT = 15 ENDPOINT_STATUS_CHECK_TIMEOUT = 60 * 3 - MQTT_INFERENCE_TIMEOUT = 60 * 6 - TRAIN_PROVISIONING_TIMEOUT = 60 * 25 TRAIN_STARTING_TIMEOUT = 60 * 15 TRAIN_STOPPING_TIMEOUT = 60 * 5 diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py
b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py index d2093569c3..7894f2c73e 100644 --- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py @@ -95,6 +95,7 @@ class ClientConstants(object): INFERENCE_ENGINE_TYPE_INT_DEFAULT = 2 INFERENCE_MODEL_VERSION = "1" INFERENCE_INFERENCE_SERVER_VERSION = "v2" + INFERENCE_REQUEST_TIMEOUT = 30 MSG_MODELOPS_DEPLOYMENT_STATUS_INITIALIZING = "INITIALIZING" MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING = "DEPLOYING" diff --git a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py index e711a9e6a6..7e4c06ea5d 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_http_inference_protocol.py @@ -1,13 +1,12 @@ -import traceback -from typing import Mapping -from urllib.parse import urlparse - import httpx +import traceback from .device_client_constants import ClientConstants -import requests + from fastapi.responses import Response from fastapi.responses import StreamingResponse +from urllib.parse import urlparse +from typing import Mapping class FedMLHttpInference: diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py index edcdf7d0f1..fca7b81d42 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py @@ -33,6 +33,8 @@ class FedMLModelCache(Singleton): FEDML_KEY_COUNT_PER_SCAN = 1000 + FEDML_PENDING_REQUESTS_COUNTER = "FEDML_PENDING_REQUESTS_COUNTER" + def __init__(self): if not hasattr(self, "redis_pool"): self.redis_pool = None @@ -110,7 +112,7 @@ def set_user_setting_replica_num(self, end_point_id, replica_num: int, enable_auto_scaling: bool = False, scale_min: int = 0, scale_max: int = 0, state: str = "UNKNOWN", target_queries_per_replica: int = 60, aggregation_window_size_seconds: int = 60, - scale_down_delay_seconds: int = 120 + scale_down_delay_seconds: int = 120, timeout_s: int = 30 ) -> bool: """ Key: FEDML_MODEL_ENDPOINT_REPLICA_USER_SETTING_TAG-- @@ -136,7 +138,8 @@ def set_user_setting_replica_num(self, end_point_id, "scale_min": scale_min, "scale_max": scale_max, "state": state, "target_queries_per_replica": target_queries_per_replica, "aggregation_window_size_seconds": aggregation_window_size_seconds, - "scale_down_delay_seconds": scale_down_delay_seconds + "scale_down_delay_seconds": scale_down_delay_seconds, + "request_timeout_sec": timeout_s } try: self.redis_connection.set(self.get_user_setting_replica_num_key(end_point_id), json.dumps(replica_num_dict)) @@ -362,7 +365,7 @@ def get_idle_device(self, end_point_id, end_point_name, if "model_status" in result_payload and result_payload["model_status"] == "DEPLOYED": idle_device_list.append({"device_id": device_id, "end_point_id": end_point_id}) - logging.info(f"{len(idle_device_list)} devices has this model on it: {idle_device_list}") + logging.info(f"{len(idle_device_list)} devices have this model deployed: {idle_device_list}") if len(idle_device_list) <= 0: return None, None @@ -824,38 +827,37 @@ def get_monitor_metrics_key(self, end_point_id, end_point_name, model_name, mode end_point_id, end_point_name, model_name, model_version) def
get_endpoint_metrics(self, - endpoint_id, + end_point_id, k_recent=None) -> List[Any]: model_deployment_monitor_metrics = list() try: key_pattern = "{}*{}*".format( self.FEDML_MODEL_DEPLOYMENT_MONITOR_TAG, - endpoint_id) - model_deployment_monitor_endpoint_keys = \ + end_point_id) + model_deployment_monitor_endpoint_key = \ self.redis_connection.keys(pattern=key_pattern) # Since the reply is a list, we need to make sure the list # is non-empty otherwise the index will raise an error. - if model_deployment_monitor_endpoint_keys: + if model_deployment_monitor_endpoint_key: model_deployment_monitor_endpoint_key = \ - model_deployment_monitor_endpoint_keys[0] - else: - raise Exception("Function `get_endpoint_metrics` Key {} does not exist." - .format(key_pattern)) - # Set start and end index depending on the size of the - # list and the requested number of most recent records. - num_records = self.redis_connection.llen(name=model_deployment_monitor_endpoint_key) - # if k_most_recent is None, then fetch all by default. - start, end = 0, -1 - # if k_most_recent is positive then fetch [-k_most_recent:] - if k_recent and k_recent > 0: - start = num_records - k_recent - model_deployment_monitor_metrics = \ - self.redis_connection.lrange( - name=model_deployment_monitor_endpoint_key, - start=start, - end=end) - model_deployment_monitor_metrics = [ - json.loads(m) for m in model_deployment_monitor_metrics] + model_deployment_monitor_endpoint_key[0] + + # Set start and end index depending on the size of the + # list and the requested number of most recent records. + num_records = self.redis_connection.llen( + name=model_deployment_monitor_endpoint_key) + # if k_most_recent is None, then fetch all by default. + start, end = 0, -1 + # if k_most_recent is positive then fetch [-k_most_recent:] + if k_recent and k_recent > 0: + start = num_records - k_recent + model_deployment_monitor_metrics = \ + self.redis_connection.lrange( + name=model_deployment_monitor_endpoint_key, + start=start, + end=end) + model_deployment_monitor_metrics = [ + json.loads(m) for m in model_deployment_monitor_metrics] except Exception as e: logging.error(e) @@ -868,24 +870,24 @@ def get_endpoint_replicas_results(self, endpoint_id) -> List[Any]: key_pattern = "{}*{}*".format( self.FEDML_MODEL_DEPLOYMENT_RESULT_TAG, endpoint_id) - model_deployment_result_key = \ + model_deployment_result_keys = \ self.redis_connection.keys(pattern=key_pattern) - if model_deployment_result_key: + if model_deployment_result_keys: model_deployment_result_key = \ - model_deployment_result_key[0] + model_deployment_result_keys[0] + replicas_results = \ + self.redis_connection.lrange( + name=model_deployment_result_key, + start=0, + end=-1) + # Format the result value to a properly formatted json. + for replica_idx, replica in enumerate(replicas_results): + replicas_results[replica_idx] = json.loads(replica) + replicas_results[replica_idx]["result"] = \ + json.loads(replicas_results[replica_idx]["result"]) else: raise Exception("Function `get_endpoint_replicas_results` Key {} does not exist." .format(key_pattern)) - replicas_results = \ - self.redis_connection.lrange( - name=model_deployment_result_key, - start=0, - end=-1) - - # Format the result value to a properly formatted json. 
- for replica_idx, replica in enumerate(replicas_results): - replicas_results[replica_idx] = json.loads(replica) - replicas_results[replica_idx]["result"] = json.loads(replicas_results[replica_idx]["result"]) except Exception as e: logging.error(e) @@ -898,11 +900,13 @@ def get_endpoint_settings(self, endpoint_id) -> Dict: key_pattern = "{}*{}*".format( self.FEDML_MODEL_ENDPOINT_REPLICA_USER_SETTING_TAG, endpoint_id) - endpoint_settings = \ + endpoint_settings_keys = \ self.redis_connection.keys(pattern=key_pattern) - if endpoint_settings: + if endpoint_settings_keys: endpoint_settings = \ - json.load(endpoint_settings[0]) + json.load(endpoint_settings_keys[0]) + if not isinstance(endpoint_settings, dict): + endpoint_settings = json.loads(endpoint_settings) else: raise Exception("Function `get_endpoint_settings` Key {} does not exist." .format(key_pattern)) @@ -966,3 +970,17 @@ def delete_endpoint_scaling_down_decision_time(self, end_point_id) -> bool: return bool(self.redis_connection.hdel( self.FEDML_MODEL_ENDPOINT_SCALING_DOWN_DECISION_TIME_TAG, end_point_id)) + + def get_pending_requests_counter(self) -> int: + if not self.redis_connection.exists(self.FEDML_PENDING_REQUESTS_COUNTER): + self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0) + return int(self.redis_connection.get(self.FEDML_PENDING_REQUESTS_COUNTER)) + + def update_pending_requests_counter(self, increase=False, decrease=False) -> int: + if not self.redis_connection.exists(self.FEDML_PENDING_REQUESTS_COUNTER): + self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0) + if increase: + self.redis_connection.incr(self.FEDML_PENDING_REQUESTS_COUNTER) + if decrease: + self.redis_connection.decr(self.FEDML_PENDING_REQUESTS_COUNTER) + return self.get_pending_requests_counter() diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index b8d85edd31..26c25bc09f 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -1,13 +1,16 @@ +import json import logging import time import traceback -from urllib.parse import urlparse import os + +from urllib.parse import urlparse from typing import Any, Mapping, MutableMapping, Union from fastapi import FastAPI, Request, Response, status from fastapi.responses import StreamingResponse +from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants from fedml.computing.scheduler.model_scheduler.device_http_inference_protocol import FedMLHttpInference from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics @@ -26,23 +29,7 @@ pass -# class Settings(BaseSettings): -# redis_addr: str -# redis_port: str -# redis_password: str -# end_point_name: str -# model_name: str -# model_version: str -# model_infer_url: str -# version: str -# use_mqtt_inference: bool -# use_worker_gateway: bool -# ext_info: str -# -# -# settings = Settings() - -class settings: +class Settings: redis_addr = "127.0.0.1" redis_port = 6379 redis_password = "fedml_default" @@ -58,10 +45,54 @@ class settings: api = FastAPI() +FEDML_MODEL_CACHE = FedMLModelCache.get_instance() +FEDML_MODEL_CACHE.set_redis_params( + redis_addr=Settings.redis_addr, + redis_port=Settings.redis_port, + redis_password=Settings.redis_password) + + 
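Aside: the pending-requests counter introduced above is simply a shared Redis integer that the gateway bumps on request entry and releases on exit, and the middleware that follows rejects new work once the estimated queueing delay (mean recent latency times the pending count) exceeds the per-request timeout. A minimal standalone sketch of that pattern, assuming a local redis-py connection; the key and function names below are illustrative, not the FedML ones:

import redis

PENDING_KEY = "PENDING_REQUESTS_COUNTER"  # illustrative key name
r = redis.Redis(host="localhost", port=6379, decode_responses=True)

def pending_count() -> int:
    # Initialize the key on first use so get() never returns None.
    if not r.exists(PENDING_KEY):
        r.set(PENDING_KEY, 0)
    return int(r.get(PENDING_KEY))

def update_pending(increase=False, decrease=False) -> int:
    if increase:
        r.incr(PENDING_KEY)            # atomic server-side increment
    if decrease and pending_count() > 0:
        r.decr(PENDING_KEY)            # floor the counter at zero
    return pending_count()

def should_fail_fast(mean_latency_sec: float, timeout_sec: float) -> bool:
    # Reject when the estimated wait (latency x queue depth) exceeds the timeout.
    return mean_latency_sec * pending_count() > timeout_sec

Keeping the counter in Redis rather than in process memory matters here because the gateway runs as a separate (and possibly multi-worker) process: INCR/DECR are atomic on the server, so concurrent requests cannot lose updates.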
+@api.middleware("http") +async def auth_middleware(request: Request, call_next): + + if "/inference" in request.url.path or "/api/v1/predict" in request.url.path: + try: + # Attempt to parse the JSON body. + request_json = await request.json() + except json.JSONDecodeError: + return Response("Invalid JSON.", status_code=status.HTTP_400_BAD_REQUEST) + + # Get total pending requests. + pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter() + if pending_requests_num: + end_point_id = request_json.get("end_point_id", None) + # Fetch metrics of the past k=3 requests. + pask_k_metrics = FEDML_MODEL_CACHE.get_endpoint_metrics( + end_point_id=end_point_id, + k_recent=3) + + # Get the request timeout from the endpoint settings. + request_timeout_s = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \ + .get("request_timeout_s", ClientConstants.INFERENCE_REQUEST_TIMEOUT) + + # Only proceed if the past k metrics collection is not empty. + if pask_k_metrics: + # Measure the average latency in seconds(!), hence the 0.001 multiplier. + past_k_latencies_sec = \ + [float(j_obj["current_latency"]) * 0.001 for j_obj in pask_k_metrics] + mean_latency = sum(past_k_latencies_sec) / len(past_k_latencies_sec) + + # If timeout threshold is exceeded then cancel and return time out error. + if (mean_latency * pending_requests_num) > request_timeout_s: + return Response("Request timed out.", status_code=status.HTTP_504_GATEWAY_TIMEOUT) + + response = await call_next(request) + return response + @api.get('/') async def root(): - return {'message': 'FedML Federated Inference Service!'} + return {'message': 'TensorOpera Inference Service!'} @api.get('/ready') @@ -141,6 +172,10 @@ async def _predict( input_json, header=None ) -> Union[MutableMapping[str, Any], Response, StreamingResponse]: + + FEDML_MODEL_CACHE.update_pending_requests_counter(increase=True) + inference_response = {} + in_end_point_id = end_point_id in_end_point_name = input_json.get("end_point_name", None) in_model_name = input_json.get("model_name", None) @@ -170,21 +205,26 @@ async def _predict( if not is_endpoint_activated(in_end_point_id): inference_response = {"error": True, "message": "endpoint is not activated."} logging_inference_request(input_json, inference_response) + FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) return inference_response # Found idle inference device idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \ found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version) if idle_device is None or idle_device == "": + FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) return {"error": True, "error_code": status.HTTP_404_NOT_FOUND, "message": "can not found active inference worker for this endpoint."} # Start timing for model metrics model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name, model_id, in_model_name, model_version, - settings.model_infer_url, - settings.redis_addr, settings.redis_port, settings.redis_password, - version=settings.version) + Settings.model_infer_url, + Settings.redis_addr, + Settings.redis_port, + Settings.redis_password, + version=Settings.version) + # Setting time to the time before authentication and idle device discovery. 
model_metrics.set_start_time(start_time) # Send inference request to idle device @@ -195,7 +235,12 @@ async def _predict( input_list["stream"] = input_list.get("stream", stream_flag) output_list = input_json.get("outputs", []) inference_response = await send_inference_request( - idle_device, end_point_id, inference_output_url, input_list, output_list, inference_type=in_return_type) + idle_device, + end_point_id, + inference_output_url, + input_list, + output_list, + inference_type=in_return_type) # Calculate model metrics try: @@ -207,11 +252,12 @@ async def _predict( pass logging_inference_request(input_json, inference_response) - + FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) return inference_response else: inference_response = {"error": True, "message": "token is not valid."} logging_inference_request(input_json, inference_response) + FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) return inference_response @@ -221,9 +267,7 @@ def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_ We allow missing end_point_name and model_name in the input parameters. return end_point_name, model_name """ - FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password) - redis_key = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \ - get_end_point_full_key_by_id(end_point_id) + redis_key = FEDML_MODEL_CACHE.get_end_point_full_key_by_id(end_point_id) if redis_key is not None: end_point_name = "" model_name = "" @@ -254,8 +298,7 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_ inference_output_url = "" model_version = "" # Found idle device (TODO: optimize the algorithm to search best device for inference) - FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password) - payload, idle_device = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). 
\ + payload, idle_device = FEDML_MODEL_CACHE.\ get_idle_device(end_point_id, end_point_name, in_model_name, in_model_version) if payload is not None: logging.info("found idle deployment result {}".format(payload)) @@ -273,8 +316,12 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_ return idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url -async def send_inference_request(idle_device, endpoint_id, inference_url, input_list, output_list, +async def send_inference_request(idle_device, end_point_id, inference_url, input_list, output_list, inference_type="default", has_public_ip=True): + + request_timeout_sec = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \ + .get("request_timeout_sec", ClientConstants.INFERENCE_REQUEST_TIMEOUT) + try: http_infer_available = os.getenv("FEDML_INFERENCE_HTTP_AVAILABLE", True) if not http_infer_available: @@ -283,24 +330,35 @@ async def send_inference_request(idle_device, endpoint_id, inference_url, input_ if http_infer_available: response_ok = await FedMLHttpInference.is_inference_ready( - inference_url, timeout=os.getenv("FEDML_GATEWAY_HTTP_READY_TIMEOUT", 20)) + inference_url, + timeout=request_timeout_sec) if response_ok: response_ok, inference_response = await FedMLHttpInference.run_http_inference_with_curl_request( - inference_url, input_list, output_list, inference_type=inference_type) + inference_url, + input_list, + output_list, + inference_type=inference_type, + timeout=request_timeout_sec) logging.info(f"Use http inference. return {response_ok}") return inference_response response_ok = await FedMLHttpProxyInference.is_inference_ready( - inference_url, timeout=os.getenv("FEDML_GATEWAY_HTTP_PROXY_READY_TIMEOUT", 20)) + inference_url, + timeout=request_timeout_sec) if response_ok: response_ok, inference_response = await FedMLHttpProxyInference.run_http_proxy_inference_with_request( - endpoint_id, inference_url, input_list, output_list, inference_type=inference_type) + end_point_id, + inference_url, + input_list, + output_list, + inference_type=inference_type, + timeout=request_timeout_sec) logging.info(f"Use http proxy inference. 
return {response_ok}") return inference_response if not has_public_ip: connect_str = "@FEDML@" - random_out = sys_utils.random2(settings.ext_info, "FEDML@9999GREAT") + random_out = sys_utils.random2(Settings.ext_info, "FEDML@9999GREAT") config_list = random_out.split(connect_str) agent_config = dict() agent_config["mqtt_config"] = dict() @@ -309,13 +367,24 @@ async def send_inference_request(idle_device, endpoint_id, inference_url, input_ agent_config["mqtt_config"]["MQTT_USER"] = config_list[2] agent_config["mqtt_config"]["MQTT_PWD"] = config_list[3] agent_config["mqtt_config"]["MQTT_KEEPALIVE"] = int(config_list[4]) - mqtt_inference = FedMLMqttInference(agent_config=agent_config, run_id=endpoint_id) + mqtt_inference = FedMLMqttInference( + agent_config=agent_config, + run_id=end_point_id) response_ok = mqtt_inference.run_mqtt_health_check_with_request( - idle_device, endpoint_id, inference_url) + idle_device, + end_point_id, + inference_url, + timeout=request_timeout_sec) inference_response = {"error": True, "message": "Failed to use http, http-proxy and mqtt for inference."} if response_ok: response_ok, inference_response = mqtt_inference.run_mqtt_inference_with_request( - idle_device, endpoint_id, inference_url, input_list, output_list, inference_type=inference_type) + idle_device, + end_point_id, + inference_url, + input_list, + output_list, + inference_type=inference_type, + timeout=request_timeout_sec) logging.info(f"Use mqtt inference. return {response_ok}.") return inference_response @@ -332,22 +401,18 @@ def auth_request_token(end_point_id, end_point_name, model_name, token): if token is None: return False - FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password) - cached_token = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). 
\ + cached_token = FEDML_MODEL_CACHE.\ get_end_point_token(end_point_id, end_point_name, model_name) if cached_token is not None and str(cached_token) == str(token): return True return False - def is_endpoint_activated(end_point_id): if end_point_id is None: return False - FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password) - activated = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port).get_end_point_activation( - end_point_id) + activated = FEDML_MODEL_CACHE.get_end_point_activation(end_point_id) return activated diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index bf9cee3279..cc1901de65 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -114,11 +114,11 @@ def run_impl( ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING, message_center=self.message_center) - # start unified inference server + # start unified inference gateway process if not started FedMLDeployMasterJobRunner.start_device_inference_gateway( inference_port=inference_port, agent_config=self.agent_config) - # start inference monitor server + # start inference monitor process FedMLDeployMasterJobRunner.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) FedMLDeployMasterJobRunner.start_device_inference_monitor( diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index b65d1bc8de..668d1192ce 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -178,6 +178,9 @@ def callback_start_deployment(self, topic, payload): aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60) scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120) + model_config_parameters = request_json.get("parameters", {}) + timeout_s = model_config_parameters.get("request_timeout_sec", 30) + inference_end_point_id = run_id logging.info("[Master] received start deployment request for end point {}.".format(run_id)) @@ -197,7 +200,8 @@ def callback_start_deployment(self, topic, payload): scale_min=scale_min, scale_max=scale_max, state="DEPLOYING", aggregation_window_size_seconds=aggregation_window_size_seconds, target_queries_per_replica=target_queries_per_replica, - scale_down_delay_seconds=int(scale_down_delay_seconds) + scale_down_delay_seconds=int(scale_down_delay_seconds), + timeout_s=timeout_s ) # Start log processor for current run diff --git a/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml b/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml index e0e8f0f3be..86c9df6594 100755 --- a/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml +++ b/python/fedml/workflow/driver_example/customized_job_example/train_job.yaml @@ -25,10 +25,12 @@ bootstrap: | pip install PyYAML==5.3.1 -i https://pypi.org/simple pip install fedml==0.8.29 pip install -U typing_extensions -i https://pypi.org/simple + pip install -U pydantic + pip install -U fastapi echo "Bootstrap finished." 
computing: - resource_type: RTX-4090 # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type + resource_type: A100-80GB-SXM # e.g., A100-80G, please check the resource type list by "fedml show-resource-type" or visiting URL: https://open.fedml.ai/accelerator_resource_type minimum_num_gpus: 1 # minimum # of GPUs to provision maximum_cost_per_hour: $10 # max cost per hour of all machines for your job # device_type: GPU # GPU or CPU From 9296884402f058f9fb64beca79aad6e5dea91596 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Mon, 3 Jun 2024 22:53:22 +0000 Subject: [PATCH 100/251] [Deploy] Fix config reading from redis. --- .../scheduler/model_scheduler/device_model_cache.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py index fca7b81d42..45a58c7ab9 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py @@ -900,11 +900,14 @@ def get_endpoint_settings(self, endpoint_id) -> Dict: key_pattern = "{}*{}*".format( self.FEDML_MODEL_ENDPOINT_REPLICA_USER_SETTING_TAG, endpoint_id) + endpoint_settings_keys = \ self.redis_connection.keys(pattern=key_pattern) - if endpoint_settings_keys: + + if len(endpoint_settings_keys) > 0: endpoint_settings = \ - json.load(endpoint_settings_keys[0]) + self.redis_connection.get(endpoint_settings_keys[0]) + if not isinstance(endpoint_settings, dict): endpoint_settings = json.loads(endpoint_settings) else: From b1312e1db9131aec05213ef22bd48e397e708cf4 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Mon, 3 Jun 2024 17:25:58 -0700 Subject: [PATCH 101/251] Add global env file --- python/fedml/__init__.py | 9 +++++++-- python/fedml/computing/scheduler/env/__init__.py | 3 +++ python/fedml/computing/scheduler/env/collect_env.py | 11 +++++++++++ .../scheduler/scheduler_core/message_center.py | 4 +++- .../computing/scheduler/slave/client_constants.py | 8 +++++++- python/setup.py | 1 + 6 files changed, 32 insertions(+), 4 deletions(-) diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py index 6b3ac3f61b..913457b5c7 100644 --- a/python/fedml/__init__.py +++ b/python/fedml/__init__.py @@ -1,5 +1,4 @@ import logging -from copy import deepcopy import multiprocess as multiprocessing import os @@ -9,7 +8,10 @@ import torch import fedml +import dotenv + from .computing.scheduler.env.collect_env import collect_env +from fedml.computing.scheduler.env import get_env_file from .constants import ( FEDML_BACKEND_SERVICE_URL_DEV, FEDML_BACKEND_SERVICE_URL_LOCAL, @@ -449,10 +451,13 @@ def _run_distributed(): def set_env_version(version): - os.environ['FEDML_ENV_VERSION'] = version + env_file = get_env_file() + dotenv.load_dotenv(dotenv_path=env_file) + dotenv.set_key(env_file, "FEDML_ENV_VERSION", version) def get_env_version(): + dotenv.load_dotenv(dotenv_path=get_env_file()) return "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION'] diff --git a/python/fedml/computing/scheduler/env/__init__.py b/python/fedml/computing/scheduler/env/__init__.py index e69de29bb2..5bfeaa5509 100644 --- a/python/fedml/computing/scheduler/env/__init__.py +++ b/python/fedml/computing/scheduler/env/__init__.py @@ -0,0 +1,3 @@ +import os + +from collect_env import get_env_file diff --git 
a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py index b2f7bd7f5e..ef471da3bc 100644 --- a/python/fedml/computing/scheduler/env/collect_env.py +++ b/python/fedml/computing/scheduler/env/collect_env.py @@ -4,6 +4,7 @@ import fedml from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil from fedml.computing.scheduler.slave.client_diagnosis import ClientDiagnosis +from ..slave.client_constants import ClientConstants def collect_env(): @@ -108,3 +109,13 @@ def collect_env(): except Exception as e: print(f"The connection exception: {traceback.format_exc()}") pass + + +def get_env_file(): + global_services_dir = ClientConstants.get_global_services_dir() + env_config_file = os.path.join(global_services_dir, ".env") + # Create the file if it does not exist + if not os.path.exists(env_config_file): + with open(env_config_file, 'w') as f: + f.write("") + return env_config_file diff --git a/python/fedml/computing/scheduler/scheduler_core/message_center.py b/python/fedml/computing/scheduler/scheduler_core/message_center.py index 869ed6e510..dbe11700a0 100755 --- a/python/fedml/computing/scheduler/scheduler_core/message_center.py +++ b/python/fedml/computing/scheduler/scheduler_core/message_center.py @@ -11,6 +11,7 @@ from os.path import expanduser from fedml.core.distributed.communication.mqtt.mqtt_manager import MqttManager +from ..slave.client_constants import ClientConstants from ....core.mlops.mlops_metrics import MLOpsMetrics from operator import methodcaller from .message_common import FedMLMessageEntity, FedMLMessageRecord @@ -466,7 +467,8 @@ class MessageCenterStoppedException(Exception): class FedMLMessageCenterConstants: def __init__(self): + global_services_dir = ClientConstants.get_global_services_dir() self.home_dir = expanduser("~") - self.message_center_dir = os.path.join(self.home_dir, ".fedml", "global_services", "message_center") + self.message_center_dir = os.path.join(global_services_dir, "message_center") self.message_log_dir = os.path.join(self.message_center_dir, "logs") os.makedirs(self.message_log_dir, exist_ok=True) diff --git a/python/fedml/computing/scheduler/slave/client_constants.py b/python/fedml/computing/scheduler/slave/client_constants.py index 2e15080541..e5b3d41846 100644 --- a/python/fedml/computing/scheduler/slave/client_constants.py +++ b/python/fedml/computing/scheduler/slave/client_constants.py @@ -153,6 +153,13 @@ def get_database_dir(): os.makedirs(database_dir, exist_ok=True) return database_dir + @staticmethod + def get_global_services_dir(): + home_dir = expanduser("~") + global_services_dir = os.path.join(home_dir, ".fedml", "global_services") + os.makedirs(global_services_dir, exist_ok=True) + return global_services_dir + @staticmethod def cleanup_run_process(run_id): RunProcessUtils.cleanup_run_process( @@ -454,7 +461,6 @@ def remove_fedml_parent_pid_file(): f"Traceback: {traceback.format_exc()}") pass - if __name__ == "__main__": ignore = "*test*,abc*" ignore = tuple(ignore.split(',')) diff --git a/python/setup.py b/python/setup.py index 0e314de29c..e88788d1ff 100644 --- a/python/setup.py +++ b/python/setup.py @@ -66,6 +66,7 @@ def finalize_options(self): 'wget', # Need to pin this version due to breaking change released in python docker sdk 'requests<2.32', + 'python-dotenv', ] requirements_extra_mpi = [ From 19160a2e2eec75807605a7431eaa342d3ab1b009 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Mon, 3 Jun 2024 17:28:18 -0700 Subject: [PATCH 102/251] Nits --- 
python/fedml/computing/scheduler/env/__init__.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/python/fedml/computing/scheduler/env/__init__.py b/python/fedml/computing/scheduler/env/__init__.py index 5bfeaa5509..cc765f0979 100644 --- a/python/fedml/computing/scheduler/env/__init__.py +++ b/python/fedml/computing/scheduler/env/__init__.py @@ -1,3 +1 @@ -import os - from collect_env import get_env_file From 1b2eefe4c140ce86eb500af8e62945f64cf4d52a Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Mon, 3 Jun 2024 17:46:06 -0700 Subject: [PATCH 103/251] Bug fix --- python/fedml/computing/scheduler/env/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/env/__init__.py b/python/fedml/computing/scheduler/env/__init__.py index cc765f0979..f157a33ec2 100644 --- a/python/fedml/computing/scheduler/env/__init__.py +++ b/python/fedml/computing/scheduler/env/__init__.py @@ -1 +1 @@ -from collect_env import get_env_file +from .collect_env import get_env_file From 3481aa8914636208ea7f152ff214a9619d212f71 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Mon, 3 Jun 2024 17:50:07 -0700 Subject: [PATCH 104/251] Write it to release by default --- python/fedml/__init__.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py index 913457b5c7..adde71a6d4 100644 --- a/python/fedml/__init__.py +++ b/python/fedml/__init__.py @@ -457,8 +457,10 @@ def set_env_version(version): def get_env_version(): - dotenv.load_dotenv(dotenv_path=get_env_file()) - return "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION'] + env_file = get_env_file() + dotenv.load_dotenv(dotenv_path=env_file) + version = "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION'] + dotenv.set_key(env_file, "FEDML_ENV_VERSION", version) def _get_backend_service(): From b2ea4d0bd44aaff5378de53dde774d8e5344f0bc Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Mon, 3 Jun 2024 17:51:05 -0700 Subject: [PATCH 105/251] Nit --- python/fedml/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py index adde71a6d4..a79fc7d60d 100644 --- a/python/fedml/__init__.py +++ b/python/fedml/__init__.py @@ -461,7 +461,7 @@ def get_env_version(): dotenv.load_dotenv(dotenv_path=env_file) version = "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION'] dotenv.set_key(env_file, "FEDML_ENV_VERSION", version) - + return version def _get_backend_service(): version = get_env_version() From 21138dd06521a91c2abb598c833f1f09c37134c5 Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Mon, 3 Jun 2024 17:51:18 -0700 Subject: [PATCH 106/251] Nit --- python/fedml/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py index a79fc7d60d..35658d4920 100644 --- a/python/fedml/__init__.py +++ b/python/fedml/__init__.py @@ -463,6 +463,7 @@ def get_env_version(): dotenv.set_key(env_file, "FEDML_ENV_VERSION", version) return version + def _get_backend_service(): version = get_env_version() # from inspect import getframeinfo, stack @@ -517,7 +518,7 @@ def get_local_on_premise_platform_port(): def _get_local_s3_like_service_url(): return FEDML_S3_DOMAIN_LOCAL - + from fedml import device from fedml import data From 1cc15525e2b81a22bd3589b1fc3501bd32f467da Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Tue, 4 
Jun 2024 04:38:30 -0400 Subject: [PATCH 107/251] Hotfix the MQTT inference timeout constant after refactoring. --- .../scheduler/model_scheduler/device_mqtt_inference_protocol.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py index b0bff261a4..1fac5a984b 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py @@ -105,7 +105,7 @@ def run_mqtt_inference_with_request( only_do_health_check=only_do_health_check, timeout=timeout ) - allowed_inference_timeout = SchedulerConstants.MQTT_INFERENCE_TIMEOUT if timeout is None else timeout + allowed_inference_timeout = timeout if timeout else -1 sleep_time_interval = 0.05 total_sleep_time = 0 while True: From 8e0318321ac267f18106d2cdead745076ce3dd3b Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Tue, 4 Jun 2024 05:19:36 -0400 Subject: [PATCH 108/251] Improve pending requests counter robustness. --- .../model_scheduler/device_model_cache.py | 6 +- .../model_scheduler/device_model_inference.py | 160 +++++++++--------- 2 files changed, 88 insertions(+), 78 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py index 45a58c7ab9..75cf4dbc2a 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py @@ -985,5 +985,9 @@ def update_pending_requests_counter(self, increase=False, decrease=False) -> int if increase: self.redis_connection.incr(self.FEDML_PENDING_REQUESTS_COUNTER) if decrease: - self.redis_connection.decr(self.FEDML_PENDING_REQUESTS_COUNTER) + # Making sure the counter never becomes negative! + if self.get_pending_requests_counter() < 0: + self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0) + else: + self.redis_connection.decr(self.FEDML_PENDING_REQUESTS_COUNTER) return self.get_pending_requests_counter() diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index 26c25bc09f..0e866c9626 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -173,92 +173,98 @@ async def _predict(
-    if in_model_name is None or in_end_point_name is None:
-        ret_endpoint_name, ret_model_name = retrieve_info_by_endpoint_id(in_end_point_id, in_end_point_name)
-        if in_model_name is None:
-            in_model_name = ret_model_name
-        if in_end_point_name is None:
-            in_end_point_name = ret_endpoint_name
-
-    # Authenticate request token
-    inference_response = {}
-    if auth_request_token(in_end_point_id, in_end_point_name, in_model_name, in_end_point_token):
-        # Check the endpoint is activated
-        if not is_endpoint_activated(in_end_point_id):
-            inference_response = {"error": True, "message": "endpoint is not activated."}
+    try:
+        in_end_point_id = end_point_id
+        in_end_point_name = input_json.get("end_point_name", None)
+        in_model_name = input_json.get("model_name", None)
+        in_model_version = input_json.get("model_version", None)
+        in_end_point_token = input_json.get("token", None)
+        in_return_type = "default"
+        if header is not None:
+            in_return_type = header.get("Accept", "default")
+
+        if in_model_version is None:
+            in_model_version = "*"  # * | latest | specific version
+
+        start_time = time.time_ns()
+
+        # Allow missing end_point_name and model_name in the input parameters.
+        if in_model_name is None or in_end_point_name is None:
+            ret_endpoint_name, ret_model_name = retrieve_info_by_endpoint_id(in_end_point_id, in_end_point_name)
+            if in_model_name is None:
+                in_model_name = ret_model_name
+            if in_end_point_name is None:
+                in_end_point_name = ret_endpoint_name
+
+        # Authenticate request token
+        if auth_request_token(in_end_point_id, in_end_point_name, in_model_name, in_end_point_token):
+            # Check the endpoint is activated
+            if not is_endpoint_activated(in_end_point_id):
+                inference_response = {"error": True, "message": "endpoint is not activated."}
+                logging_inference_request(input_json, inference_response)
+                FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+                return inference_response
+
+            # Found idle inference device
+            idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \
+                found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version)
+            if idle_device is None or idle_device == "":
+                FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+                return {"error": True, "error_code": status.HTTP_404_NOT_FOUND,
+                        "message": "can not found active inference worker for this endpoint."}
+
+            # Start timing for model metrics
+            model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name,
+                                              model_id, in_model_name, model_version,
+                                              Settings.model_infer_url,
+                                              Settings.redis_addr,
+                                              Settings.redis_port,
+                                              Settings.redis_password,
+                                              version=Settings.version)
+            # Setting time to the time before authentication and idle device discovery.
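+            # (editor's note) start_time was captured with time.time_ns() before
+            # token authentication and idle-device lookup, so the reported latency
+            # covers gateway queueing and scheduling overhead as well as the model
+            # call itself.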
+            model_metrics.set_start_time(start_time)
+
+            # Send inference request to idle device
+            logging.info("inference url {}.".format(inference_output_url))
+            if inference_output_url != "":
+                input_list = input_json.get("inputs", input_json)
+                stream_flag = input_json.get("stream", False)
+                input_list["stream"] = input_list.get("stream", stream_flag)
+                output_list = input_json.get("outputs", [])
+                inference_response = await send_inference_request(
+                    idle_device,
+                    end_point_id,
+                    inference_output_url,
+                    input_list,
+                    output_list,
+                    inference_type=in_return_type)
+
+                # Calculate model metrics
+                try:
+                    model_metrics.calc_metrics(end_point_id, in_end_point_name,
+                                               model_id, model_name, model_version,
+                                               inference_output_url, idle_device)
+                except Exception as e:
+                    logging.info("Calculate Inference Metrics Exception: {}".format(traceback.format_exc()))
+                    pass
+
+            logging_inference_request(input_json, inference_response)
+            FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+            return inference_response
+        else:
+            inference_response = {"error": True, "message": "token is not valid."}
+            logging_inference_request(input_json, inference_response)
+            FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
+            return inference_response
-
-    # Found idle inference device
-    idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \
-        found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version)
-    if idle_device is None or idle_device == "":
-        FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
-        return {"error": True, "error_code": status.HTTP_404_NOT_FOUND,
-                "message": "can not found active inference worker for this endpoint."}
-
-    # Start timing for model metrics
-    model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name,
-                                      model_id, in_model_name, model_version,
-                                      Settings.model_infer_url,
-                                      Settings.redis_addr,
-                                      Settings.redis_port,
-                                      Settings.redis_password,
-                                      version=Settings.version)
-    # Setting time to the time before authentication and idle device discovery.
-    model_metrics.set_start_time(start_time)
-
-    # Send inference request to idle device
-    logging.info("inference url {}.".format(inference_output_url))
-    if inference_output_url != "":
-        input_list = input_json.get("inputs", input_json)
-        stream_flag = input_json.get("stream", False)
-        input_list["stream"] = input_list.get("stream", stream_flag)
-        output_list = input_json.get("outputs", [])
-        inference_response = await send_inference_request(
-            idle_device,
-            end_point_id,
-            inference_output_url,
-            input_list,
-            output_list,
-            inference_type=in_return_type)
-
-        # Calculate model metrics
-        try:
-            model_metrics.calc_metrics(end_point_id, in_end_point_name,
-                                       model_id, model_name, model_version,
-                                       inference_output_url, idle_device)
-        except Exception as e:
-            logging.info("Calculate Inference Metrics Exception: {}".format(traceback.format_exc()))
-            pass
-
-        logging_inference_request(input_json, inference_response)
-        FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
-        return inference_response
-    else:
-        inference_response = {"error": True, "message": "token is not valid."}
-        logging_inference_request(input_json, inference_response)
+    except Exception as e:
+        logging.error("Inference Exception: {}".format(traceback.format_exc()))
+        # Need to reduce the pending requests counter in whatever exception that may be raised.
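+        # (editor's note) This decrement runs for any exception raised above,
+        # mirroring the decrement on each early-return branch, so a failed
+        # request cannot leave the pending-requests counter permanently inflated.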
+        FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True)
-    return inference_response
 
 
 def retrieve_info_by_endpoint_id(end_point_id,

From b0a55adc403dc4196204d12a596726d0ccdbb156 Mon Sep 17 00:00:00 2001
From: fedml-dimitris
Date: Tue, 4 Jun 2024 08:11:20 -0400
Subject: [PATCH 109/251] Returning well formatted json messages in the case
 of errored requests.

---
 .../model_scheduler/device_model_inference.py | 10 +++++++---
 1 file changed, 7 insertions(+), 3 deletions(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
index 0e866c9626..8192e7c300 100755
--- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
+++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py
@@ -8,7 +8,7 @@
 from typing import Any, Mapping, MutableMapping, Union
 
 from fastapi import FastAPI, Request, Response, status
-from fastapi.responses import StreamingResponse
+from fastapi.responses import StreamingResponse, JSONResponse
 
 from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants
 from fedml.computing.scheduler.model_scheduler.device_http_inference_protocol import FedMLHttpInference
@@ -60,7 +60,9 @@ async def auth_middleware(request: Request, call_next):
             # Attempt to parse the JSON body.
             request_json = await request.json()
         except json.JSONDecodeError:
-            return Response("Invalid JSON.", status_code=status.HTTP_400_BAD_REQUEST)
+            return JSONResponse(
+                {"error": True, "message": "Invalid JSON."},
+                status_code=status.HTTP_400_BAD_REQUEST)
 
         # Get total pending requests.
         pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter()
@@ -84,7 +86,9 @@ async def auth_middleware(request: Request, call_next):
 
             # If timeout threshold is exceeded then cancel and return time out error.
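             # (editor's note) The check below estimates total wait as mean recent
             # latency times queue depth: with a mean latency of 0.2 (seconds assumed)
             # and 100 pending requests the estimate is 20 s, which passes a 30 s
             # budget; 200 pending requests estimate 40 s and are rejected with a 504.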
             if (mean_latency * pending_requests_num) > request_timeout_s:
-                return Response("Request timed out.", status_code=status.HTTP_504_GATEWAY_TIMEOUT)
+                return JSONResponse(
+                    {"error": True, "message": "Request timed out."},
+                    status_code=status.HTTP_504_GATEWAY_TIMEOUT)
 
     response = await call_next(request)
     return response

From 2e5353609b278f9b4c906d99b6e1f38b179bd3b2 Mon Sep 17 00:00:00 2001
From: Alay Shah
Date: Tue, 4 Jun 2024 11:59:44 -0700
Subject: [PATCH 110/251] Fix bug: persist the default env version only when
 it is unset

---
 python/fedml/__init__.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index 35658d4920..a41b3e56af 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -459,8 +459,10 @@ def set_env_version(version):
 def get_env_version():
     env_file = get_env_file()
     dotenv.load_dotenv(dotenv_path=env_file)
-    version = "release" if os.environ.get('FEDML_ENV_VERSION') is None else os.environ['FEDML_ENV_VERSION']
-    dotenv.set_key(env_file, "FEDML_ENV_VERSION", version)
+    version = os.getenv('FEDML_ENV_VERSION')
+    if version is None:
+        version = "release"
+        set_env_version(version)
     return version

From b5e4c252395c0ed316fa8d7b987381c0caeb738c Mon Sep 17 00:00:00 2001
From: Alay Shah
Date: Tue, 4 Jun 2024 12:24:37 -0700
Subject: [PATCH 111/251] Make .env variables override system ones; abstract
 dotenv API calls into functions

---
 python/fedml/__init__.py                         | 12 +++++-------
 python/fedml/computing/scheduler/env/__init__.py |  2 +-
 .../fedml/computing/scheduler/env/collect_env.py | 16 ++++++++++++--
 3 files changed, 20 insertions(+), 10 deletions(-)

diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py
index a41b3e56af..c13a64566e 100644
--- a/python/fedml/__init__.py
+++ b/python/fedml/__init__.py
@@ -11,7 +11,7 @@ import dotenv
 
 from .computing.scheduler.env.collect_env import collect_env
-from fedml.computing.scheduler.env import get_env_file
+from fedml.computing.scheduler.env import set_env_kv, load_env
 from .constants import (
     FEDML_BACKEND_SERVICE_URL_DEV,
     FEDML_BACKEND_SERVICE_URL_LOCAL,
@@ -451,15 +451,13 @@ def _run_distributed():
 
 
 def set_env_version(version):
-    env_file = get_env_file()
-    dotenv.load_dotenv(dotenv_path=env_file)
-    dotenv.set_key(env_file, "FEDML_ENV_VERSION", version)
+    set_env_kv("FEDML_ENV_VERSION", version)
+    load_env()
 
 
 def get_env_version():
-    env_file = get_env_file()
-    dotenv.load_dotenv(dotenv_path=env_file)
-    version = os.getenv('FEDML_ENV_VERSION')
+    load_env()
+    version = os.getenv("FEDML_ENV_VERSION")
     if version is None:
         version = "release"
         set_env_version(version)
     return version

diff --git a/python/fedml/computing/scheduler/env/__init__.py b/python/fedml/computing/scheduler/env/__init__.py
index f157a33ec2..0f71de6038 100644
--- a/python/fedml/computing/scheduler/env/__init__.py
+++ b/python/fedml/computing/scheduler/env/__init__.py
@@ -1 +1 @@
-from .collect_env import get_env_file
+from .collect_env import load_env, set_env_kv

diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py
index ef471da3bc..84b903cdbb 100644
--- a/python/fedml/computing/scheduler/env/collect_env.py
+++ b/python/fedml/computing/scheduler/env/collect_env.py
@@ -2,6 +2,7 @@
 import traceback
 
 import fedml
+import dotenv
 from fedml.computing.scheduler.comm_utils.hardware_utils import HardwareUtil
 from fedml.computing.scheduler.slave.client_diagnosis import ClientDiagnosis
 from ..slave.client_constants import ClientConstants
@@ -112,10 +113,21 @@ def collect_env():
 
 
 def get_env_file():
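+    # (editor's note) Returns the path of the shared .env file under the global
+    # services directory, creating an empty file on first use.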
-    global_serivces_dir = ClientConstants.get_global_services_dir()
-    env_config_file = os.path.join(global_serivces_dir, ".env")
+    global_services_dir = ClientConstants.get_global_services_dir()
+    env_config_file = os.path.join(global_services_dir, ".env")
 
     # Create file if not exists
     if not os.path.exists(env_config_file):
         with open(env_config_file, 'w') as f:
             f.write("")
     return env_config_file
+
+
+def load_env():
+    env_config_file = get_env_file()
+    dotenv.load_dotenv(dotenv_path=env_config_file, override=True)
+
+
+def set_env_kv(key, value):
+    env_config_file = get_env_file()
+    dotenv.set_key(env_config_file, key, value)
+    load_env()

From e2430fca23c361b37c2ff96614ce155829915541 Mon Sep 17 00:00:00 2001
From: fedml-dimitris
Date: Wed, 5 Jun 2024 11:18:30 -0400
Subject: [PATCH 112/251] Renaming endpoint_id key to end_point_id

---
 .../scheduler/model_scheduler/autoscaler/autoscaler.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
index eb9f08b0eb..4cab1e133c 100644
--- a/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
+++ b/python/fedml/computing/scheduler/model_scheduler/autoscaler/autoscaler.py
@@ -339,7 +339,7 @@ def scale_operation_endpoint(self,
 
         # Fetch all metrics record from the database.
         metrics = self.fedml_model_cache.get_endpoint_metrics(
-            endpoint_id=endpoint_id)
+            end_point_id=endpoint_id)
 
         # Default to nothing.
         scale_op = ScaleOp.NO_OP

From 600905f283dfc9d57b8f315c6844e76cd4ab4f20 Mon Sep 17 00:00:00 2001
From: Raphael Jin
Date: Wed, 5
Jun 2024 19:41:50 +0000 Subject: [PATCH 114/251] Optimize Inference --- python/fedml/api/modules/device.py | 9 +- .../computing/scheduler/env/collect_env.py | 1 + .../model_scheduler/device_model_inference.py | 108 ++++++++---------- .../device_server_constants.py | 17 ++- .../model_scheduler/master_job_runner.py | 33 +++--- .../computing/scheduler/slave/client_login.py | 2 - python/fedml/core/mlops/mlops_configs.py | 4 + 7 files changed, 92 insertions(+), 82 deletions(-) diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py index 84aa42e7b2..497fde9005 100644 --- a/python/fedml/api/modules/device.py +++ b/python/fedml/api/modules/device.py @@ -58,14 +58,15 @@ def _bind( docker, docker_rank, infer_host, redis_addr, redis_port, redis_password ): + fedml.load_env() if os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) is None: - os.environ[ModuleConstants.ENV_FEDML_INFER_HOST] = infer_host + fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, infer_host) if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR) is None: - os.environ[ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR] = redis_addr + fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR, redis_addr) if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT) is None: - os.environ[ModuleConstants.ENV_FEDML_INFER_REDIS_PORT] = redis_port + fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, redis_port) if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD) is None: - os.environ[ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD] = redis_password + fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, redis_password) url = fedml._get_backend_service() platform_name = platform.system() diff --git a/python/fedml/computing/scheduler/env/collect_env.py b/python/fedml/computing/scheduler/env/collect_env.py index 84b903cdbb..da4d54e7a0 100644 --- a/python/fedml/computing/scheduler/env/collect_env.py +++ b/python/fedml/computing/scheduler/env/collect_env.py @@ -128,6 +128,7 @@ def load_env(): def set_env_kv(key, value): + os.environ[key] = value env_config_file = get_env_file() dotenv.set_key(env_config_file, key, value) load_env() diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index 1b6d71ebb7..d39ac0e4e7 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -1,3 +1,4 @@ +import argparse import logging import time import traceback @@ -8,52 +9,27 @@ from fastapi import FastAPI, Request, Response, status from fastapi.responses import StreamingResponse +import fedml +from fedml.api.modules.constants import ModuleConstants from fedml.computing.scheduler.model_scheduler.device_http_inference_protocol import FedMLHttpInference from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants from fedml.computing.scheduler.model_scheduler.device_model_monitor import FedMLModelMetrics from fedml.computing.scheduler.model_scheduler.device_model_cache import FedMLModelCache from fedml.computing.scheduler.model_scheduler.device_mqtt_inference_protocol import FedMLMqttInference from fedml.computing.scheduler.model_scheduler.device_http_proxy_inference_protocol import FedMLHttpProxyInference +from fedml.core.mlops.mlops_configs import MLOpsConfigs from fedml.computing.scheduler.comm_utils import sys_utils from fedml.core.mlops import 
MLOpsRuntimeLog, MLOpsRuntimeLogDaemon -try: - from pydantic import BaseSettings -except Exception as e: - pass -try: - from pydantic_settings import BaseSettings -except Exception as e: - pass - - -# class Settings(BaseSettings): -# redis_addr: str -# redis_port: str -# redis_password: str -# end_point_name: str -# model_name: str -# model_version: str -# model_infer_url: str -# version: str -# use_mqtt_inference: bool -# use_worker_gateway: bool -# ext_info: str -# -# -# settings = Settings() - -class settings: - redis_addr = "127.0.0.1" - redis_port = 6379 - redis_password = "fedml_default" - end_point_name = "" - model_name = "" - model_version = "" - model_infer_url = "127.0.0.1" - version = "dev" - use_mqtt_inference = False - use_worker_gateway = False + +class Settings: + fedml.load_env() + redis_addr = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR) + redis_port = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT) + redis_password = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD) + model_infer_host = os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) + version = fedml.get_env_version() + mqtt_config = MLOpsConfigs.fetch_mqtt_config() ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75" @@ -62,6 +38,11 @@ class settings: api = FastAPI() +@api.on_event("startup") +async def startup_event(): + configure_logging() + + @api.get('/') async def root(): return {'message': 'FedML Federated Inference Service!'} @@ -185,9 +166,9 @@ async def _predict( # Start timing for model metrics model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name, model_id, in_model_name, model_version, - settings.model_infer_url, - settings.redis_addr, settings.redis_port, settings.redis_password, - version=settings.version) + Settings.model_infer_host, + Settings.redis_addr, Settings.redis_port, Settings.redis_password, + version=Settings.version) model_metrics.set_start_time(start_time) # Send inference request to idle device @@ -224,8 +205,8 @@ def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_ We allow missing end_point_name and model_name in the input parameters. return end_point_name, model_name """ - FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password) - redis_key = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \ + FedMLModelCache.get_instance().set_redis_params(Settings.redis_addr, Settings.redis_port, Settings.redis_password) + redis_key = FedMLModelCache.get_instance(Settings.redis_addr, Settings.redis_port). \ get_end_point_full_key_by_id(end_point_id) if redis_key is not None: end_point_name = "" @@ -258,8 +239,8 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_ inference_output_url = "" model_version = "" # Found idle device (TODO: optimize the algorithm to search best device for inference) - FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password) - payload, idle_device = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \ + FedMLModelCache.get_instance().set_redis_params(Settings.redis_addr, Settings.redis_port, Settings.redis_password) + payload, idle_device = FedMLModelCache.get_instance(Settings.redis_addr, Settings.redis_port). 
\ get_idle_device(end_point_id, end_point_name, in_model_name, in_model_version) if payload is not None: logging.info("found idle deployment result {}".format(payload)) @@ -304,7 +285,7 @@ async def send_inference_request(idle_device, endpoint_id, inference_url, input_ if not has_public_ip: connect_str = "@FEDML@" - random_out = sys_utils.random2(settings.ext_info, "FEDML@9999GREAT") + random_out = sys_utils.random2(Settings.ext_info, "FEDML@9999GREAT") config_list = random_out.split(connect_str) agent_config = dict() agent_config["mqtt_config"] = dict() @@ -336,8 +317,8 @@ def auth_request_token(end_point_id, end_point_name, model_name, token): if token is None: return False - FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password) - cached_token = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port). \ + FedMLModelCache.get_instance().set_redis_params(Settings.redis_addr, Settings.redis_port, Settings.redis_password) + cached_token = FedMLModelCache.get_instance(Settings.redis_addr, Settings.redis_port). \ get_end_point_token(end_point_id, end_point_name, model_name) if cached_token is not None and str(cached_token) == str(token): return True @@ -349,8 +330,8 @@ def is_endpoint_activated(end_point_id): if end_point_id is None: return False - FedMLModelCache.get_instance().set_redis_params(settings.redis_addr, settings.redis_port, settings.redis_password) - activated = FedMLModelCache.get_instance(settings.redis_addr, settings.redis_port).get_end_point_activation( + FedMLModelCache.get_instance().set_redis_params(Settings.redis_addr, Settings.redis_port, Settings.redis_password) + activated = FedMLModelCache.get_instance(Settings.redis_addr, Settings.redis_port).get_end_point_activation( end_point_id) return activated @@ -361,8 +342,6 @@ def logging_inference_request(request, response): try: log_dir = ServerConstants.get_log_file_dir() - if not os.path.exists(log_dir): - os.makedirs(log_dir, exist_ok=True) inference_log_file = os.path.join(log_dir, "inference.log") with open(inference_log_file, "a") as f: f.writelines([f"request: {request}, response: {response}\n"]) @@ -370,15 +349,24 @@ def logging_inference_request(request, response): logging.info("failed to log inference request and response to file.") -def set_logging_args(args=None): - global logging_args - logging_args = args - if logging_args is not None: - # Force run id to 0, as the gateway is shared by all the runs. 
- setattr(args, "run_id", "0") - MLOpsRuntimeLog.get_instance(args).init_logs(log_level=logging.INFO) - MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(args.run_id, args.edge_id) - logging.info("start the log processor") +def configure_logging(): + parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) + args = parser.parse_args([]) + + setattr(args, "log_file_dir", ServerConstants.get_log_file_dir()) + setattr(args, "run_id", "inference_gateway") + setattr(args, "role", "server") + setattr(args, "using_mlops", True) + setattr(args, "config_version", fedml.get_env_version()) + + runner_info = ServerConstants.get_runner_infos() + if not (runner_info and "edge_id" in runner_info): + raise Exception("Inference gateway couldn't be started as edge_id couldn't be parsed from runner_infos.yaml") + setattr(args, "edge_id", runner_info.get("edge_id")) + + MLOpsRuntimeLog.get_instance(args).init_logs(log_level=logging.INFO) + MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(args.run_id, args.edge_id) + logging.info("start the log processor for inference gateway") if __name__ == "__main__": diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py index 6b5b335863..eb01fbb599 100644 --- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py @@ -5,6 +5,7 @@ import subprocess import sys from os.path import expanduser +from pathlib import Path import psutil import yaml @@ -329,9 +330,23 @@ def save_bootstrap_process(run_id, process_id): run_id, process_id, ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME, info_file_prefix=SchedulerConstants.RUN_PROCESS_TYPE_BOOTSTRAP_PROCESS) + @staticmethod + def get_runner_infos(): + local_pkg_data_dir = ServerConstants.get_data_dir() + os.makedirs(local_pkg_data_dir, exist_ok=True) + os.makedirs(os.path.join(local_pkg_data_dir, ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME), exist_ok=True) + + runner_info_file = os.path.join(local_pkg_data_dir, ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME, + "runner_infos.yaml") + runner_info = {} + try: + runner_info = yaml.safe_load(Path(runner_info_file).read_text()) + except Exception as e: + logging.error(f"Failed to parse runner info: {e}") + return runner_info + @staticmethod def save_runner_infos(unique_device_id, edge_id, run_id=None): - home_dir = expanduser("~") local_pkg_data_dir = ServerConstants.get_data_dir() os.makedirs(local_pkg_data_dir, exist_ok=True) os.makedirs(os.path.join(local_pkg_data_dir, ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME), exist_ok=True) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index 4d5974237d..15841600ad 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -24,7 +24,6 @@ from ..master.base_master_job_runner import FedMLBaseMasterJobRunner from .device_replica_controller import FedMLDeviceReplicaController from .job_runner_msg_sender import FedMLDeployJobRunnerMsgSender -from .device_model_inference import set_logging_args class FedMLDeployMasterJobRunner(FedMLBaseMasterJobRunner, FedMLDeployJobRunnerMsgSender, ABC): @@ -66,11 +65,6 @@ def _generate_job_runner_instance(self, args, 
run_id=None, request_json=None, ag def _generate_extend_queue_list(self): return [self.deployment_result_queue] - @staticmethod - def start_inference_gateway_server(inference_gw_cmd, port, args): - set_logging_args(args) - uvicorn.run(inference_gw_cmd, host="0.0.0.0", port=port, log_level="info") - # Override def run_impl( self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue, @@ -493,8 +487,8 @@ def start_device_inference_gateway( inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api" inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd) if inference_gateway_pids is None or len(inference_gateway_pids) <= 0: - # cur_dir = os.path.dirname(__file__) - # fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) + cur_dir = os.path.dirname(__file__) + fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) # connect_str = "@FEDML@" # ext_info = sys_utils.random1( # agent_config["mqtt_config"]["BROKER_HOST"] + connect_str + @@ -503,10 +497,19 @@ def start_device_inference_gateway( # agent_config["mqtt_config"]["MQTT_PWD"] + connect_str + # str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT") # python_program = get_python_program() - inference_gateway_process = multiprocessing.Process( - target=FedMLDeployMasterJobRunner.start_inference_gateway_server, args=(inference_gw_cmd, - inference_port, args) - ) + # inference_gateway_process = multiprocessing.Process( + # target=FedMLDeployMasterJobRunner.start_inference_gateway_server, args=(inference_gw_cmd, + # inference_port, args) + # ) + inference_gateway_process = ServerConstants.exec_console_with_script(f"{python_program} " + f"-m uvicorn {inference_gw_cmd} " + f"--host 0.0.0.0 " + f"--port {str(inference_port)} " + f"--reload --reload-delay 3 " + f"--reload-dir {fedml_base_dir} " + f"--log-level critical", + should_capture_stdout=False, + should_capture_stderr=False) # inference_gateway_process = ServerConstants.exec_console_with_script( # "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " # "END_POINT_NAME=\"{}\" " @@ -518,9 +521,9 @@ def start_device_inference_gateway( # "", "", "", fedml.get_env_version(), use_mqtt_inference, # use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), # fedml_base_dir), - # should_capture_stdout=False, should_capture_stderr=False) - inference_gateway_process.daemon = True - inference_gateway_process.start() + # ) + # inference_gateway_process.daemon = True + # inference_gateway_process.start() return inference_gateway_process else: diff --git a/python/fedml/computing/scheduler/slave/client_login.py b/python/fedml/computing/scheduler/slave/client_login.py index 37a6dc8064..95c772a225 100755 --- a/python/fedml/computing/scheduler/slave/client_login.py +++ b/python/fedml/computing/scheduler/slave/client_login.py @@ -30,8 +30,6 @@ def logout(): if args.api_key == "": args.api_key = args.user - fedml.set_env_version("test") - if args.local_on_premise_platform_host != "127.0.0.1": fedml.set_local_on_premise_platform_host(args.local_on_premise_platform_host) if args.local_on_premise_platform_port != 80: diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py index 6c25c38128..c1b46fd0dd 100644 --- a/python/fedml/core/mlops/mlops_configs.py +++ b/python/fedml/core/mlops/mlops_configs.py @@ -154,6 +154,10 @@ def fetch_all_configs(): fetched_configs[Configs.ML_OPS_CONFIG], 
fetched_configs[Configs.DOCKER_CONFIG]) + @staticmethod + def fetch_mqtt_config(): + return MLOpsConfigs._fetch_configs({Configs.MQTT_CONFIG}) + if __name__ == "__main__": fedml.set_env_version("release") From c1e37af1385f2746193658ff852f5b545afda13d Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Wed, 5 Jun 2024 20:02:57 +0000 Subject: [PATCH 115/251] Nit. --- .../computing/scheduler/model_scheduler/worker_job_runner.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py index 370ba57b49..348b760153 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -521,6 +521,9 @@ def find_previous_downloaded_pkg(parent_dir: str, model_name: str) -> str: exact_matched = False + if unzip_fd == "": + return res + for folder in os.listdir(unzip_fd): if folder == model_name: res = os.path.join(unzip_fd, folder) From a4b8ad23969efec0c51758a3068590b7c0152bb0 Mon Sep 17 00:00:00 2001 From: alaydshah Date: Wed, 5 Jun 2024 22:18:27 +0000 Subject: [PATCH 116/251] Pipe in Mqtt config directly instead of deserializing object --- .../model_scheduler/device_model_inference.py | 30 +++---------- .../model_scheduler/master_job_runner.py | 45 ++----------------- .../master_job_runner_manager.py | 4 +- .../master_protocol_manager.py | 2 +- python/fedml/core/mlops/mlops_configs.py | 3 +- 5 files changed, 16 insertions(+), 68 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index f55eda9047..3d9db78a23 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -21,7 +21,6 @@ from fedml.computing.scheduler.model_scheduler.device_mqtt_inference_protocol import FedMLMqttInference from fedml.computing.scheduler.model_scheduler.device_http_proxy_inference_protocol import FedMLHttpProxyInference from fedml.core.mlops.mlops_configs import MLOpsConfigs -from fedml.computing.scheduler.comm_utils import sys_utils from fedml.core.mlops import MLOpsRuntimeLog, MLOpsRuntimeLogDaemon @@ -33,11 +32,8 @@ class Settings: model_infer_host = os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) version = fedml.get_env_version() mqtt_config = MLOpsConfigs.fetch_mqtt_config() - ext_info = "2b34303961245c4f175f2236282d7a272c040b0904747579087f6a760112030109010c215d54505707140005190a051c347f365c4a430c020a7d39120e26032a78730f797f7c031f0901657e75" -logging_args = None - api = FastAPI() FEDML_MODEL_CACHE = FedMLModelCache.get_instance().set_redis_params(redis_addr=Settings.redis_addr, @@ -47,7 +43,6 @@ class Settings: @api.middleware("http") async def auth_middleware(request: Request, call_next): - if "/inference" in request.url.path or "/api/v1/predict" in request.url.path: try: # Attempt to parse the JSON body. 
@@ -138,7 +133,7 @@ async def predict_openai(end_point_id, request: Request): try: response = await _predict(end_point_id, input_json, header) except Exception as e: - response = {"error": True, "message": f"{traceback.format_exc()}"} + response = {"error": True, "message": f"{traceback.format_exc()}, exception {e}"} return response @@ -174,7 +169,6 @@ async def _predict( input_json, header=None ) -> Union[MutableMapping[str, Any], Response, StreamingResponse]: - # Always increase the pending requests counter on a new incoming request. FEDML_MODEL_CACHE.update_pending_requests_counter(increase=True) inference_response = {} @@ -222,7 +216,7 @@ async def _predict( # Start timing for model metrics model_metrics = FedMLModelMetrics(end_point_id, in_end_point_name, model_id, in_model_name, model_version, - Settings.model_infer_url, + Settings.model_infer_host, Settings.redis_addr, Settings.redis_port, Settings.redis_password, @@ -269,7 +263,6 @@ async def _predict( FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) - def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_name=None, in_model_version=None, enable_check=False): """ @@ -308,7 +301,7 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_ inference_output_url = "" model_version = "" # Found idle device (TODO: optimize the algorithm to search best device for inference) - payload, idle_device = FEDML_MODEL_CACHE.\ + payload, idle_device = FEDML_MODEL_CACHE. \ get_idle_device(end_point_id, end_point_name, in_model_name, in_model_version) if payload is not None: logging.info("found idle deployment result {}".format(payload)) @@ -328,7 +321,6 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_ async def send_inference_request(idle_device, end_point_id, inference_url, input_list, output_list, inference_type="default", has_public_ip=True): - request_timeout_sec = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \ .get("request_timeout_sec", ClientConstants.INFERENCE_REQUEST_TIMEOUT) @@ -367,16 +359,7 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input return inference_response if not has_public_ip: - connect_str = "@FEDML@" - random_out = sys_utils.random2(Settings.ext_info, "FEDML@9999GREAT") - config_list = random_out.split(connect_str) - agent_config = dict() - agent_config["mqtt_config"] = dict() - agent_config["mqtt_config"]["BROKER_HOST"] = config_list[0] - agent_config["mqtt_config"]["BROKER_PORT"] = int(config_list[1]) - agent_config["mqtt_config"]["MQTT_USER"] = config_list[2] - agent_config["mqtt_config"]["MQTT_PWD"] = config_list[3] - agent_config["mqtt_config"]["MQTT_KEEPALIVE"] = int(config_list[4]) + agent_config = {"mqtt_config": Settings.mqtt_config} mqtt_inference = FedMLMqttInference( agent_config=agent_config, run_id=end_point_id) @@ -410,12 +393,13 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input def auth_request_token(end_point_id, end_point_name, model_name, token): if token is None: return False - cached_token = FEDML_MODEL_CACHE.\ + cached_token = FEDML_MODEL_CACHE. 
\ get_end_point_token(end_point_id, end_point_name, model_name) if cached_token is not None and str(cached_token) == str(token): return True return False + def is_endpoint_activated(end_point_id): if end_point_id is None: return False @@ -433,7 +417,7 @@ def logging_inference_request(request, response): with open(inference_log_file, "a") as f: f.writelines([f"request: {request}, response: {response}\n"]) except Exception as ex: - logging.info("failed to log inference request and response to file.") + logging.info(f"failed to log inference request and response to file with exception {ex}") def configure_logging(): diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index bd40b71c0e..d8a81e016f 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -1,7 +1,6 @@ import copy import json import logging -import multiprocessing import os import time import queue @@ -10,7 +9,6 @@ from multiprocessing import Queue import fedml -import uvicorn from fedml.core.mlops import MLOpsRuntimeLog, MLOpsConfigs from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter from .device_client_constants import ClientConstants @@ -117,8 +115,7 @@ def run_impl( message_center=self.message_center) # start unified inference gateway process if not started - FedMLDeployMasterJobRunner.start_device_inference_gateway( - args=self.args, inference_port=inference_port, agent_config=self.agent_config) + FedMLDeployMasterJobRunner.start_device_inference_gateway(inference_port=inference_port) # start inference monitor process FedMLDeployMasterJobRunner.stop_device_inference_monitor( @@ -469,10 +466,7 @@ def cleanup_runner_process(self, run_id): ServerConstants.cleanup_run_process(run_id, not_kill_subprocess=True) @staticmethod - def start_device_inference_gateway( - args, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT, agent_config=None, - redis_addr="localhost", redis_port=6379, redis_password="fedml_default" - ): + def start_device_inference_gateway(inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT): # start unified inference server python_program = get_python_program() master_port = os.getenv("FEDML_MASTER_PORT", None) @@ -480,27 +474,11 @@ def start_device_inference_gateway( inference_port = int(master_port) if not ServerConstants.is_running_on_k8s(): logging.info(f"start the model inference gateway...") - # use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False") - # use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False - # use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False") - # use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api" inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd) if inference_gateway_pids is None or len(inference_gateway_pids) <= 0: cur_dir = os.path.dirname(__file__) fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - # connect_str = "@FEDML@" - # ext_info = sys_utils.random1( - # agent_config["mqtt_config"]["BROKER_HOST"] + connect_str + - # str(agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str + - # agent_config["mqtt_config"]["MQTT_USER"] + connect_str + - # agent_config["mqtt_config"]["MQTT_PWD"] + connect_str + - # 
str(agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT") - # python_program = get_python_program() - # inference_gateway_process = multiprocessing.Process( - # target=FedMLDeployMasterJobRunner.start_inference_gateway_server, args=(inference_gw_cmd, - # inference_port, args) - # ) inference_gateway_process = ServerConstants.exec_console_with_script(f"{python_program} " f"-m uvicorn {inference_gw_cmd} " f"--host 0.0.0.0 " @@ -510,21 +488,6 @@ def start_device_inference_gateway( f"--log-level critical", should_capture_stdout=False, should_capture_stderr=False) - # inference_gateway_process = ServerConstants.exec_console_with_script( - # "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " - # "END_POINT_NAME=\"{}\" " - # "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " - # "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " - # "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - # "--log-level critical".format( - # redis_addr, str(redis_port), redis_password, "", - # "", "", "", fedml.get_env_version(), use_mqtt_inference, - # use_worker_gateway, ext_info, python_program, inference_gw_cmd, str(inference_port), - # fedml_base_dir), - # ) - # inference_gateway_process.daemon = True - # inference_gateway_process.start() - return inference_gateway_process else: return inference_gateway_pids[0] @@ -562,7 +525,7 @@ def stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_id, model_name, model_version) @staticmethod - def recover_inference_and_monitor(args): + def recover_inference_and_monitor(): # noinspection PyBroadException try: agent_config = dict() @@ -589,7 +552,7 @@ def recover_inference_and_monitor(args): if not is_activated: continue - FedMLDeployMasterJobRunner.start_device_inference_gateway(args=args, inference_port=inference_port, agent_config=agent_config) + FedMLDeployMasterJobRunner.start_device_inference_gateway(inference_port=inference_port) FedMLDeployMasterJobRunner.stop_device_inference_monitor( run_id, end_point_name, model_id, model_name, model_version) diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py index 02a6fde329..c761cd6d8f 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner_manager.py @@ -55,8 +55,8 @@ def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_ run_id, end_point_name, model_id, model_name, model_version) @staticmethod - def recover_inference_and_monitor(args): - FedMLDeployMasterJobRunner.recover_inference_and_monitor(args=args) + def recover_inference_and_monitor(): + FedMLDeployMasterJobRunner.recover_inference_and_monitor() @staticmethod def generate_request_json_with_replica_num_diff(run_id, edge_id, request_json): diff --git a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py index d90857c0ab..668d1192ce 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_protocol_manager.py @@ -82,7 +82,7 @@ def _init_extra_items(self): except Exception as e: pass - FedMLDeployJobRunnerManager.recover_inference_and_monitor(args = self.args) + 
FedMLDeployJobRunnerManager.recover_inference_and_monitor() # Override def _process_connection_ready(self): diff --git a/python/fedml/core/mlops/mlops_configs.py b/python/fedml/core/mlops/mlops_configs.py index c1b46fd0dd..1ed2e0476d 100644 --- a/python/fedml/core/mlops/mlops_configs.py +++ b/python/fedml/core/mlops/mlops_configs.py @@ -156,7 +156,8 @@ def fetch_all_configs(): @staticmethod def fetch_mqtt_config(): - return MLOpsConfigs._fetch_configs({Configs.MQTT_CONFIG}) + fetched_config = MLOpsConfigs._fetch_configs({Configs.MQTT_CONFIG}) + return fetched_config[Configs.MQTT_CONFIG] if __name__ == "__main__": From 0ad81ce9819ecf48ee678fd51d27b01a99741953 Mon Sep 17 00:00:00 2001 From: alaydshah Date: Wed, 5 Jun 2024 23:59:21 +0000 Subject: [PATCH 117/251] Nits --- .../model_scheduler/device_model_inference.py | 2 +- .../device_mqtt_inference_protocol.py | 1 - .../model_scheduler/master_job_runner.py | 2 +- .../fedml/core/mlops/mlops_runtime_log_daemon.py | 15 ++++++++++++--- 4 files changed, 14 insertions(+), 6 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index 3d9db78a23..111052faf1 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -442,7 +442,7 @@ def configure_logging(): if __name__ == "__main__": import uvicorn - port = 2203 logging.basicConfig(level=logging.INFO) + configure_logging() uvicorn.run(api, host="0.0.0.0", port=port, log_level="info") diff --git a/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py b/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py index 1fac5a984b..9cd5c1e9a2 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_mqtt_inference_protocol.py @@ -10,7 +10,6 @@ import asyncio -from ..comm_utils.constants import SchedulerConstants from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager from .device_http_inference_protocol import FedMLHttpInference diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index d8a81e016f..a10bd2c559 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -485,7 +485,7 @@ def start_device_inference_gateway(inference_port=ServerConstants.MODEL_INFERENC f"--port {str(inference_port)} " f"--reload --reload-delay 3 " f"--reload-dir {fedml_base_dir} " - f"--log-level critical", + f"--log-level info", should_capture_stdout=False, should_capture_stderr=False) return inference_gateway_process diff --git a/python/fedml/core/mlops/mlops_runtime_log_daemon.py b/python/fedml/core/mlops/mlops_runtime_log_daemon.py index 64bd982ae3..7791d8f4e5 100644 --- a/python/fedml/core/mlops/mlops_runtime_log_daemon.py +++ b/python/fedml/core/mlops/mlops_runtime_log_daemon.py @@ -223,23 +223,32 @@ def __upload(self, log_upload_request) -> bool: if cert_path is not None: try: requests.session().verify = cert_path + logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, " + f"log_upload_request: {log_upload_request}, url: {self.log_server_url}") + # logging.info(f"FedMLDebug POST log to server. 
run_id {run_id}, device_id {device_id}") response = requests.post( self.log_server_url, json=log_upload_request, verify=True, headers=log_headers ) + logging.info(f"FedMLDebug POST log to server. response: {response}") # logging.info(f"FedMLDebug POST log to server run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}") except requests.exceptions.SSLError as err: MLOpsConfigs.install_root_ca_file() # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}") + logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, " + f"log_upload_request: {log_upload_request}, url: {self.log_server_url}") + response = requests.post( self.log_server_url, json=log_upload_request, verify=True, headers=log_headers ) + logging.info(f"FedMLDebug POST log to server. response: {response}") # logging.info(f"FedMLDebug POST log to server run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}") else: - # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}") + logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, " + f"log_upload_request: {log_upload_request}, url: {self.log_server_url}") response = requests.post(self.log_server_url, headers=log_headers, json=log_upload_request) - # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}") + logging.info(f"FedMLDebug POST log to server. response: {response}") if response.status_code != 200: logging.error(f"Failed to upload log to server. run_id {self.run_id}, device_id {self.device_id}. " f"response.status_code: {response.status_code}") @@ -404,9 +413,9 @@ def __new__(cls, *args, **kwargs): def __init__(self, in_args): self.args = in_args self.edge_id = MLOpsLoggingUtils.get_edge_id_from_args(self.args) + url = fedml._get_backend_service() try: if self.args.log_server_url is None or self.args.log_server_url == "": - url = fedml._get_backend_service() self.log_server_url = f"{url}/fedmlLogsServer/logs/update" else: self.log_server_url = self.args.log_server_url From d71387651e813e93865fd2e864cb685c874a813c Mon Sep 17 00:00:00 2001 From: alaydshah Date: Thu, 6 Jun 2024 04:48:58 +0000 Subject: [PATCH 118/251] Fix bugs --- .../model_scheduler/device_model_inference.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index 111052faf1..57f9d80208 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -25,6 +25,7 @@ class Settings: + server_name = "DEVICE_INFERENCE_GATEWAY" fedml.load_env() redis_addr = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR) redis_port = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT) @@ -425,7 +426,7 @@ def configure_logging(): args = parser.parse_args([]) setattr(args, "log_file_dir", ServerConstants.get_log_file_dir()) - setattr(args, "run_id", "inference_gateway") + setattr(args, "run_id", -1) setattr(args, "role", "server") setattr(args, "using_mlops", True) setattr(args, "config_version", fedml.get_env_version()) @@ -433,10 +434,12 @@ def configure_logging(): runner_info = ServerConstants.get_runner_infos() if not (runner_info and "edge_id" in runner_info): raise Exception("Inference gateway couldn't be 
started as edge_id couldn't be parsed from runner_infos.yaml") - setattr(args, "edge_id", runner_info.get("edge_id")) + setattr(args, "edge_id", int(runner_info.get("edge_id"))) MLOpsRuntimeLog.get_instance(args).init_logs(log_level=logging.INFO) - MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(args.run_id, args.edge_id) + MLOpsRuntimeLogDaemon.get_instance(args).start_log_processor(log_run_id=args.run_id, log_device_id=args.edge_id, + log_source=Settings.server_name, + log_file_prefix=Settings.server_name) logging.info("start the log processor for inference gateway") @@ -444,5 +447,4 @@ def configure_logging(): import uvicorn port = 2203 logging.basicConfig(level=logging.INFO) - configure_logging() uvicorn.run(api, host="0.0.0.0", port=port, log_level="info") From 134f63e3adab71c976cfd012459fc7db6b744906 Mon Sep 17 00:00:00 2001 From: alaydshah Date: Thu, 6 Jun 2024 05:03:03 +0000 Subject: [PATCH 119/251] Remove info logging added for debugging --- .../fedml/core/mlops/mlops_runtime_log_daemon.py | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/python/fedml/core/mlops/mlops_runtime_log_daemon.py b/python/fedml/core/mlops/mlops_runtime_log_daemon.py index 7791d8f4e5..ff06dc91b3 100644 --- a/python/fedml/core/mlops/mlops_runtime_log_daemon.py +++ b/python/fedml/core/mlops/mlops_runtime_log_daemon.py @@ -223,32 +223,17 @@ def __upload(self, log_upload_request) -> bool: if cert_path is not None: try: requests.session().verify = cert_path - logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, " - f"log_upload_request: {log_upload_request}, url: {self.log_server_url}") - - # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}") response = requests.post( self.log_server_url, json=log_upload_request, verify=True, headers=log_headers ) - logging.info(f"FedMLDebug POST log to server. response: {response}") - # logging.info(f"FedMLDebug POST log to server run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}") except requests.exceptions.SSLError as err: MLOpsConfigs.install_root_ca_file() - # logging.info(f"FedMLDebug POST log to server. run_id {run_id}, device_id {device_id}") - logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, " - f"log_upload_request: {log_upload_request}, url: {self.log_server_url}") - response = requests.post( self.log_server_url, json=log_upload_request, verify=True, headers=log_headers ) - logging.info(f"FedMLDebug POST log to server. response: {response}") - # logging.info(f"FedMLDebug POST log to server run_id {run_id}, device_id {device_id}. response.status_code: {response.status_code}") else: - logging.info(f"FedMLDebug POST log to server. log_headers: {log_headers}, " - f"log_upload_request: {log_upload_request}, url: {self.log_server_url}") response = requests.post(self.log_server_url, headers=log_headers, json=log_upload_request) - logging.info(f"FedMLDebug POST log to server. response: {response}") if response.status_code != 200: logging.error(f"Failed to upload log to server. run_id {self.run_id}, device_id {self.device_id}. 
" f"response.status_code: {response.status_code}") From fd446b0a1aed81b320da001295c3e44b232cbe4c Mon Sep 17 00:00:00 2001 From: alaydshah Date: Thu, 6 Jun 2024 05:49:01 +0000 Subject: [PATCH 120/251] Fix --- .../scheduler/model_scheduler/device_model_inference.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index 57f9d80208..7b3ac1d0bf 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -37,9 +37,10 @@ class Settings: api = FastAPI() -FEDML_MODEL_CACHE = FedMLModelCache.get_instance().set_redis_params(redis_addr=Settings.redis_addr, - redis_port=Settings.redis_port, - redis_password=Settings.redis_password) +FEDML_MODEL_CACHE = FedMLModelCache.get_instance() +FEDML_MODEL_CACHE.set_redis_params(redis_addr=Settings.redis_addr, + redis_port=Settings.redis_port, + redis_password=Settings.redis_password) @api.middleware("http") From 19abac107d31439ca1c3269b53414c92eff02c2f Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 6 Jun 2024 17:29:56 +0800 Subject: [PATCH 121/251] [CoreEngine] update the version and dependent libs. --- python/fedml/__init__.py | 2 +- python/setup.py | 5 ++++- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py index c13a64566e..21da84c9ab 100644 --- a/python/fedml/__init__.py +++ b/python/fedml/__init__.py @@ -36,7 +36,7 @@ _global_training_type = None _global_comm_backend = None -__version__ = "0.8.31" +__version__ = "0.9.0" # This is the deployment environment used for different roles (RD/PM/BD/Public Developers). Potential VALUE: local, dev, test, release diff --git a/python/setup.py b/python/setup.py index e88788d1ff..9651465d32 100644 --- a/python/setup.py +++ b/python/setup.py @@ -67,6 +67,9 @@ def finalize_options(self): # Need to pin this version due to breaking change released in python docker sdk 'requests<2.32', 'python-dotenv', + 'protobuf>=3.20.2,<4.0dev', + 'typer<0.10.0,>=0.3.0', + 'fastapi-cli==0.0.1' ] requirements_extra_mpi = [ @@ -123,7 +126,7 @@ def finalize_options(self): setup( name="fedml", - version="0.8.31", + version="0.9.0", author="FedML Team", author_email="ch@fedml.ai", description="A research and production integrated edge-cloud library for " From 27ad2e7a21cf3f98ffbd9443a4db6f85218bbb92 Mon Sep 17 00:00:00 2001 From: Alex Date: Thu, 6 Jun 2024 18:11:16 +0800 Subject: [PATCH 122/251] [CoreEngine] remove the deprecated files in the scheduler. 
--- .../master/server_runner_deprecated.py | 2775 ----------------- .../device_client_runner_deprecated.py | 1483 --------- .../device_server_runner_deprecated.py | 2022 ------------ .../slave/client_runner_deprecated.py | 1872 ----------- 4 files changed, 8152 deletions(-) delete mode 100755 python/fedml/computing/scheduler/master/server_runner_deprecated.py delete mode 100755 python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py delete mode 100755 python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py delete mode 100755 python/fedml/computing/scheduler/slave/client_runner_deprecated.py diff --git a/python/fedml/computing/scheduler/master/server_runner_deprecated.py b/python/fedml/computing/scheduler/master/server_runner_deprecated.py deleted file mode 100755 index 238349a3e4..0000000000 --- a/python/fedml/computing/scheduler/master/server_runner_deprecated.py +++ /dev/null @@ -1,2775 +0,0 @@ -import base64 -import copy -import json -import logging -import platform -import queue -import sys - -import multiprocessing -from multiprocessing import Process, Queue, Value, Array -import os -import shutil -import stat -import subprocess -import threading - -import time -import traceback -import urllib -import uuid -import zipfile -from os import listdir -from urllib.parse import urljoin, urlparse - -import requests - -import fedml -from ..comm_utils.job_cleanup import JobCleanup -from ..scheduler_core.scheduler_matcher import SchedulerMatcher -from ..comm_utils.constants import SchedulerConstants -from ..comm_utils.job_utils import JobRunnerUtils -from ..comm_utils.run_process_utils import RunProcessUtils -from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog - -from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager -from ..comm_utils.yaml_utils import load_yaml_config -from ..slave.client_constants import ClientConstants -from ..master.server_constants import ServerConstants - -from ....core.mlops.mlops_metrics import MLOpsMetrics - -from ....core.mlops.mlops_configs import MLOpsConfigs -from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon -from ....core.mlops.mlops_status import MLOpsStatus -from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program -from ..comm_utils import sys_utils -from .server_data_interface import FedMLServerDataInterface -from ....core.mlops.mlops_utils import MLOpsUtils -from ..scheduler_entry.constants import Constants -from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner -from ..model_scheduler.device_model_cards import FedMLModelCards -from ..model_scheduler import device_client_constants -from ..scheduler_core.log_manager import LogsManager -from ..scheduler_core.metrics_manager import MetricsManager -from ..scheduler_core.master_api_daemon import MasterApiDaemon -from fedml.utils.debugging import debug -from ..scheduler_core.message_center import FedMLMessageCenter -import ssl - - -class RunnerError(Exception): - """ Runner stopped. """ - pass - - -class RunnerCompletedError(Exception): - """ Runner completed. 
""" - pass - - -class FedMLServerRunner(FedMLMessageCenter): - FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-" - debug_cloud_server = False - - def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0): - super().__init__() - self.master_api_daemon = None - self.run_stop_process = None - self.run_stop_process_map = dict() - self.run_edge_id_status_queue_map = dict() - self.run_metrics_queue_map = dict() - self.run_events_queue_map = dict() - self.run_artifacts_queue_map = dict() - self.run_logs_queue_map = dict() - self.async_check_timeout = 0 - self.enable_async_cluster = False - self.origin_fedml_config_object = None - self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT - self.local_api_process = None - self.run_process_event = None - self.run_process_event_map = dict() - self.run_process_completed_event = None - self.run_process_completed_event_map = dict() - self.run_process_event_map_for_stop = dict() - self.edge_device_info_queue = None - self.run_edge_device_info_queue_map = dict() - self.run_edge_device_info_queue_map_for_stop = dict() - self.run_edge_device_info_global_queue = None - self.run_edge_device_info_global_queue_for_stop = None - self.run_process = None - self.run_process_map = dict() - self.start_request_json = None - self.server_docker_image = None - self.cloud_server_name = None - self.run_as_cloud_agent = False - self.run_as_cloud_server = False - self.run_as_edge_server_and_agent = False - self.run_as_cloud_server_and_agent = False - self.fedml_packages_base_dir = None - self.fedml_packages_unzip_dir = None - self.mqtt_mgr = None - self.running_request_json = dict() - self.run_id = run_id - self.unique_device_id = None - self.edge_id = edge_id - self.server_agent_id = 0 - if request_json is not None: - self.server_agent_id = request_json.get("server_id", 0) - self.process = None - self.args = args - self.request_json = copy.deepcopy(request_json) - self.version = args.version - self.device_id = args.device_id - self.cur_dir = os.path.split(os.path.realpath(__file__))[0] - if args.current_running_dir is not None: - self.cur_dir = args.current_running_dir - - image_version = self.version - if image_version == "local": - image_version = "dev" - self.server_docker_base_image = "/fedml-device-image:" + image_version - - self.agent_config = agent_config - self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data") - self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data") - self.fedml_data_dir = self.fedml_data_base_package_dir - self.fedml_config_dir = os.path.join("/", "fedml", "conf") - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = { - "${FEDSYS.RUN_ID}": "", - "${FEDSYS.PRIVATE_LOCAL_DATA}": "", - "${FEDSYS.CLIENT_ID_LIST}": "", - "${FEDSYS.SYNTHETIC_DATA_URL}": "", - "${FEDSYS.IS_USING_LOCAL_DATA}": "", - "${FEDSYS.CLIENT_NUM}": "", - "${FEDSYS.CLIENT_INDEX}": "", - "${FEDSYS.CLIENT_OBJECT_LIST}": "", - "${FEDSYS.LOG_SERVER_URL}": "", - } - - self.mlops_metrics = None - self.client_agent_active_list = dict() - self.server_active_list = dict() - self.run_status = None - self.ntp_offset = MLOpsUtils.get_ntp_offset() - self.runner_list = dict() - self.enable_simulation_cloud_agent = False - self.use_local_process_as_cloud_server = False - - self.model_device_server = None - self.run_model_device_ids = dict() - self.run_edge_ids = dict() - self.master_api_process = None - - self.subscribed_topics = list() - self.user_name = None - self.message_center = None - - def 
build_dynamic_constrain_variables(self, run_id, run_config): - data_config = run_config.get("data_config", {}) - server_edge_id_list = self.request_json["edgeids"] - is_using_local_data = 0 - private_data_dir = data_config.get("privateLocalData", "") - synthetic_data_url = data_config.get("syntheticDataUrl", "") - edges = self.request_json["edges"] - # if private_data_dir is not None \ - # and len(str(private_data_dir).strip(' ')) > 0: - # is_using_local_data = 1 - if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0: - params_config = run_config.get("parameters", None) - private_data_dir = ServerConstants.get_data_dir() - if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0: - synthetic_data_url = private_data_dir - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(server_edge_id_list).replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data) - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list) - client_objects = str(json.dumps(edges)) - client_objects = client_objects.replace(" ", "").replace("\n", "").replace('"', '\\"') - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][ - "LOG_SERVER_URL" - ] - - def unzip_file(self, zip_file, unzip_file_path) -> str: - unzipped_file_name = "" - if zipfile.is_zipfile(zip_file): - with zipfile.ZipFile(zip_file, "r") as zipf: - zipf.extractall(unzip_file_path) - unzipped_file_name = zipf.namelist()[0] - else: - raise Exception("Invalid zip file {}".format(zip_file)) - - return unzipped_file_name - - def package_download_progress(self, count, blksize, filesize): - self.check_runner_stop_event() - - downloaded = count * blksize - downloaded = filesize if downloaded > filesize else downloaded - progress = (downloaded / filesize * 100) if filesize != 0 else 0 - progress_int = int(progress) - downloaded_kb = format(downloaded / 1024, '.2f') - - # since this hook function is stateless, we need a state to avoid printing progress repeatedly - if count == 0: - self.prev_download_progress = 0 - if progress_int != self.prev_download_progress and progress_int % 5 == 0: - self.prev_download_progress = progress_int - logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) - - def retrieve_and_unzip_package(self, package_name, package_url): - local_package_path = ServerConstants.get_package_download_dir() - os.makedirs(local_package_path, exist_ok=True) - filename, filename_without_extension, file_extension = ServerConstants.get_filename_and_extension(package_url) - local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}") - if os.path.exists(local_package_file): - os.remove(local_package_file) - ssl._create_default_https_context = ssl._create_unverified_context - urllib.request.urlretrieve(package_url, local_package_file, - reporthook=self.package_download_progress) - unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(),
f"unzip_fedml_run_{self.run_id}_{filename_without_extension}") - try: - shutil.rmtree(unzip_package_path, ignore_errors=True) - except Exception as e: - pass - - package_dir_name = self.unzip_file(local_package_file, unzip_package_path) # Using unziped folder name - unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name) - - logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format( - local_package_file, unzip_package_path, unzip_package_full_path)) - - return unzip_package_full_path - - def update_local_fedml_config(self, run_id, run_config): - packages_config = run_config["packages_config"] - - # Copy config file from the client - server_package_name = packages_config.get("server", None) - server_package_url = packages_config.get("serverUrl", None) - unzip_package_path = self.retrieve_and_unzip_package(server_package_name, server_package_url) - self.fedml_packages_unzip_dir = unzip_package_path - fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") - - # Load the above config to memory - config_from_container = load_yaml_config(fedml_local_config_file) - container_entry_file_config = config_from_container["entry_config"] - container_dynamic_args_config = config_from_container["dynamic_args"] - entry_file = container_entry_file_config["entry_file"] - conf_file = container_entry_file_config["conf_file"] - self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT) - full_conf_path = os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file)) - - # Dynamically build constrain variable with realtime parameters from server - self.build_dynamic_constrain_variables(run_id, run_config) - - # Update entry arguments value with constrain variable values with realtime parameters from server - # currently we support the following constrain variables: - # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow - # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client - # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow - # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server, - # if this value is not null, the client will download data from this URL to use it as - # federated training data set - # ${FEDSYS_IS_USING_LOCAL_DATA}: whether use private local data as federated training data set - # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}" - for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items(): - for argument_key, argument_value in container_dynamic_args_config.items(): - if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0: - replaced_argument_value = str(argument_value).replace( - constrain_variable_key, str(constrain_variable_value) - ) - container_dynamic_args_config[argument_key] = replaced_argument_value - - # Merge all container new config sections as new config dictionary - package_conf_object = dict() - package_conf_object["entry_config"] = container_entry_file_config - package_conf_object["dynamic_args"] = container_dynamic_args_config - package_conf_object["dynamic_args"]["config_version"] = self.args.config_version - container_dynamic_args_config["mqtt_config_path"] = os.path.join( - unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"]) - ) - 
container_dynamic_args_config["s3_config_path"] = os.path.join( - unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"]) - ) - log_file_dir = ServerConstants.get_log_file_dir() - os.makedirs(log_file_dir, exist_ok=True) - package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir - - # Save new config dictionary to local file - fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") - ServerConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file) - - # Build dynamic arguments and set arguments to fedml config object - if not self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path): - return None, None - - return unzip_package_path, package_conf_object - - def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir): - fedml_conf_file = package_conf_object["entry_config"]["conf_file"] - fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep) - fedml_conf_path = os.path.join(base_dir, "fedml", "config", - os.path.basename(fedml_conf_file_processed)) - fedml_conf_object = load_yaml_config(fedml_conf_path) - self.origin_fedml_config_object = fedml_conf_object.copy() - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - - # Replace local fedml config objects with parameters from MLOps web - parameters_object = run_config.get("parameters", None) - if parameters_object is not None: - for config_k, config_v in fedml_conf_object.items(): - parameter_v = parameters_object.get(config_k, None) - if parameter_v is not None: - fedml_conf_object[config_k] = parameter_v - parameters_object.pop(config_k) - - for config_k, config_v in parameters_object.items(): - fedml_conf_object[config_k] = config_v - - package_dynamic_args = package_conf_object["dynamic_args"] - if fedml_conf_object.get("comm_args", None) is not None: - fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"] - fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"] - fedml_conf_object["common_args"]["using_mlops"] = True - if fedml_conf_object.get("train_args", None) is not None: - fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"] - fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"] - fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"]) - fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"]) - fedml_conf_object["train_args"]["server_id"] = self.edge_id - fedml_conf_object["train_args"]["server_agent_id"] = self.request_json.get("cloud_agent_id", self.edge_id) - fedml_conf_object["train_args"]["group_server_id_list"] = self.request_json.get("group_server_id_list", - list()) - if fedml_conf_object.get("device_args", None) is not None: - fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"]) - # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"] - if fedml_conf_object.get("tracking_args", None) is not None: - fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"] - fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"] - - bootstrap_script_path = None - env_args = fedml_conf_object.get("environment_args", None) - if env_args 
is not None: - bootstrap_script_file = env_args.get("bootstrap", None) - if bootstrap_script_file is not None: - bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep) - if platform.system() == 'Windows': - bootstrap_script_file = bootstrap_script_file.rstrip('.sh') + '.bat' - if bootstrap_script_file is not None: - bootstrap_script_dir = os.path.join(base_dir, "fedml", os.path.dirname(bootstrap_script_file)) - bootstrap_script_path = os.path.join( - bootstrap_script_dir, bootstrap_script_dir, os.path.basename(bootstrap_script_file) - ) - # try: - # os.makedirs(package_dynamic_args["data_cache_dir"], exist_ok=True) - # except Exception as e: - # pass - fedml_conf_object["dynamic_args"] = package_dynamic_args - - ServerConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path) - - is_bootstrap_run_ok = True - try: - if bootstrap_script_path is not None: - if os.path.exists(bootstrap_script_path): - bootstrap_stat = os.stat(bootstrap_script_path) - if platform.system() == 'Windows': - os.chmod(bootstrap_script_path, - bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) - bootstrap_scripts = "{}".format(bootstrap_script_path) - else: - os.chmod(bootstrap_script_path, - bootstrap_stat.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH) - bootstrap_scripts = "cd {}; ./{}".format(bootstrap_script_dir, - os.path.basename(bootstrap_script_file)) - bootstrap_scripts = str(bootstrap_scripts).replace('\\', os.sep).replace('/', os.sep) - logging.info("Bootstrap scripts are being executed...") - shell_cmd_list = list() - shell_cmd_list.append(bootstrap_scripts) - process, error_list = ServerConstants.execute_commands_with_live_logs( - shell_cmd_list, callback=self.callback_run_bootstrap) - - ret_code, out, err = process.returncode, None, None - if ret_code is None or ret_code <= 0: - if error_list is not None and len(error_list) > 0: - is_bootstrap_run_ok = False - else: - if out is not None: - out_str = sys_utils.decode_our_err_result(out) - if out_str != "": - logging.info("{}".format(out_str)) - - sys_utils.log_return_info(bootstrap_script_file, 0) - - is_bootstrap_run_ok = True - else: - if err is not None: - err_str = sys_utils.decode_our_err_result(err) - if err_str != "": - logging.error("{}".format(err_str)) - - sys_utils.log_return_info(bootstrap_script_file, ret_code) - - is_bootstrap_run_ok = False - except Exception as e: - logging.error("Bootstrap scripts error: {}".format(traceback.format_exc())) - - is_bootstrap_run_ok = False - - return is_bootstrap_run_ok - - def callback_run_bootstrap(self, job_pid): - ServerConstants.save_bootstrap_process(self.run_id, job_pid) - - @debug - def run( - self, process_event, completed_event, edge_id_status_queue=None, - edge_device_info_queue=None, run_metrics_queue=None, - run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None, - message_center_queue=None, edge_device_info_global_queue=None - ): - print(f"Server runner process id {os.getpid()}, run id {self.run_id}") - - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - self.run_process_completed_event = completed_event - try: - MLOpsUtils.set_ntp_offset(self.ntp_offset) - - self.rebuild_message_center(message_center_queue) - - self.run_impl(edge_id_status_queue, edge_device_info_queue, run_metrics_queue, - run_event_queue, 
run_artifacts_queue, run_logs_queue, edge_device_info_global_queue) - except RunnerError: - logging.info("Runner stopped.") - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - except RunnerCompletedError: - logging.info("Runner completed.") - except Exception as e: - logging.error("Runner exits with exceptions. {}".format(traceback.format_exc())) - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - finally: - logging.info("Release resources.") - self._process_run_metrics_queue(run_metrics_queue) - self._process_run_logs_queue(run_logs_queue) - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) - if self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - ServerConstants.cleanup_run_process(self.run_id) - ServerConstants.cleanup_learning_process(self.run_id) - ServerConstants.cleanup_bootstrap_process(self.run_id) - - def check_runner_stop_event(self): - if self.run_process_event is not None and self.run_process_event.is_set(): - logging.info("Received stopping event.") - raise RunnerError("Runner stopped") - - if self.run_process_completed_event is not None and self.run_process_completed_event.is_set(): - logging.info("Received completed event.") - raise RunnerCompletedError("Runner completed") - - def deploy_model(self, serving_devices, request_json, run_id): - run_config = request_json["run_config"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_type = job_yaml.get("job_type", None) - job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - if job_type == Constants.JOB_TASK_TYPE_DEPLOY or job_type == Constants.JOB_TASK_TYPE_SERVE: - computing = job_yaml.get("computing", {}) - num_gpus = computing.get("minimum_num_gpus", 1) - serving_args = run_params.get("serving_args", {}) - model_id = serving_args.get("model_id", None) - model_name = serving_args.get("model_name", None) - model_version = serving_args.get("model_version", None) - model_storage_url = serving_args.get("model_storage_url", None) - endpoint_name = serving_args.get("endpoint_name", None) - endpoint_id = serving_args.get("endpoint_id", None) - random = serving_args.get("random", "") - random_out = sys_utils.random2(random, "FEDML@9999GREAT") - random_list = random_out.split("FEDML@") - device_type = device_client_constants.ClientConstants.login_role_list[ - device_client_constants.ClientConstants.LOGIN_MODE_FEDML_CLOUD_INDEX] - FedMLModelCards.get_instance().deploy_model( - model_name, device_type, json.dumps(serving_devices), - "", random_list[1], None, - in_model_id=model_id, in_model_version=model_version, - endpoint_name=endpoint_name, endpoint_id=endpoint_id, run_id=run_id) - - @debug - def run_impl( - self, edge_id_status_queue, edge_device_info_queue, run_metrics_queue, - run_event_queue, run_artifacts_queue, run_logs_queue, edge_device_info_global_queue - ): - run_id = self.request_json["runId"] - run_config = self.request_json["run_config"] - data_config = run_config["data_config"] - edge_ids = self.request_json["edgeids"] - - self.check_runner_stop_event() - - self.run_id = run_id - self.args.run_id = self.run_id - 
MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - # report server running status - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - logging.info("Detect all status of Edge ids: " + str(edge_ids)) - - status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status( - edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue, - callback_when_edges_ready=self.send_training_request_to_edges) - logging.info(f"Status OK: {status_ok}, Active edge info dict: {active_edge_info_dict}, " - f"inactivate edges: {inactivate_edges}") - if not status_ok: - logging.error(f"Status of edge device is not OK. Active edge info dict: {active_edge_info_dict}, " - f"Inactivate edges: {inactivate_edges}") - return - - if not self.should_continue_run_job(run_id): - if FedMLServerRunner.debug_cloud_server: - while True: - time.sleep(30) - # Check if the run status is normal - self.aggregate_run_status_metrics_logs( - run_id, edge_ids, edge_id_status_queue, edge_device_info_queue, - edge_device_info_global_queue, - run_metrics_queue, run_logs_queue) - return - - # Start the server job - self._start_runner_process(run_id, self.request_json, is_server_job=True) - - # Check if the run status is normal - self.aggregate_run_status_metrics_logs( - run_id, edge_ids, edge_id_status_queue, edge_device_info_queue, - edge_device_info_global_queue, - run_metrics_queue, run_logs_queue) - - def aggregate_run_status_metrics_logs( - self, run_id, edge_id_list, edge_id_status_queue, edge_device_info_queue, - edge_device_info_global_queue, run_metrics_queue, run_logs_queue): - total_sleep_seconds = 0 - sleep_seconds = 3 - allowed_status_check_sleep_seconds = 60 * 25 - server_id = self.edge_id - normal_response_status_list = [ - ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_TRAINING, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING - ] - edges_id_status_timeout_map = dict() - number_of_failed_edges = 0 - number_of_finished_edges = 0 - number_of_killed_edges = 0 - running_edges_list = list() - inactivate_edge_list = list() - current_edge_id_status_map = dict() - - while True: - self.check_runner_stop_event() - - # Process run metrics - self._process_run_metrics_queue(run_metrics_queue) - - # Process run logs - self._process_run_logs_queue(run_logs_queue) - - # Fetch edge id and status from the edge id status queue - while True: - try: - queue_item = edge_id_status_queue.get(block=False, timeout=3) - if queue_item is not None: - current_edge_id_status_map.update(queue_item) - except queue.Empty as e: # If queue is empty, then break loop - break - - # Calc the total completed device number - server_id = current_edge_id_status_map.get("server", 0) - running_edges_list.clear() - number_of_failed_edges = 0 - number_of_finished_edges = 0 - number_of_killed_edges = 0 - for edge_id_item, status_item in current_edge_id_status_map.items(): - if edge_id_item == "server": - continue - - if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: - 
number_of_failed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - number_of_finished_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: - number_of_killed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE: - continue - - running_edges_list.append(edge_id_item) - - # Process the no response edges and accumulate the counter. - for edge_id_item in edge_id_list: - status_dict = edges_id_status_timeout_map.get(str(edge_id_item)) - status_item = current_edge_id_status_map.get(str(edge_id_item)) - if status_item is None: - continue - if status_dict is None: - status_dict = {"status": status_item, "count": 0} - else: - if status_item in normal_response_status_list: - status_dict["count"] = 0 - else: - status_dict["count"] += 1 - edges_id_status_timeout_map[str(edge_id_item)] = status_dict - - # If the completed device number is equal total device number, then break - if len(running_edges_list) <= 0 and len(current_edge_id_status_map.keys()) == len(edge_id_list) + 1: - break - - # Calc the timeout value to wait to device killed. - self.check_runner_stop_event() - time.sleep(sleep_seconds) - total_sleep_seconds += sleep_seconds - no_response_edge_ids = list() - for no_res_edge, no_res_status in edges_id_status_timeout_map.items(): - if no_res_status.get("count") * sleep_seconds > allowed_status_check_sleep_seconds: - no_response_edge_ids.append(no_res_edge) - - # If timeout, then report killed device status - if len(no_response_edge_ids) > 0: - for edge_id_item in no_response_edge_ids: - self.mlops_metrics.report_client_id_status( - edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED, - server_id=self.edge_id, run_id=self.run_id) - - # Check if we can get the response device info from edge devices - # and set the inactive edges to killed status. - self.check_runner_stop_event() - given_edge_ids = list(set(edge_id_list) - set(inactivate_edge_list)) - status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status( - edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue, - need_to_trigger_exception=False, status_timeout=60, - given_edge_ids=given_edge_ids, callback_when_detecting=self.callback_when_detecting_on_aggregation, - args_for_callback_when_detecting=(run_metrics_queue, run_logs_queue) - ) - if not status_ok: - inactivate_edge_list.extend(inactivate_edges) - for edge_id_item in inactivate_edges: - self.mlops_metrics.report_client_id_status( - edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE, - server_id=self.edge_id, run_id=self.run_id) - - # Calc the final run status based on the completed device numbers and fault tolerance parameters. 
- enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id) - running_edges_list = list(set(running_edges_list)) - status_to_report = self.calculate_server_status( - run_id, len(edge_id_list), number_of_failed_edges, number_of_finished_edges, - number_of_killed_edges, running_edges_list, enable_fault_tolerance=enable_fault_tolerance, - fault_tolerance_rate=fault_tolerance_rate) - if status_to_report is not None: - logging.info( - f"Run completed when aggregating status, metrics and logs, will report status {status_to_report}") - self.mlops_metrics.report_server_id_status( - self.run_id, status_to_report, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - def callback_when_detecting_on_aggregation(self, detecting_args): - # Process run metrics - self._process_run_metrics_queue(detecting_args[0]) - - # Process run logs - self._process_run_logs_queue(detecting_args[1]) - - def _process_run_metrics_queue(self, run_metrics_queue): - # Fetch metrics from the run metrics queue - while True: - try: - metrics_item = run_metrics_queue.get(block=False, timeout=3) - MetricsManager.get_instance().save_metrics(metrics_item) - metric_json = json.loads(metrics_item) - if metric_json.get("is_endpoint", False): - metric_json.pop("is_endpoint") - self.mlops_metrics.report_endpoint_metric({}, payload=json.dumps(metric_json)) - else: - self.mlops_metrics.report_server_training_metric({}, payload=metrics_item) - except queue.Empty as e: # If queue is empty, then break loop - break - - def _process_run_logs_queue(self, run_logs_queue): - # Fetch logs from the run logs queue - while True: - try: - logs_item = run_logs_queue.get(block=False, timeout=3) - LogsManager.save_logs(logs_item) - except queue.Empty as e: # If queue is empty, then break loop - break - - def run_server_job_impl(self, process_event, completed_event, edge_id_status_queue=None, - edge_device_info_queue=None, run_metrics_queue=None, - run_event_queue=None, run_artifacts_queue=None, run_logs_queue=None, - message_center_queue=None, edge_device_info_global_queue=None): - print(f"Server runner process id {os.getpid()}, run id {self.run_id}") - - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - self.run_process_completed_event = completed_event - - MLOpsUtils.set_ntp_offset(self.ntp_offset) - - self.rebuild_message_center(message_center_queue) - - run_id = self.request_json["runId"] - run_config = self.request_json["run_config"] - data_config = run_config["data_config"] - edge_ids = self.request_json["edgeids"] - - self.check_runner_stop_event() - - # get training params - private_local_data_dir = data_config.get("privateLocalData", "") - is_using_local_data = 0 - # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0: - # is_using_local_data = 1 - - # start a run according to the hyper-parameters - # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id) - fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data") - fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config") - if is_using_local_data: - fedml_local_data_dir = private_local_data_dir - self.fedml_data_dir = self.fedml_data_local_package_dir - - self.check_runner_stop_event() - - logging.info("download packages and run the bootstrap
script...") - - # update local config with real time parameters from server and dynamically replace variables value - unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config) - if unzip_package_path is None or fedml_config_object is None: - logging.info("failed to update local fedml config.") - self.check_runner_stop_event() - self.cleanup_run_when_starting_failed() - self.send_training_stop_request_to_edges_when_exception(edge_ids, payload=self.start_request_json, - run_id=run_id) - return - - logging.info("cleanup the previous aggregation process and check downloaded packages...") - - entry_file_config = fedml_config_object["entry_config"] - dynamic_args_config = fedml_config_object["dynamic_args"] - entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep) - entry_file = os.path.basename(entry_file) - conf_file = entry_file_config["conf_file"] - conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep) - ServerConstants.cleanup_learning_process(run_id) - self.check_runner_stop_event() - if not os.path.exists(unzip_package_path): - logging.info("failed to unzip file.") - self.check_runner_stop_event() - self.cleanup_run_when_starting_failed() - self.send_training_stop_request_to_edges_when_exception(edge_ids, payload=self.start_request_json, - run_id=run_id) - return - os.chdir(os.path.join(unzip_package_path, "fedml")) - - self.check_runner_stop_event() - - logging.info("starting the server user process...") - - entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file) - conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file) - logging.info(" ") - logging.info(" ") - logging.info("====Your Run Logs Begin===") - process, is_launch_task, error_list = self.execute_job_task(entry_file_full_path, conf_file_full_path, run_id) - logging.info("====Your Run Logs End===") - logging.info(" ") - logging.info(" ") - - ret_code, out, err = process.returncode, None, None - is_run_ok = sys_utils.is_runner_finished_normally(process.pid) - if is_launch_task: - is_run_ok = True - if error_list is not None and len(error_list) > 0: - is_run_ok = False - if ret_code is None or ret_code <= 0: - self.check_runner_stop_event() - - if is_run_ok: - if out is not None: - out_str = sys_utils.decode_our_err_result(out) - if out_str != "": - logging.info("{}".format(out_str)) - - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - if is_launch_task: - sys_utils.log_return_info(f"job {run_id}", 0) - else: - sys_utils.log_return_info(entry_file, 0) - else: - is_run_ok = False - - if not is_run_ok: - # If the run status is killed or finished, then return with the normal state. 
- current_job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id) - if current_job is not None and (current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED or - current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED): - return - - self.check_runner_stop_event() - - logging.error("failed to run the aggregation process...") - - if err is not None: - err_str = sys_utils.decode_our_err_result(err) - if err_str != "": - logging.error("{}".format(err_str)) - - if is_launch_task: - sys_utils.log_return_info(f"job {run_id}", ret_code) - else: - sys_utils.log_return_info(entry_file, ret_code) - - self.send_training_stop_request_to_edges_when_exception(edge_ids, run_id=run_id) - - def init_job_task(self, request_json): - run_id = request_json["runId"] - run_config = request_json["run_config"] - edge_ids = request_json["edgeids"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", None) - server_id = request_json["server_id"] - if self.run_as_cloud_agent: - server_id = self.edge_id - - self.setup_listeners_for_edge_status(run_id, edge_ids, server_id) - self.setup_listener_for_run_metrics(run_id) - self.setup_listener_for_run_logs(run_id) - - def should_continue_run_job(self, run_id): - run_config = self.request_json["run_config"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_yaml_default_none = run_params.get("job_yaml", None) - framework_type = job_yaml.get("framework_type", None) - job_type = job_yaml.get("job_type", None) - job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - if job_yaml_default_none is not None: - if job_type == Constants.JOB_TASK_TYPE_FEDERATE: - return True - - if framework_type is None or framework_type != Constants.JOB_FRAMEWORK_TYPE_FEDML: - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - return False - - return True - - def execute_job_task(self, entry_file_full_path, conf_file_full_path, run_id): - run_config = self.request_json["run_config"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_yaml_default_none = run_params.get("job_yaml", None) - job_api_key = job_yaml.get("run_api_key", None) - job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key - assigned_gpu_ids = run_params.get("gpu_ids", None) - framework_type = job_yaml.get("framework_type", None) - job_type = job_yaml.get("job_type", None) - job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - conf_file_object = load_yaml_config(conf_file_full_path) - entry_args_dict = conf_file_object.get("fedml_entry_args", {}) - entry_args = entry_args_dict.get("arg_items", None) - - executable_interpreter = ClientConstants.CLIENT_SHELL_PS \ - if platform.system() == ClientConstants.PLATFORM_WINDOWS else ClientConstants.CLIENT_SHELL_BASH - - if job_yaml_default_none is None: - # Generate the job executing commands for previous federated learning (Compatibility) - python_program = get_python_program() - logging.info("Run the server: {} {} --cf {} --rank 0 --role server".format( - python_program, entry_file_full_path, conf_file_full_path)) - entry_command = f"{python_program} {entry_file_full_path} --cf " \ - f"{conf_file_full_path} --rank 0 --role server" - shell_cmd_list = 
[entry_command] - - # Run the job executing commands for previous federated learning (Compatibility) - process, error_list = ClientConstants.execute_commands_with_live_logs( - shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False) - is_launch_task = False - else: - self.check_runner_stop_event() - - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_RUNNING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - # Generate the job executing commands - job_executing_commands = JobRunnerUtils.generate_job_execute_commands( - run_id=self.run_id, edge_id=self.edge_id, version=self.version, package_type=self.package_type, - executable_interpreter=executable_interpreter, entry_file_full_path=entry_file_full_path, - conf_file_object=conf_file_object, entry_args=entry_args, assigned_gpu_ids=assigned_gpu_ids, - job_api_key=job_api_key, client_rank=0) - - # Run the job executing commands - logging.info(f"Run the server job with job id {self.run_id}, device id {self.edge_id}.") - process, error_list = ServerConstants.execute_commands_with_live_logs( - job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor) - is_launch_task = True - - return process, is_launch_task, error_list - - def callback_start_fl_job(self, job_pid): - ServerConstants.save_learning_process(self.run_id, job_pid) - self.mlops_metrics.report_sys_perf( - self.args, self.agent_config["mqtt_config"], job_process_id=job_pid) - - def start_job_perf(self, job_pid): - ServerConstants.save_learning_process(self.run_id, job_pid) - self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid) - - def job_error_processor(self, error_list): - self.check_runner_stop_event() - - error_str = "\n".join(error_list) - raise Exception(f"Error occurs when running the job... 
{error_str}") - - def process_job_status(self, run_id, edge_id, status): - number_of_failed_edges = 0 - number_of_finished_edges = 0 - number_of_killed_edges = 0 - edge_id_status_dict = self.client_agent_active_list.get(f"{run_id}", {}) - server_id = edge_id_status_dict.get("server", 0) - enable_fault_tolerance, fault_tolerance_rate = self.parse_fault_tolerance_params(run_id) - running_edges_list = list() - for edge_id_item, status_item in edge_id_status_dict.items(): - if edge_id_item == "server": - continue - - if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: - number_of_failed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - number_of_finished_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: - number_of_killed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE: - continue - - running_edges_list.append(edge_id_item) - - # Report client status - edge_status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else status - self.mlops_metrics.report_client_training_status(edge_id, edge_status, run_id=run_id) - self.mlops_metrics.report_client_device_status_to_web_ui(edge_id, edge_status, run_id=run_id) - - # Report server status based on the fault tolerance model and parameters - edge_nums = len(edge_id_status_dict.keys()) - 1 - status_to_report = self.calculate_server_status( - run_id, edge_nums, number_of_failed_edges, number_of_finished_edges, number_of_killed_edges, - running_edges_list, enable_fault_tolerance=enable_fault_tolerance, - fault_tolerance_rate=fault_tolerance_rate) - if status_to_report is not None: - logging.info(f"Run completed when processing edge status, will report status {status_to_report}") - self.report_server_status(run_id, server_id, status_to_report) - - def calculate_server_status( - self, run_id, total_edge_nums, number_of_failed_edges, number_of_finished_edges, - number_of_killed_edges, running_edges_list, enable_fault_tolerance=False, - fault_tolerance_rate=0.8 - ): - # Report server status based on the fault tolerance model and parameters - actual_failed_rate = number_of_failed_edges / total_edge_nums - all_edges_run_completed = True if len(running_edges_list) <= 0 else False - if all_edges_run_completed: - status_to_report = None - if enable_fault_tolerance: - if actual_failed_rate >= fault_tolerance_rate: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED - self.send_training_stop_request_to_edges_when_exception( - running_edges_list, run_id=run_id, status=status_to_report) - return status_to_report - else: - if number_of_killed_edges == total_edge_nums: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED - else: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED - else: - if number_of_failed_edges > 0: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED - elif number_of_finished_edges == total_edge_nums: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED - elif number_of_killed_edges == total_edge_nums: - status_to_report = ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED - - return status_to_report - - def parse_fault_tolerance_params(self, run_id): - run_json = self.running_request_json.get(str(run_id), 
None) - if run_json is None: - run_json = self.request_json - run_config = run_json.get("run_config", {}) - run_params = run_config.get("parameters", {}) - common_args = run_params.get("common_args", {}) - enable_fault_tolerance = common_args.get("enable_fault_tolerance", False) - fault_tolerance_rate = common_args.get("fault_tolerance_rate", 0) - return enable_fault_tolerance, fault_tolerance_rate - - def report_server_status(self, run_id, server_id, status): - self.mlops_metrics.report_server_id_status(run_id, status, edge_id=self.edge_id, - server_id=server_id, server_agent_id=self.edge_id) - - def stop_run_when_starting_failed(self): - edge_id_list = self.request_json["edgeids"] - run_id = self.request_json.get("run_id", 0) - logging.error("edge ids {}".format(str(edge_id_list))) - - payload = self.running_request_json.get(str(run_id)) - if payload is not None: - self.send_training_stop_request_to_edges(edge_id_list, payload=json.dumps(payload), run_id=run_id) - - # logging.info("Stop run successfully when starting failed.") - - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - def cleanup_run_when_finished(self, should_send_server_id_status=True): - # logging.info("Cleanup run successfully when finished.") - - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id - ) - - if should_send_server_id_status: - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - ServerConstants.cleanup_learning_process(self.run_id) - ServerConstants.cleanup_bootstrap_process(self.run_id) - - try: - local_package_path = ServerConstants.get_package_download_dir() - for package_file in listdir(local_package_path): - if os.path.basename(package_file).startswith("run_" + str(self.run_id)): - shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True) - except Exception as e: - pass - - def cleanup_run_when_starting_failed( - self, status=ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, should_send_server_id_status=True): - # logging.info("Cleanup run successfully when starting failed.") - - self.mlops_metrics.report_server_training_status( - self.run_id, status, edge_id=self.edge_id) - - if should_send_server_id_status: - self.mlops_metrics.report_server_id_status( - self.run_id, status, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - ServerConstants.cleanup_learning_process(self.run_id) - ServerConstants.cleanup_bootstrap_process(self.run_id) - - try: - local_package_path = ServerConstants.get_package_download_dir() - for package_file in listdir(local_package_path): - if os.path.basename(package_file).startswith("run_" + str(self.run_id)): - shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True) - except Exception as e: - pass - - def should_process_async_cluster(self): - run_config = self.request_json.get("run_config", {}) - run_params = run_config.get("parameters", {}) - common_args = run_params.get("common_args", {}) - self.enable_async_cluster = 
common_args.get("enable_async_cluster", False) - self.async_check_timeout = common_args.get("async_check_timeout", 0) - if self.enable_async_cluster: - return True, self.async_check_timeout - - return False, self.async_check_timeout - - @debug - def detect_edges_status( - self, edge_device_info_queue, edge_device_info_global_queue=None, callback_when_edges_ready=None, status_timeout=None, - need_to_trigger_exception=True, status_check_context=None, given_edge_ids=None, - callback_when_detecting=None, args_for_callback_when_detecting=None - ): - run_id = self.request_json["runId"] - run_id_str = str(run_id) - edge_id_list = self.request_json["edgeids"] - if given_edge_ids is not None: - edge_id_list = given_edge_ids - - # Init realtime status of all edges - run_edges_realtime_status = dict() - run_edges_realtime_status[run_id_str] = dict() - - edge_info_global_dict = dict() - if edge_device_info_global_queue is not None: - for edge_info_global in edge_device_info_global_queue: - edge_info_id = edge_info_global.get("edge_id") - edge_info_global_dict[edge_info_id] = edge_info_global - - # Send status message to all edges - allowed_cache_edge_status_time = 60 - for edge_id in edge_id_list: - # Check if the edge status was filled allowed_cache_edge_status_time seconds ago, - # if so no more checking message would be sent. - edge_info = edge_info_global_dict.get(edge_id, None) - if edge_info is not None: - timestamp = edge_info.get("timestamp", None) - time_interval = time.time() - timestamp - if time_interval <= allowed_cache_edge_status_time: - continue - - self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context) - time.sleep(3) - - total_sleep_seconds = 0 - status_check_sleep_seconds = 10 - allowed_status_check_sleep_seconds = 60 * 2 if status_timeout is None else status_timeout - allowed_status_check_sleep_seconds_for_async = 30 - inactivate_edges = list() - active_edge_info_dict = dict() - log_active_edge_info_flag = True - while True: - if callback_when_detecting is not None: - callback_when_detecting(args_for_callback_when_detecting) - - # Fetch edge info from the edge status queue, which will be added to realtime status map - while True: - self.check_runner_stop_event() - - try: - edge_info = edge_device_info_queue.get(block=False, timeout=1) - if edge_info is not None: - edge_id = edge_info.get("edge_id", None) - if edge_id is not None: - run_edges_realtime_status[run_id_str][edge_id] = edge_info - except queue.Empty as e: # If queue is empty, then break loop - break - - self.check_runner_stop_event() - - # Check all edges which don't send response status successfully - # and retry to send the status checking message. - active_edges_count = 0 - inactivate_edges.clear() - active_edge_info_dict.clear() - for edge_id in edge_id_list: - edge_info_dict = run_edges_realtime_status.get(run_id_str, {}) - edge_info = edge_info_dict.get(edge_id, None) - edge_info = edge_info_dict.get(str(edge_id), None) if edge_info is None else edge_info - if edge_info is not None: - active_edges_count += 1 - active_edge_info_dict[str(edge_id)] = edge_info - else: - # Check if the edge status was filled allowed_cache_edge_status_time seconds ago, - # if so no more checking message would be sent. 
- edge_info = edge_info_global_dict.get(edge_id, None) - if edge_info is not None: - timestamp = edge_info.get("timestamp", None) - time_interval = time.time() - timestamp - if time_interval <= allowed_cache_edge_status_time: - active_edges_count += 1 - active_edge_info_dict[str(edge_id)] = edge_info - continue - - inactivate_edges.append(edge_id) - self.send_status_check_msg(run_id, edge_id, self.edge_id, context=status_check_context) - - # If all edges are ready then send the starting job message to them - if active_edges_count == len(edge_id_list): - if log_active_edge_info_flag: - logging.debug(f"All edges are ready. Active edge id list is as follows. {active_edge_info_dict}") - log_active_edge_info_flag = False - if callback_when_edges_ready is not None: - logging.info("All edges are ready. Start to process the callback function.") - callback_when_edges_ready(active_edge_info_dict=active_edge_info_dict) - else: - logging.debug("All edges are ready. No callback function to process.") - break - else: - logging.info(f"All edges are not ready. Active edge id list: {active_edge_info_dict}, " - f"Inactive edge id list: {inactivate_edges}") - log_active_edge_info_flag = True - - # Check if runner needs to stop and sleep specific time - self.check_runner_stop_event() - time.sleep(status_check_sleep_seconds) - total_sleep_seconds += status_check_sleep_seconds - - # Check if the status response message has timed out to receive - if total_sleep_seconds >= allowed_status_check_sleep_seconds: - # If so, send failed message to MLOps and send exception message to all edges. - logging.error(f"There are inactive edge devices. " - f"Inactivate edge id list is as follows. {inactivate_edges}") - if need_to_trigger_exception: - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.server_agent_id) - self.send_training_stop_request_to_edges_when_exception(edge_id_list, - payload=json.dumps(self.request_json), - run_id=run_id) - return False, active_edge_info_dict, inactivate_edges - - # If we enable the mode for async cluster, then sleep some time and send messages to all clients. 
- if callback_when_edges_ready is not None: - should_async, async_timeout = self.should_process_async_cluster() - if should_async and total_sleep_seconds >= allowed_status_check_sleep_seconds_for_async: - if async_timeout > allowed_status_check_sleep_seconds_for_async: - time.sleep(async_timeout - allowed_status_check_sleep_seconds_for_async) - self.send_training_request_to_edges() - return True, active_edge_info_dict, inactivate_edges - - return True, active_edge_info_dict, inactivate_edges - - def send_status_check_msg(self, run_id, edge_id, server_id, context=None): - topic_get_model_device_id = "server/client/request_device_info/" + str(edge_id) - payload = {"server_id": server_id, "run_id": run_id} - if context is not None: - payload["context"] = context - self.message_center.send_message(topic_get_model_device_id, json.dumps(payload)) - - @debug - def send_training_request_to_edges(self, active_edge_info_dict=None): - run_id = self.request_json["runId"] - edge_id_list = self.request_json["edgeids"] - run_config = self.request_json.get("run_config", {}) - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_yaml_default_none = run_params.get("job_yaml", None) - computing = job_yaml.get("computing", {}) - request_num_gpus = computing.get("minimum_num_gpus", None) - job_gpu_id_list = self.request_json.get("job_gpu_id_list", None) - - logging.info("Send training request to Edge ids: " + str(edge_id_list)) - - should_match_gpu = False - if job_yaml_default_none is not None and request_num_gpus is not None and \ - int(request_num_gpus) > 0 and active_edge_info_dict is not None: - should_match_gpu = True - SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(active_edge_info_dict, show_gpu_list=True) - - # Match and assign gpus to each device - assigned_gpu_num_dict, assigned_gpu_ids_dict = SchedulerMatcher.match_and_assign_gpu_resources_to_devices( - request_num_gpus, edge_id_list, active_edge_info_dict, job_gpu_id_list=job_gpu_id_list) - if assigned_gpu_num_dict is None or assigned_gpu_ids_dict is None: - # If no resources available, send failed message to MLOps and send exception message to all edges. - gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges( - active_edge_info_dict, should_print=True) - err_info = f"No resources available." \ - f"Total available GPU count {gpu_available_count} is less than " \ - f"request GPU count {request_num_gpus}" - logging.error(err_info) - - # Bug fix: This mqtt message needs to be sent so platform can clean up the failed run and change the - # status from running to failed. 
- self.mlops_metrics.report_server_training_status(
- run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id
- )
-
- self.mlops_metrics.report_server_id_status(
- run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
- server_id=self.edge_id, server_agent_id=self.server_agent_id)
- self.send_training_stop_request_to_edges_when_exception(edge_id_list,
- payload=json.dumps(self.request_json),
- run_id=run_id)
-
- serving_args = job_yaml.get("serving_args", {})
- endpoint_id = serving_args.get("endpoint_id", None)
- if endpoint_id is not None:
- fedml.mlops.log_endpoint_status(
- endpoint_id, device_client_constants.ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED)
- fedml.mlops.log_run_log_lines(
- endpoint_id, 0, [err_info],
- log_source=device_client_constants.ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT
- )
- return
-
- # Generate the master node addr and port
- master_node_addr, master_node_port = SchedulerMatcher.get_master_node_info(edge_id_list,
- active_edge_info_dict)
-
- # Generate the new edge id list after matching
- edge_id_list = SchedulerMatcher.generate_new_edge_list_for_gpu_matching(assigned_gpu_num_dict)
- if len(edge_id_list) <= 0:
- gpu_count, gpu_available_count = SchedulerMatcher.parse_and_print_gpu_info_for_all_edges(
- active_edge_info_dict, should_print=True)
- logging.error(f"The requested GPU count is invalid. "
- f"Total available GPU count: {gpu_available_count}. "
- f"Requested GPU count: {request_num_gpus}")
- self.mlops_metrics.report_server_id_status(
- run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id,
- server_id=self.edge_id, server_agent_id=self.server_agent_id)
- self.send_training_stop_request_to_edges_when_exception(edge_id_list,
- payload=json.dumps(self.request_json),
- run_id=run_id)
- return
-
- if should_match_gpu:
- # Report the GPU count and related info to MLOps.
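- # (For deploy jobs the block below publishes one record per matched machine
- # to the "compute/mlops/endpoint" topic, roughly shaped as
- # {"endpoint_id": ..., "endpoint_info": [{"machine_id": ...,
- # "endpoint_gpu_count": ..., "master_deploy_id": ..., "slave_deploy_id": ...},
- # ...]}, so MLOps can display the GPU placement for the endpoint.)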
- serving_args = job_yaml.get("serving_args", {}) - endpoint_id = serving_args.get("endpoint_id", None) - if endpoint_id is not None: - endpoint_info = list() - for edge_id_item, gpu_num in assigned_gpu_num_dict.items(): - edge_info = active_edge_info_dict.get(str(edge_id_item), {}) - endpoint_info.append({ - "machine_id": edge_id_item, "endpoint_gpu_count": gpu_num, - "master_deploy_id": edge_info.get("master_device_id", 0), - "slave_deploy_id": edge_info.get("slave_device_id", 0)}) - topic_name = f"compute/mlops/endpoint" - endpoint_info_json = {"endpoint_id": endpoint_id, "endpoint_info": endpoint_info} - print(f"endpoint_info_json {endpoint_info_json}") - self.message_center.send_message(topic_name, json.dumps(endpoint_info_json)) - - client_rank = 1 - for edge_id in edge_id_list: - topic_start_train = "flserver_agent/" + str(edge_id) + "/start_train" - logging.info("start_train: send topic " + topic_start_train + " to client...") - request_json = self.request_json - request_json["client_rank"] = client_rank - client_rank += 1 - - if active_edge_info_dict is not None: - edge_info = active_edge_info_dict.get(str(edge_id), {}) - model_master_device_id = edge_info.get("master_device_id", None) - model_slave_device_id = edge_info.get("slave_device_id", None) - model_slave_device_id_list = edge_info.get("slave_device_id_list", None) - - if should_match_gpu: - request_json["scheduler_match_info"] = SchedulerMatcher.generate_match_info_for_scheduler( - edge_id, edge_id_list, master_node_addr, master_node_port, - assigned_gpu_num_dict, assigned_gpu_ids_dict, - model_master_device_id=model_master_device_id, - model_slave_device_id=model_slave_device_id, - model_slave_device_id_list=model_slave_device_id_list - ) - - self.message_center.send_message(topic_start_train, json.dumps(request_json)) - - def setup_listeners_for_edge_status(self, run_id, edge_ids, server_id): - self.client_agent_active_list[f"{run_id}"] = dict() - self.client_agent_active_list[f"{run_id}"][f"server"] = server_id - for edge_id in edge_ids: - self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status" - self.add_message_listener(edge_status_topic, self.callback_edge_status) - self.subscribe_msg(edge_status_topic) - - def remove_listeners_for_edge_status(self, edge_ids=None): - if edge_ids is None: - edge_ids = self.request_json["edgeids"] - - for edge_id in edge_ids: - edge_status_topic = "fl_client/flclient_agent_" + str(edge_id) + "/status" - self.unsubscribe_msg(edge_status_topic) - - def setup_listener_for_run_metrics(self, run_id): - metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}" - self.add_message_listener(metric_topic, self.callback_run_metrics) - self.subscribe_msg(metric_topic) - - def remove_listener_for_run_metrics(self, run_id): - metric_topic = f"fedml_slave/fedml_master/metrics/{run_id}" - self.unsubscribe_msg(metric_topic) - - def setup_listener_for_run_logs(self, run_id): - logs_topic = f"fedml_slave/fedml_master/logs/{run_id}" - self.add_message_listener(logs_topic, self.callback_run_logs) - self.subscribe_msg(logs_topic) - - def remove_listener_for_run_logs(self, run_id): - logs_topic = f"fedml_slave/fedml_master/logs/{run_id}" - self.unsubscribe_msg(logs_topic) - - def callback_run_logs(self, topic, payload): - run_id = str(topic).split('/')[-1] - run_id_str = str(run_id) - if self.run_logs_queue_map.get(run_id_str) is None: - self.run_logs_queue_map[run_id_str] = Queue() 
- self.run_logs_queue_map[run_id_str].put(payload) - - def callback_run_metrics(self, topic, payload): - print(f"callback_run_metrics topic {topic}, payload {payload}") - run_id = str(topic).split('/')[-1] - run_id_str = str(run_id) - if self.run_metrics_queue_map.get(run_id_str) is None: - self.run_metrics_queue_map[run_id_str] = Queue() - self.run_metrics_queue_map[run_id_str].put(payload) - - def callback_edge_status(self, topic, payload): - payload_json = json.loads(payload) - run_id = payload_json.get("run_id", None) - edge_id = payload_json.get("edge_id", None) - status = payload_json.get("status", None) - if run_id is not None and edge_id is not None: - active_item_dict = self.client_agent_active_list.get(f"{run_id}", None) - if active_item_dict is None: - return - self.client_agent_active_list[f"{run_id}"][f"{edge_id}"] = status - - if self.run_edge_id_status_queue_map.get(f"{run_id}") is None: - self.run_edge_id_status_queue_map[f"{run_id}"] = Queue() - self.run_edge_id_status_queue_map[f"{run_id}"].put(self.client_agent_active_list[f"{run_id}"]) - - self.process_job_status(run_id, edge_id, status) - - def ota_upgrade(self, payload, request_json): - run_id = request_json["runId"] - force_ota = False - ota_version = None - - try: - run_config = request_json.get("run_config", None) - parameters = run_config.get("parameters", None) - common_args = parameters.get("common_args", None) - force_ota = common_args.get("force_ota", False) - ota_version = common_args.get("ota_version", None) - except Exception as e: - pass - - if force_ota and ota_version is not None: - should_upgrade = True if ota_version != fedml.__version__ else False - upgrade_version = ota_version - else: - try: - fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version) - except Exception as e: - return - - should_upgrade = False if fedml_is_latest_version else True - upgrade_version = remote_ver - - if should_upgrade: - job_obj = FedMLServerDataInterface.get_instance().get_job_by_id(run_id) - if job_obj is None: - FedMLServerDataInterface.get_instance(). 
\ - save_started_job(run_id, self.edge_id, time.time(), - ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, - ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, - payload) - - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - logging.info(f"Upgrade to version {upgrade_version} ...") - - sys_utils.do_upgrade(self.version, upgrade_version) - - raise Exception("Restarting after upgraded...") - - def callback_start_train(self, topic=None, payload=None): - print("callback_start_train: ") - try: - MLOpsConfigs.fetch_all_configs() - except Exception as e: - pass - - # [NOTES] Example Request JSON: https://fedml-inc.larksuite.com/wiki/ScnIwUif9iupbjkYS0LuBrd6sod#WjbEdhYrvogmlGxKTOGu98C6sSb - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - - # Process the log - run_id = request_json["runId"] - run_id_str = str(run_id) - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - # Start log processor for current run - self.args.run_id = run_id - self.args.edge_id = self.edge_id - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( - run_id, self.edge_id, SchedulerConstants.get_log_source(request_json)) - logging.info("start the log processor.") - elif self.run_as_cloud_agent: - # Start log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( - run_id, request_json.get("server_id", "0"), SchedulerConstants.get_log_source(request_json) - ) - elif self.run_as_cloud_server: - self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id) - run_id = request_json["runId"] - run_id_str = str(run_id) - - # Start log processor for current run - self.args.run_id = run_id - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( - run_id, self.edge_id, SchedulerConstants.get_log_source(request_json)) - - logging.info("callback_start_train payload: {}".format(payload)) - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - # if not self.run_as_cloud_agent and not self.run_as_cloud_server: - # self.ota_upgrade(payload, request_json) - - # report server running status - if not self.run_as_cloud_server: - self.mlops_metrics.report_server_id_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - self.start_request_json = payload - self.run_id = run_id - ServerConstants.save_runner_infos(self.args.device_id + "." 
+ self.args.os_name, self.edge_id, run_id=run_id) - - # Start server with multiprocessing mode - self.request_json = request_json - self.running_request_json[run_id_str] = request_json - edge_id_list = request_json.get("edgeids", list()) - self.run_edge_ids[run_id_str] = edge_id_list - - logging.info("subscribe the client exception message.") - - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - self.init_job_task(request_json) - - self.args.run_id = run_id - - self._start_runner_process(run_id, request_json) - - ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - elif self.run_as_cloud_agent: - self.init_job_task(request_json) - - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config - ) - server_runner.run_as_cloud_agent = self.run_as_cloud_agent - server_runner.start_request_json = json.dumps(request_json) - self.run_process_event_map[run_id_str] = multiprocessing.Event() - self.run_process_event_map[run_id_str].clear() - server_runner.run_process_event = self.run_process_event_map[run_id_str] - - if not self.use_local_process_as_cloud_server: - self.run_process_map[run_id_str] = Process(target=server_runner.start_cloud_server_process_entry) - self.run_process_map[run_id_str].start() - else: - message_bytes = json.dumps(self.request_json).encode("ascii") - base64_bytes = base64.b64encode(message_bytes) - runner_cmd_encoded = base64_bytes.decode("ascii") - logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded)) - - cloud_device_id = request_json.get("cloudServerDeviceId", "0") - - self.run_process_map[run_id_str] = Process( - target=FedMLServerRunner.start_local_cloud_server, - args=(run_id_str, self.args.user, self.version, cloud_device_id, runner_cmd_encoded)) - self.run_process_map[run_id_str].start() - time.sleep(1) - - ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - elif self.run_as_cloud_server: - self.server_agent_id = request_json.get("cloud_agent_id", self.edge_id) - self.start_request_json = json.dumps(request_json) - run_id = request_json["runId"] - run_id_str = str(run_id) - - self.init_job_task(request_json) - - self.args.run_id = run_id - - self._start_runner_process(run_id, request_json) - # ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - - @staticmethod - def start_local_cloud_server(run_id, user, version, cloud_device_id, runner_cmd_encoded): - print(f"start cloud server, device id {cloud_device_id}, runner cmd {runner_cmd_encoded}") - if not FedMLServerRunner.debug_cloud_server: - pip_source_dir = os.path.dirname(__file__) - login_cmd = os.path.join(pip_source_dir, "server_login.py") - run_cmd = f"{get_python_program()} -W ignore {login_cmd} -t login -r cloud_server -u {str(user)} " \ - f"-v {version} -id {cloud_device_id} -rc {runner_cmd_encoded}" - os.system(run_cmd) - - def _start_runner_process(self, run_id, request_json, is_server_job=False): - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config - ) - run_id_str = str(run_id) - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - server_runner.edge_id = self.edge_id - server_runner.server_agent_id = self.server_agent_id - server_runner.start_request_json = json.dumps(request_json) - self.run_process_event_map[run_id_str] = multiprocessing.Event() - self.run_process_event_map[run_id_str].clear() - 
server_runner.run_process_event = self.run_process_event_map[run_id_str] - self.run_process_completed_event_map[run_id_str] = multiprocessing.Event() - self.run_process_completed_event_map[run_id_str].clear() - server_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str] - if self.run_edge_id_status_queue_map.get(run_id_str, None) is None: - self.run_edge_id_status_queue_map[run_id_str] = Queue() - if self.run_edge_device_info_queue_map.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map[run_id_str] = Queue() - if self.run_metrics_queue_map.get(run_id_str, None) is None: - self.run_metrics_queue_map[run_id_str] = Queue() - if self.run_events_queue_map.get(run_id_str, None) is None: - self.run_events_queue_map[run_id_str] = Queue() - if self.run_artifacts_queue_map.get(run_id_str, None) is None: - self.run_artifacts_queue_map[run_id_str] = Queue() - if self.run_logs_queue_map.get(run_id_str, None) is None: - self.run_logs_queue_map[run_id_str] = Queue() - # if self.run_edge_device_info_global_queue is None: - # self.run_edge_device_info_global_queue = Array('i', list()) - server_runner.edge_id_status_queue = self.run_edge_id_status_queue_map[run_id_str] - server_runner.edge_device_info_queue = self.run_edge_device_info_queue_map[run_id_str] - self.run_process_map[run_id_str] = Process( - target=server_runner.run if not is_server_job else server_runner.run_server_job_impl, args=( - self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str], - self.run_edge_id_status_queue_map[run_id_str], self.run_edge_device_info_queue_map[run_id_str], - self.run_metrics_queue_map[run_id_str], self.run_events_queue_map[run_id_str], - self.run_artifacts_queue_map[run_id_str], self.run_logs_queue_map[run_id_str], - self.message_center.get_message_queue(), - self.run_edge_device_info_global_queue - ) - ) - self.run_process_map[run_id_str].start() - ServerConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - - def start_cloud_server_process_entry(self): - try: - self.start_cloud_server_process() - except Exception as e: - pass - - def start_cloud_server_process(self): - run_config = self.request_json["run_config"] - packages_config = run_config["packages_config"] - self.start_cloud_server(packages_config) - - def start_cloud_server(self, packages_config): - server_id = self.request_json["server_id"] - self.cloud_server_name = FedMLServerRunner.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) + "-" + str(server_id) - self.server_docker_image = ( - self.agent_config["docker_config"]["registry_server"] - + self.agent_config["docker_config"]["registry_dir"] - + self.server_docker_base_image - ) - - logging.info("docker image {}".format(self.server_docker_image)) - # logging.info("file_sys_driver {}".format(self.agent_config["docker_config"]["file_sys_driver"])) - - registry_secret_cmd = ( - "kubectl create namespace fedml-devops-aggregator-" - + self.version - + ";kubectl -n fedml-devops-aggregator-" - + self.version - + " delete secret secret-" - + self.cloud_server_name - + " ;kubectl create secret docker-registry secret-" - + self.cloud_server_name - + " --docker-server=" - + self.agent_config["docker_config"]["registry_server"] - + " --docker-username=" - + self.agent_config["docker_config"]["user_name"] - + " --docker-password=$(aws ecr-public get-login-password --region " - + self.agent_config["docker_config"]["public_cloud_region"] - + ")" - + " --docker-email=fedml@fedml.ai -n fedml-devops-aggregator-" - + 
self.version - ) - logging.info("Create secret cmd: " + registry_secret_cmd) - os.system(registry_secret_cmd) - - message_bytes = json.dumps(self.request_json).encode("ascii") - base64_bytes = base64.b64encode(message_bytes) - runner_cmd_encoded = base64_bytes.decode("ascii") - logging.info("runner_cmd_encoded: {}".format(runner_cmd_encoded)) - # logging.info("runner_cmd_decoded: {}".format(base64.b64decode(runner_cmd_encoded).decode())) - cur_dir = os.path.dirname(__file__) - run_deployment_cmd = ( - "export FEDML_AGGREGATOR_NAME=" - + self.cloud_server_name - + ";export FEDML_AGGREGATOR_SVC=" - + self.cloud_server_name - + ";export FEDML_AGGREGATOR_VERSION=" - + self.version - + ';export FEDML_AGGREGATOR_IMAGE_PATH="' - + self.server_docker_image - + '"' - + ";export FEDML_CONF_ID=" - + self.cloud_server_name - + ";export FEDML_DATA_PV_ID=" - + self.cloud_server_name - + ";export FEDML_DATA_PVC_ID=" - + self.cloud_server_name - + ";export FEDML_REGISTRY_SECRET_SUFFIX=" - + self.cloud_server_name - + ";export FEDML_ACCOUNT_ID=0" - + ";export FEDML_SERVER_DEVICE_ID=" - + self.request_json.get("cloudServerDeviceId", "0") - + ";export FEDML_VERSION=" - + self.version - + ";export FEDML_PACKAGE_NAME=" - + packages_config.get("server", "") - + ";export FEDML_PACKAGE_URL=" - + packages_config.get("serverUrl", "") - + ";export FEDML_RUNNER_CMD=" - + runner_cmd_encoded - + ";envsubst < " - + os.path.join(cur_dir, "templates", "fedml-server-deployment.yaml") - + " | kubectl apply -f - " - ) - logging.info("FedMLServerRunner.run with k8s: " + run_deployment_cmd) - os.system(run_deployment_cmd) - - def stop_cloud_server(self): - self.cloud_server_name = FedMLServerRunner.FEDML_CLOUD_SERVER_PREFIX + str(self.run_id) \ - + "-" + str(self.edge_id) - self.server_docker_image = ( - self.agent_config["docker_config"]["registry_server"] - + self.agent_config["docker_config"]["registry_dir"] - + self.server_docker_base_image - ) - delete_deployment_cmd = ( - "export FEDML_AGGREGATOR_NAME=" - + self.cloud_server_name - + ";export FEDML_AGGREGATOR_SVC=" - + self.cloud_server_name - + ";export FEDML_AGGREGATOR_VERSION=" - + self.version - + ';export FEDML_AGGREGATOR_IMAGE_PATH="' - + self.server_docker_image - + '"' - + ";export FEDML_CONF_ID=" - + self.cloud_server_name - + ";export FEDML_DATA_PV_ID=" - + self.cloud_server_name - + ";export FEDML_DATA_PVC_ID=" - + self.cloud_server_name - + ";export FEDML_REGISTRY_SECRET_SUFFIX=" - + self.cloud_server_name - + ";kubectl -n fedml-devops-aggregator-" - + self.version - + " delete deployment " - + self.cloud_server_name - + ";kubectl -n fedml-devops-aggregator-" - + self.version - + " delete svc " - + self.cloud_server_name - + ";kubectl -n fedml-devops-aggregator-" - + self.version - + " delete secret secret-" - + self.cloud_server_name - ) - logging.info("FedMLServerRunner.stop_run with k8s: " + delete_deployment_cmd) - os.system(delete_deployment_cmd) - - def setup_message_center(self): - if self.message_center is not None: - return - - self.message_center = FedMLMessageCenter(agent_config=self.agent_config) - self.message_center.start_sender() - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.message_center) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = self.edge_id - self.mlops_metrics.server_agent_id = self.server_agent_id - - def rebuild_message_center(self, message_center_queue): - self.message_center = FedMLMessageCenter(message_queue=message_center_queue) - - if 
self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.message_center) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = self.edge_id - self.mlops_metrics.server_agent_id = self.server_agent_id - - def release_message_center(self): - try: - if self.message_center is not None: - self.message_center.stop() - self.message_center = None - - except Exception as e: - logging.error( - f"Failed to release client mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - def send_training_stop_request_to_edges( - self, edge_id_list, payload=None, run_id=0): - if payload is None: - payload_obj = {"runId": run_id, "edgeids": edge_id_list} - else: - payload_obj = json.loads(payload) - - for edge_id in edge_id_list: - topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train" - logging.info("stop_train: send topic " + topic_stop_train) - self.message_center.send_message(topic_stop_train, json.dumps(payload_obj)) - - def send_training_stop_request_to_specific_edge(self, edge_id, payload): - topic_stop_train = "flserver_agent/" + str(edge_id) + "/stop_train" - logging.info("stop_train: send topic " + topic_stop_train) - self.message_center.send_message(topic_stop_train, payload) - - def send_training_stop_request_to_cloud_server(self, edge_id, payload): - topic_stop_train = "mlops/flserver_agent_" + str(edge_id) + "/stop_train" - logging.info("stop_train: send topic " + topic_stop_train) - self.message_center.send_message(topic_stop_train, payload) - - def send_training_stop_request_to_edges_when_exception( - self, edge_id_list, payload=None, run_id=0, server_id=None, status=None): - if payload is None: - payload_obj = {"runId": run_id, "edgeids": edge_id_list} - if server_id is not None: - payload_obj["serverId"] = server_id - else: - payload_obj = json.loads(payload) - payload_obj["run_status"] = ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION if status is None else status - topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train" - self.callback_stop_train(topic_stop_train, json.dumps(payload_obj), use_payload=payload_obj) - - def callback_stop_train(self, topic, payload, use_payload=None): - # logging.info("callback_stop_train: topic = %s, payload = %s" % (topic, payload)) - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json.get("runId", None) - if run_id is None: - run_id = request_json.get("id", None) - - edge_id_list = request_json["edgeids"] - server_id = request_json.get("serverId", None) - if server_id is None: - server_id = request_json.get("server_id", None) - - if run_id is None or server_id is None: - logging.info("Json format is not correct!") - return - - # logging.info("Stop run with multiprocessing.") - - # Stop server with multiprocessing mode - run_id_str = str(run_id) - stop_request_json = self.running_request_json.get(run_id_str, None) - if stop_request_json is None: - stop_request_json = request_json - if use_payload is not None: - stop_request_json = use_payload - - if self.run_process_event_map.get(run_id_str) is not None: - self.run_process_event_map.get(run_id_str).set() - - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config, - 
edge_id=self.edge_id - ) - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - self.run_process_event_map_for_stop[run_id_str] = multiprocessing.Event() - if self.run_edge_id_status_queue_map.get(run_id_str, None) is None: - self.run_edge_id_status_queue_map[run_id_str] = Queue() - if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue() - # if self.run_edge_device_info_global_queue_for_stop is None: - # self.run_edge_device_info_global_queue_for_stop = Array('i', list()) - - self.run_stop_process_map[run_id_str] = Process( - target=server_runner.run_stop, args=( - self.run_process_event_map_for_stop[run_id_str], - self.run_edge_id_status_queue_map[run_id_str], - self.run_edge_device_info_queue_map_for_stop[run_id_str], - self.run_edge_device_info_global_queue_for_stop, - self.message_center.get_message_queue(), - ) - ) - self.run_stop_process_map[run_id_str].start() - elif self.run_as_cloud_agent: - self.send_training_stop_request_to_cloud_server(server_id, payload) - return - elif self.run_as_cloud_server: - # if not self.use_local_process_as_cloud_server: - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config, - edge_id=server_id - ) - server_runner.run_as_cloud_agent = self.run_as_cloud_agent - self.run_process_event_map_for_stop[run_id_str] = multiprocessing.Event() - if self.run_edge_id_status_queue_map.get(run_id_str, None) is None: - self.run_edge_id_status_queue_map[run_id_str] = Queue() - if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue() - # if self.run_edge_device_info_global_queue_for_stop is None: - # self.run_edge_device_info_global_queue_for_stop = Array('i', list()) - - self.run_stop_process_map[run_id_str] = Process( - target=server_runner.run_stop, args=( - self.run_process_event_map_for_stop[run_id_str], - self.run_edge_id_status_queue_map[run_id_str], - self.run_edge_device_info_queue_map_for_stop[run_id_str], - self.run_edge_device_info_global_queue_for_stop, - self.message_center.get_message_queue(), - ) - ) - self.run_stop_process_map[run_id_str].start() - return - - if self.running_request_json.get(run_id_str, None) is not None: - self.running_request_json.pop(run_id_str) - - if self.run_process_map.get(run_id_str, None) is not None: - self.run_process_map.pop(run_id_str) - - def run_stop(self, process_event, edge_id_status_queue, edge_device_info_queue, - edge_device_info_global_queue, message_center_queue): - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - try: - MLOpsUtils.set_ntp_offset(self.ntp_offset) - - self.rebuild_message_center(message_center_queue) - - self.run_stop_impl(edge_id_status_queue, edge_device_info_queue, edge_device_info_global_queue) - except Exception as e: - logging.error("Stop runner exits with exceptions. 
{}".format(traceback.format_exc())) - finally: - logging.info("Release resources.") - - def run_stop_impl(self, edge_id_status_queue, edge_device_info_queue, edge_device_info_global_queue): - run_id_str = str(self.run_id) - edge_id_list = self.request_json["edgeids"] - - # Detect running status of all edges - status_ok, active_edge_info_dict, inactivate_edges = self.detect_edges_status( - edge_device_info_queue, edge_device_info_global_queue=edge_device_info_global_queue, - status_timeout=120, need_to_trigger_exception=False, - status_check_context=SchedulerConstants.STATUS_CHECK_FRO_RUN_STOP_CONTEXT) - - # Send the training stopping request to running edges. - for edge_id_item, _ in active_edge_info_dict.items(): - self.send_training_stop_request_to_specific_edge(edge_id_item, json.dumps(self.request_json)) - time.sleep(0.2) - time.sleep(3) - - total_sleep_seconds = 0 - allowed_status_check_sleep_seconds = 60 - server_id = self.edge_id - running_edges_list = list() - current_edge_id_status_map = dict() - - while True: - # Fetch edge id and status from the edge id status queue - while True: - try: - queue_item = edge_id_status_queue.get(block=False, timeout=3) - if queue_item is not None: - current_edge_id_status_map.update(queue_item) - except queue.Empty as e: # If queue is empty, then break loop - break - - # Calc the total killed device number - running_edges_list.clear() - number_of_failed_edges = 0 - number_of_finished_edges = 0 - number_of_killed_edges = 0 - for edge_id_item, status_item in current_edge_id_status_map.items(): - if edge_id_item == "server": - continue - - if status_item is None or status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION: - number_of_failed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - number_of_finished_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: - number_of_killed_edges += 1 - continue - - if status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE or \ - status_item == ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE: - continue - - running_edges_list.append(edge_id_item) - - # If the killed device number is equal total device number, then break - if len(running_edges_list) <= 0 and len(current_edge_id_status_map.keys()) == len(edge_id_list) + 1: - break - - # Calc the timeout value to wait to device killed. 
- time.sleep(3) - total_sleep_seconds += 3 - if total_sleep_seconds < allowed_status_check_sleep_seconds: - continue - - # If timeout, then report killed device status - no_response_edges = list(set(edge_id_list) - set(running_edges_list)) - if len(no_response_edges) <= 0: - break - for edge_id_item in no_response_edges: - self.mlops_metrics.report_client_id_status( - edge_id_item, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED, - server_id=self.edge_id, run_id=self.run_id) - - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) - elif self.run_as_cloud_agent: - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, server_id) - - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, edge_id=self.edge_id, - server_id=self.edge_id, server_agent_id=self.edge_id) - - def set_run_status(self, run_id, status, running_request_json): - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=running_request_json, agent_config=self.agent_config - ) - server_runner.edge_id = self.edge_id - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - server_runner.run_status = status - server_runner.message_center = self.message_center - server_runner.mlops_metrics = self.mlops_metrics - server_runner.cleanup_client_with_status() - - def callback_runner_id_status(self, topic, payload): - # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload)) - # logging.info( - # f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - # ) - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json["run_id"] - status = request_json["status"] - edge_id = request_json["edge_id"] - server_id = request_json.get("server_id", None) - run_id_str = str(run_id) - - if ( - status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED - or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED - or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED - ): - completed_event = self.run_process_completed_event_map.get(run_id_str, None) - if completed_event is not None: - completed_event.set() - - FedMLServerDataInterface.get_instance().save_job_status(run_id, self.edge_id, status, status) - - # Stop server with multiprocessing mode - running_request_json = self.running_request_json.get(run_id_str, None) - if running_request_json is None: - running_request_json = request_json - if self.run_as_edge_server_and_agent or self.enable_simulation_cloud_agent: - self.set_run_status(run_id, status, running_request_json) - - run_process = self.run_process_map.get(run_id_str, None) - if run_process is not None: - if run_process.pid is not None: - RunProcessUtils.kill_process(run_process.pid) - - self.run_process_map.pop(run_id_str) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - elif self.run_as_cloud_agent: - pass - elif self.run_as_cloud_server: - self.set_run_status(run_id, status, running_request_json) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - if self.use_local_process_as_cloud_server: - # RunProcessUtils.kill_process(os.getpid()) - 
cloud_server_process = self.run_process_map.get(run_id_str, None) - if cloud_server_process is not None: - RunProcessUtils.kill_process(cloud_server_process.pid) - else: - self.stop_cloud_server() - - if self.run_process_map.get(run_id_str, None) is not None: - self.run_process_map.pop(run_id_str) - - self.remove_listener_for_run_metrics(self.run_id) - self.remove_listener_for_run_logs(self.run_id) - elif ( - status == ServerConstants.MSG_MLOPS_SERVER_STATUS_EXCEPTION - ): - request_json = self.running_request_json.get(run_id_str, None) - if request_json is not None: - edge_id_list = request_json.get("edgeids", list()) - server_id = request_json.get("serverId", None) - server_id = request_json.get("server_id", None) if server_id is None else server_id - self.send_training_stop_request_to_edges_when_exception( - edge_id_list, run_id=run_id, server_id=server_id, - status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - - FedMLServerDataInterface.get_instance().save_job_status(run_id, self.edge_id, status, status) - else: - request_json = self.running_request_json.get(run_id_str, None) - if request_json is None: - request_json = self.start_request_json - self.mlops_metrics.report_server_training_status( - run_id, status, edge_id=self.edge_id, running_json=json.dumps(request_json)) - - def cleanup_client_with_status(self): - if self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - # logging.info("received to finished status.") - self.cleanup_run_when_finished(should_send_server_id_status=False) - elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: - # logging.info("received to failed status.") - self.cleanup_run_when_starting_failed(should_send_server_id_status=False) - elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED: - # logging.info("received to failed status.") - self.cleanup_run_when_starting_failed( - status=self.run_status, should_send_server_id_status=False) - - def callback_report_current_status(self, topic, payload): - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - request_json = json.loads(payload) - if self.run_as_edge_server_and_agent: - self.send_agent_active_msg() - elif self.run_as_cloud_agent: - self.send_agent_active_msg() - elif self.run_as_cloud_server: - pass - - @staticmethod - def process_ota_upgrade_msg(): - os.system("pip install -U fedml") - - def callback_server_ota_msg(self, topic, payload): - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - request_json = json.loads(payload) - cmd = request_json["cmd"] - - if cmd == ServerConstants.FEDML_OTA_CMD_UPGRADE: - try: - self.process_ota_upgrade_msg() - # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start() - raise Exception("After upgraded, restart runner...") - except Exception as e: - pass - elif cmd == ServerConstants.FEDML_OTA_CMD_RESTART: - raise Exception("Restart runner...") - - def callback_response_device_info(self, topic, payload): - # Parse payload - payload_json = json.loads(payload) - run_id = payload_json.get("run_id", 0) - context = payload_json.get("context", None) - master_device_id = payload_json.get("master_device_id", 0) - slave_device_id = payload_json.get("slave_device_id", 0) - slave_device_id_list = payload_json.get("slave_device_id_list", 0) - edge_id = payload_json.get("edge_id", 0) - device_info = payload_json.get("edge_info", 0) - device_info["master_device_id"] = master_device_id - device_info["slave_device_id"] = slave_device_id - 
device_info["slave_device_id_list"] = slave_device_id_list - run_id_str = str(run_id) - - # Put device info into a multiprocessing queue so master runner checks if all edges are ready - if context is None: - if self.run_edge_device_info_queue_map.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map[run_id_str] = Queue() - self.run_edge_device_info_queue_map[run_id_str].put(device_info) - - # if self.run_edge_device_info_global_queue is None: - # self.run_edge_device_info_global_queue = Array('i', list()) - # - # self.run_edge_device_info_global_queue[len(self.run_edge_device_info_global_queue)] = \ - # {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info} - - self.check_model_device_ready_and_deploy(run_id, master_device_id, slave_device_id, - slave_device_id_list=slave_device_id_list) - elif context == SchedulerConstants.STATUS_CHECK_FRO_RUN_STOP_CONTEXT: - if self.run_edge_device_info_queue_map_for_stop.get(run_id_str, None) is None: - self.run_edge_device_info_queue_map_for_stop[run_id_str] = Queue() - self.run_edge_device_info_queue_map_for_stop[run_id_str].put(device_info) - - # if self.run_edge_device_info_global_queue_for_stop is None: - # self.run_edge_device_info_global_queue_for_stop = Array('i', list()) - # - # self.run_edge_device_info_global_queue_for_stop[len(self.run_edge_device_info_global_queue_for_stop)] = \ - # {"timestamp": time.time(), "edge_id": edge_id, "device_info": device_info} - - def check_model_device_ready_and_deploy(self, run_id, master_device_id, slave_device_id, slave_device_id_list=None): - request_json = self.running_request_json.get(str(run_id), None) - if request_json is None: - return - run_config = request_json["run_config"] - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - job_type = job_yaml.get("job_type", None) - job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - if job_type != Constants.JOB_TASK_TYPE_DEPLOY and job_type != Constants.JOB_TASK_TYPE_SERVE: - return - - # Init model device ids for each run - run_id_str = str(run_id) - if self.run_model_device_ids.get(run_id_str, None) is None: - self.run_model_device_ids[run_id_str] = list() - - # Append master device and slave devices to the model devices map - self.run_model_device_ids[run_id_str].append({"master_device_id": master_device_id, - "slave_device_id": slave_device_id}) - model_device_ids = self.run_model_device_ids.get(run_id_str, None) - if model_device_ids is None: - return - - # Check if all model devices are ready - if len(model_device_ids) != len(self.run_edge_ids.get(run_id_str, list())): - return - - # Generate model master ids and model slave device ids - device_master_ids = list() - device_slave_ids = list() - for device_ids in model_device_ids: - model_master_id = device_ids.get("master_device_id") - model_slave_id = device_ids.get("slave_device_id") - device_master_ids.append(model_master_id) - device_slave_ids.append(model_slave_id) - - if len(device_master_ids) <= 0: - return - - # Generate serving devices for deploying - serving_devices = list() - serving_devices.append(device_master_ids[0]) - serving_devices.extend(device_slave_ids) - - # Start to deploy the model - self.deploy_model(serving_devices, request_json, run_id=run_id) - - def callback_request_device_info_from_mlops(self, topic, payload): - self.response_device_info_to_mlops(topic, payload) - - def response_device_info_to_mlops(self, topic, payload): - response_topic = 
f"deploy/master_agent/mlops/response_device_info" - payload_json = json.loads(payload) - need_gpu_info = payload_json.get("need_gpu_info", False) - if self.mlops_metrics is not None: - if not need_gpu_info: - response_payload = { - "run_id": self.run_id, - "master_agent_device_id": self.edge_id, - "fedml_version": fedml.__version__ - } - else: - total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, \ - gpu_cores_total, gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = \ - sys_utils.get_sys_realtime_stats() - gpu_available_ids = JobRunnerUtils.get_instance().get_available_gpu_id_list(self.edge_id) - gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids) - gpu_cores_available = len(gpu_available_ids) - response_payload = { - "run_id": self.run_id, - "master_agent_device_id": self.edge_id, - "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2), - "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "cpuUtilization": round(cup_utilization, 2), - "cpuCores": cpu_cores, - "gpuCoresTotal": gpu_cores_total, - "gpuCoresAvailable": gpu_cores_available, - "networkTraffic": sent_bytes + recv_bytes, - "timestamp": int(MLOpsUtils.get_ntp_time()), - "fedml_version": fedml.__version__ - } - self.mlops_metrics.report_json_message(response_topic, json.dumps(response_payload)) - - @staticmethod - def get_device_id(): - device_file_path = os.path.join(ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME) - file_for_device_id = os.path.join(device_file_path, "devices.id") - if not os.path.exists(device_file_path): - os.makedirs(device_file_path) - elif os.path.exists(file_for_device_id): - with open(file_for_device_id, 'r', encoding='utf-8') as f: - device_id_from_file = f.readline() - if device_id_from_file is not None and device_id_from_file != "": - return device_id_from_file - - if platform.system() == "Darwin": - cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \ - "|awk -F':' '{print $2}' " - device_id = os.popen(cmd_get_serial_num).read() - device_id = device_id.replace('\n', '').replace(' ', '') - if device_id is None or device_id == "": - device_id = hex(uuid.getnode()) - else: - device_id = "0x" + device_id - else: - if "nt" in os.name: - - def get_uuid(): - guid = "" - try: - cmd = "wmic csproduct get uuid" - guid = str(subprocess.check_output(cmd)) - pos1 = guid.find("\\n") + 2 - guid = guid[pos1:-15] - except Exception as ex: - pass - return str(guid) - - device_id = str(get_uuid()) - elif "posix" in os.name: - device_id = sys_utils.get_device_id_in_docker() - if device_id is None: - device_id = hex(uuid.getnode()) - else: - device_id = sys_utils.run_subprocess_open( - "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split() - ) - device_id = hex(device_id) - - if device_id is not None and device_id != "": - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - else: - device_id = hex(uuid.uuid4()) - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - - return device_id - - def bind_account_and_device_id(self, url, account_id, device_id, os_name, api_key="", role=None): - if role is None: - role = "edge_server" - if self.run_as_edge_server_and_agent: - role = "edge_server" - elif 
- role = "cloud_agent"
- elif self.run_as_cloud_server:
- role = "cloud_server"
-
- ip = requests.get('https://checkip.amazonaws.com').text.strip()
- fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \
- cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \
- gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info()
- host_name = sys_utils.get_host_name()
- json_params = {
- "accountid": account_id,
- "deviceid": device_id,
- "type": os_name,
- "state": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
- "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE,
- "processor": cpu_info,
- "core_type": cpu_info,
- "network": "",
- "role": role,
- "os_ver": os_ver,
- "memory": total_mem,
- "ip": ip,
- "api_key": api_key,
- "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver,
- "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver,
- "mpi_installed": mpi_installed, "cpu_usage": cpu_usage,
- "available_mem": available_mem, "total_mem": total_mem,
- "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name}
- }
- if gpu_count > 0:
- if gpu_total_mem is not None:
- # Parenthesize the conditional so the GPU memory suffix is appended in both cases
- json_params["gpu"] = (gpu_info if gpu_info is not None else "") + ", Total GPU Memory: " + gpu_total_mem
- else:
- json_params["gpu"] = gpu_info if gpu_info is not None else ""
- json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else ""
- if gpu_available_mem is not None:
- json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem
- if gpu_total_mem is not None:
- json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem
-
- json_params["extra_infos"]["gpu_count"] = gpu_count
- json_params["extra_infos"]["gpu_vendor"] = gpu_vendor
- json_params["extra_infos"]["gpu_device_name"] = gpu_device_name
-
- gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count)
- gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0
- gpu_list = sys_utils.get_gpu_list()
- json_params["extra_infos"]["gpu_available_count"] = gpu_available_count
- json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list
- json_params["extra_infos"]["gpu_list"] = gpu_list
- else:
- json_params["gpu"] = "None"
- json_params["extra_infos"]["gpu_available_count"] = 0
- json_params["extra_infos"]["gpu_available_id_list"] = []
- json_params["extra_infos"]["gpu_list"] = []
-
- _, cert_path = MLOpsConfigs.get_request_params()
- if cert_path is not None:
- try:
- requests.session().verify = cert_path
- response = requests.post(
- url, json=json_params, verify=True,
- headers={"content-type": "application/json", "Connection": "close"}
- )
- except requests.exceptions.SSLError as err:
- MLOpsConfigs.install_root_ca_file()
- response = requests.post(
- url, json=json_params, verify=True,
- headers={"content-type": "application/json", "Connection": "close"}
- )
- else:
- response = requests.post(url, json=json_params, headers={"Connection": "close"})
- edge_id = -1
- user_name = None
- extra_url = None
- if response.status_code != 200:
- print(f"Binding to MLOps with response.status_code = {response.status_code}, "
- f"response.content: {response.content}")
- pass
- else:
- # print("url = {}, response = {}".format(url, response))
- status_code = response.json().get("code")
- if status_code == "SUCCESS":
- edge_id = response.json().get("data").get("id")
- user_name = response.json().get("data").get("userName", None)
- extra_url = response.json().get("data").get("url", None)
- if edge_id is None or edge_id <= 0:
- print(f"Binding to MLOps with response.status_code = {response.status_code}, "
- f"response.content: {response.content}")
- else:
- if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR:
- raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR)
- print(f"Binding to MLOps with response.status_code = {response.status_code}, "
- f"response.content: {response.content}")
- return -1, None, None
- return edge_id, user_name, extra_url
-
- def fetch_configs(self):
- return MLOpsConfigs.fetch_all_configs()
-
- def send_agent_active_msg(self):
- active_topic = "flserver_agent/active"
- status = MLOpsStatus.get_instance().get_server_agent_status(self.edge_id)
- if (
- status is not None
- and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE
- and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
- ):
- return
-
- if self.run_as_cloud_agent:
- status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
- else:
- try:
- current_job = FedMLServerDataInterface.get_instance().get_job_by_id(self.run_id)
- except Exception as e:
- current_job = None
- if current_job is None:
- if status is not None and status == ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE:
- status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE
- else:
- return
- else:
- status = ServerConstants.get_device_state_from_run_edge_state(current_job.status)
- active_msg = {"ID": self.edge_id, "status": status}
- MLOpsStatus.get_instance().set_server_agent_status(self.edge_id, status)
- if self.mqtt_mgr is not None:
- self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg))
- else:
- self.send_message_json(active_topic, json.dumps(active_msg))
-
- def recover_start_train_msg_after_upgrading(self):
- try:
- current_job = FedMLServerDataInterface.get_instance().get_current_job()
- if current_job is not None and \
- current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING:
- logging.info("Start training after upgrading.")
- server_agent_id = self.edge_id
- topic_start_train = "mlops/flserver_agent_" + str(server_agent_id) + "/start_train"
- self.callback_start_train(topic_start_train, current_job.running_json)
- except Exception as e:
- logging.info("Failed to recover the start_train message after upgrading: {}".format(traceback.format_exc()))
-
- def on_agent_mqtt_connected(self, mqtt_client_object):
- # The MQTT message topic format is as follows: <sender>/<receiver>/<action>
-
- # Setup MQTT message listener for starting training
- server_agent_id = self.edge_id
- topic_start_train = "mlops/flserver_agent_" + str(server_agent_id) + "/start_train"
- self.add_message_listener(topic_start_train, self.callback_start_train)
- self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center)
-
- # Setup MQTT message listener for stopping training
- topic_stop_train = "mlops/flserver_agent_" + str(server_agent_id) + "/stop_train"
- self.add_message_listener(topic_stop_train, self.callback_stop_train)
- self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center)
-
- # Setup MQTT message listener for server status switching
- topic_server_status = "fl_server/flserver_agent_" + str(server_agent_id) + "/status"
- self.add_message_listener(topic_server_status, self.callback_runner_id_status)
- self.mqtt_mgr.add_message_listener(topic_server_status, self.listener_message_dispatch_center)
-
- # Setup MQTT message listener to report current device status.
- topic_report_status = "mlops/report_device_status"
- self.add_message_listener(topic_report_status, self.callback_report_current_status)
- self.mqtt_mgr.add_message_listener(topic_report_status, self.listener_message_dispatch_center)
-
- # Setup MQTT message listener for OTA messages from MLOps.
- topic_ota_msg = "mlops/flserver_agent_" + str(server_agent_id) + "/ota"
- self.add_message_listener(topic_ota_msg, self.callback_server_ota_msg)
- self.mqtt_mgr.add_message_listener(topic_ota_msg, self.listener_message_dispatch_center)
-
- # Setup MQTT message listener for device info responses from the client.
- topic_response_device_info = "client/server/response_device_info/" + str(self.edge_id)
- self.add_message_listener(topic_response_device_info, self.callback_response_device_info)
- self.mqtt_mgr.add_message_listener(topic_response_device_info, self.listener_message_dispatch_center)
-
- # Setup MQTT message listener for device info requests from MLOps.
- topic_request_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.edge_id}"
- self.add_message_listener(topic_request_device_info_from_mlops, self.callback_request_device_info_from_mlops)
- self.mqtt_mgr.add_message_listener(
- topic_request_device_info_from_mlops, self.listener_message_dispatch_center)
-
- # Subscribe to the topics for starting training, stopping training and fetching client status.
- mqtt_client_object.subscribe(topic_start_train, qos=2)
- mqtt_client_object.subscribe(topic_stop_train, qos=2)
- mqtt_client_object.subscribe(topic_server_status, qos=2)
- mqtt_client_object.subscribe(topic_report_status, qos=2)
- mqtt_client_object.subscribe(topic_ota_msg, qos=2)
- mqtt_client_object.subscribe(topic_response_device_info, qos=2)
- mqtt_client_object.subscribe(topic_request_device_info_from_mlops, qos=2)
-
- self.subscribed_topics.clear()
- self.subscribed_topics.append(topic_start_train)
- self.subscribed_topics.append(topic_stop_train)
- self.subscribed_topics.append(topic_server_status)
- self.subscribed_topics.append(topic_report_status)
- self.subscribed_topics.append(topic_ota_msg)
- self.subscribed_topics.append(topic_response_device_info)
- self.subscribed_topics.append(topic_request_device_info_from_mlops)
-
- # Broadcast the first active message.
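- # (The active message is a small heartbeat JSON, {"ID": <edge_id>,
- # "status": <status>}, published to the "flserver_agent/active" topic by
- # send_agent_active_msg() above.)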
- self.send_agent_active_msg() - - # Start the message center for listener - self.start_listener(sender_message_queue=self.message_center.get_message_queue(), - agent_config=self.agent_config) - - if self.run_as_cloud_server: - # Start the FedML server - message_bytes = self.args.runner_cmd.encode("ascii") - base64_bytes = base64.b64decode(message_bytes) - payload = base64_bytes.decode("ascii") - self.receive_message_json(topic_start_train, payload) - - # Echo results - MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout() - print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!") - print( - "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is " - + str(self.unique_device_id) - ) - MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=True) - - def on_agent_mqtt_disconnected(self, mqtt_client_object): - MLOpsStatus.get_instance().set_server_agent_status( - self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE - ) - - def setup_agent_mqtt_connection(self, service_config): - # Setup MQTT connection - self.mqtt_mgr = MqttManager( - service_config["mqtt_config"]["BROKER_HOST"], - service_config["mqtt_config"]["BROKER_PORT"], - service_config["mqtt_config"]["MQTT_USER"], - service_config["mqtt_config"]["MQTT_PWD"], - service_config["mqtt_config"]["MQTT_KEEPALIVE"], - f"FedML_ServerAgent_Daemon_@{self.user_name}@_@{self.args.current_device_id}@_@{str(uuid.uuid4())}@", - "flserver_agent/last_will_msg", - json.dumps({"ID": self.edge_id, "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE}) - ) - - # Init local database - FedMLServerDataInterface.get_instance().create_job_table() - - # Start the message center to process edge related messages. - self.setup_message_center() - - server_api_cmd = "fedml.computing.scheduler.master.server_api:api" - server_api_pids = RunProcessUtils.get_pid_from_cmd_line(server_api_cmd) - if server_api_pids is None or len(server_api_pids) <= 0: - # Start local API services - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - python_program = get_python_program() - self.local_api_process = ServerConstants.exec_console_with_script( - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - python_program, server_api_cmd, ServerConstants.LOCAL_SERVER_API_PORT, - fedml_base_dir - ), - should_capture_stdout=False, - should_capture_stderr=False - ) - # if self.local_api_process is not None and self.local_api_process.pid is not None: - # print(f"Server local API process id {self.local_api_process.pid}") - - # Setup MQTT connected listener - self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected) - self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected) - self.mqtt_mgr.connect() - - # Report the IDLE status to MLOps - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, edge_id=self.edge_id) - MLOpsStatus.get_instance().set_server_agent_status( - self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - ) - - # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor() - - self.mlops_metrics.stop_device_realtime_perf() - self.mlops_metrics.report_device_realtime_perf(self.args, service_config["mqtt_config"], is_client=False) - - if not self.run_as_cloud_server: - self.recover_start_train_msg_after_upgrading() - - 
JobCleanup.get_instance().sync_data_on_startup(self.edge_id, is_client=False) - - self.master_api_daemon = MasterApiDaemon() - self.master_api_process = Process(target=self.master_api_daemon.run) - self.master_api_process.start() - - # if self.model_device_server is None: - # self.model_device_server = FedMLModelDeviceServerRunner(self.args, self.args.current_device_id, - # self.args.os_name, self.args.is_from_docker, - # self.agent_config) - # self.model_device_server.start() - - def start_agent_mqtt_loop(self): - # Start MQTT message loop - try: - self.mqtt_mgr.loop_forever() - except Exception as e: - if str(e) == "Restarting after upgraded...": - logging.info("Restarting after upgraded...") - else: - logging.info("Server tracing: {}".format(traceback.format_exc())) - - finally: - login_exit_file = os.path.join(ServerConstants.get_log_file_dir(), "exited.log") - with open(login_exit_file, "w") as f: - f.writelines(f"{os.getpid()}.") - - self.stop_agent() - - time.sleep(5) - sys_utils.cleanup_all_fedml_server_login_processes( - ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False) - sys.exit(1) - - def stop_agent(self): - if self.run_process_event is not None: - self.run_process_event.set() - - if self.mqtt_mgr is not None: - try: - for topic in self.subscribed_topics: - self.mqtt_mgr.unsubscribe_msg(topic) - except Exception as e: - pass - - self.mqtt_mgr.loop_stop() - self.mqtt_mgr.disconnect() - self.release_message_center() - - def get_runner(self): - runner = FedMLServerRunner( - self.args, run_id=self.run_id, request_json=self.request_json, - agent_config=self.agent_config - ) - runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - runner.edge_id = self.edge_id - runner.server_agent_id = self.server_agent_id - runner.start_request_json = self.start_request_json - runner.unique_device_id = self.unique_device_id - runner.user_name = self.user_name - runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - runner.run_as_cloud_agent = self.run_as_cloud_agent - runner.run_as_cloud_server = self.run_as_cloud_server - return runner diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py deleted file mode 100755 index 8bb03eebbd..0000000000 --- a/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py +++ /dev/null @@ -1,1483 +0,0 @@ -import json -import logging -import multiprocessing -import sys - -from multiprocessing import Process -import os -import platform -import shutil -import subprocess -import threading - -import time -import traceback -import urllib -import uuid -import zipfile -from urllib.parse import urlparse, urljoin - -import requests - -import yaml - -import fedml -from fedml import mlops -from fedml.computing.scheduler.model_scheduler.device_model_msg_object import FedMLModelMsgObject -from fedml.computing.scheduler.scheduler_core.compute_cache_manager import ComputeCacheManager - -from fedml.computing.scheduler.scheduler_core.compute_utils import ComputeUtils -from fedml.core.distributed.communication.s3.remote_storage import S3Storage -from .device_model_cache import FedMLModelCache -from ..comm_utils import sys_utils, security_utils - -from ..comm_utils.container_utils import ContainerUtils - -from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog - -from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager -from ..comm_utils.yaml_utils import 
load_yaml_config -from .device_client_constants import ClientConstants - -from ....core.mlops.mlops_metrics import MLOpsMetrics - -from ....core.mlops.mlops_configs import MLOpsConfigs -from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon -from ....core.mlops.mlops_status import MLOpsStatus -from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program -from .device_model_deployment import start_deployment, run_http_inference_with_curl_request -from .device_client_data_interface import FedMLClientDataInterface -from ....core.mlops.mlops_utils import MLOpsUtils -from ..comm_utils.job_utils import JobRunnerUtils -from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils -from .device_mqtt_inference_protocol import FedMLMqttInference -from .device_model_db import FedMLModelDatabase -from ..comm_utils.constants import SchedulerConstants -from fedml.computing.scheduler.comm_utils.job_monitor import JobMonitor - -from .device_replica_handler import FedMLDeviceReplicaHandler - -from fedml.computing.scheduler.scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol -import ssl - - -class RunnerError(Exception): - """ Runner failed. """ - pass - - -class RunnerCompletedError(Exception): - """ Runner completed. """ - pass - - -class FedMLClientRunner: - FEDML_BOOTSTRAP_RUN_OK = "[FedML]Bootstrap Finished" - - def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0): - self.local_api_process = None - self.run_process_event = None - self.run_process_event_map = dict() - self.run_process_completed_event = None - self.run_process_completed_event_map = dict() - self.run_inference_event_map = dict() - self.run_inference_response_map = dict() - self.run_process_map = dict() - self.device_status = None - self.current_training_status = None - self.mqtt_mgr = None - self.client_mqtt_mgr = None - self.client_mqtt_is_connected = False - self.client_mqtt_lock = None - self.edge_id = edge_id - self.run_id = run_id - self.unique_device_id = None - self.args = args - self.request_json = request_json - self.version = args.version - self.device_id = args.device_id - self.cur_dir = os.path.split(os.path.realpath(__file__))[0] - if args.current_running_dir is not None: - self.cur_dir = args.current_running_dir - self.sudo_cmd = "" - self.is_mac = False - if platform.system() == "Darwin": - self.is_mac = True - - self.agent_config = agent_config - self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data") - self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data") - self.fedml_data_dir = self.fedml_data_base_package_dir - self.fedml_config_dir = os.path.join("/", "fedml", "conf") - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {} - - self.mlops_metrics = None - self.client_active_list = dict() - self.infer_host = "127.0.0.1" - self.redis_addr = "local" - self.redis_port = "6379" - self.redis_password = "fedml_default" - - self.model_runner_mapping = dict() - self.ntp_offset = MLOpsUtils.get_ntp_offset() - self.running_request_json = dict() - self.endpoint_inference_runners = dict() - self.mqtt_inference_obj = None - - self.subscribed_topics = list() - self.user_name = None - - self.replica_handler = None - - def unzip_file(self, zip_file, unzip_file_path) -> str: - unziped_file_name = "" - if zipfile.is_zipfile(zip_file): - with zipfile.ZipFile(zip_file, "r") as zipf: - zipf.extractall(unzip_file_path) - unziped_file_name = zipf.namelist()[0] - else: - raise 
Exception("Invalid zip file {}".format(zip_file)) - - return unziped_file_name - - def retrieve_and_unzip_package(self, package_name, package_url): - """ - Download the package from the url and unzip it to the local package directory - ~/.fedml/fedml-model-client/fedml/model_packages/${end_point_id}_${end_point_name}_${model_name}_${model_version} - Under this folder, there should be the zipped file and the unzipped folder. - the zipped file starts with fedml_run_${end_point_id}_${end_point_name}_${model_name}_${model_version} - """ - # Models root directory - local_package_path = ClientConstants.get_model_package_dir() - os.makedirs(local_package_path, exist_ok=True) - - # Specify this model directory using ${end_point_id}_${end_point_name}_${model_name}_${model_version} - run_id = self.request_json["end_point_id"] - end_point_name = self.request_json["end_point_name"] - model_config = self.request_json["model_config"] - model_name = model_config["model_name"] - model_version = model_config["model_version"] - - model_version = model_version.replace(" ", "-") # Avoid using space for folder name - model_version = model_version.replace(":", "-") # Since docker mount will conflict with ":" - - this_run_model_dir = f"{run_id}_{end_point_name}_{model_name}_{model_version}" - this_run_model_full_path = os.path.join(local_package_path, this_run_model_dir) - os.makedirs(this_run_model_full_path, exist_ok=True) - - # Download the zipped package, overwrite it even if it exists - filename, filename_without_extension, file_extension = ClientConstants.get_filename_and_extension(package_url) - local_package_file = os.path.join(this_run_model_full_path, - f"fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}") - if os.path.exists(local_package_file): - os.remove(local_package_file) - logging.info("Download from package_url {}".format(package_url)) - ssl._create_default_https_context = ssl._create_unverified_context - urllib.request.urlretrieve(package_url, local_package_file, - reporthook=self.package_download_progress) - - # Unzip the package in the same folder, overwrite the unzipped folder even if it exists - unzip_package_path = os.path.join(this_run_model_full_path, - f"unzip_fedml_run_{self.run_id}_{self.edge_id}_{filename_without_extension}") - try: - shutil.rmtree(unzip_package_path, ignore_errors=True) - except Exception as e: - pass - package_dir_name = self.unzip_file(local_package_file, unzip_package_path) - unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name) - model_bin_file = os.path.join(unzip_package_path, "fedml_model.bin") # Will deprecated - logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format( - local_package_file, unzip_package_path, unzip_package_full_path)) - - return unzip_package_full_path, model_bin_file - - def retrieve_binary_model_file(self, package_name, package_url): - local_package_path = ClientConstants.get_model_package_dir() - if not os.path.exists(local_package_path): - os.makedirs(local_package_path, exist_ok=True) - unzip_package_path = ClientConstants.get_model_dir() - local_package_file = "{}".format(os.path.join(local_package_path, package_name)) - if os.path.exists(local_package_file): - os.remove(local_package_file) - urllib.request.urlretrieve(package_url, local_package_file, - reporthook=self.package_download_progress) - - unzip_package_path = os.path.join(unzip_package_path, package_name) - if not os.path.exists(unzip_package_path): - os.makedirs(unzip_package_path, exist_ok=True) 
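As the docstring of retrieve_and_unzip_package above notes, every deployment gets its own folder under the model-packages root, and the version string is sanitized before it becomes part of the folder name; the rollback path later locates backup packages by rebuilding exactly this name. A small illustrative helper, not part of the codebase, that captures the naming rule:

    import os

    def model_package_dir(root, end_point_id, end_point_name, model_name, model_version):
        # Spaces are awkward in folder names and ":" conflicts with docker
        # mount syntax, so both are replaced with "-" before building the path.
        version = model_version.replace(" ", "-").replace(":", "-")
        return os.path.join(root, f"{end_point_id}_{end_point_name}_{model_name}_{version}")

    # model_package_dir("~/.fedml/fedml-model-client/fedml/model_packages",
    #                   99, "demo-endpoint", "my-model", "v1 2024:03")
    # -> '~/.fedml/fedml-model-client/fedml/model_packages/99_demo-endpoint_my-model_v1-2024-03'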
- dst_model_file = os.path.join(unzip_package_path, package_name) - if os.path.exists(local_package_file): - shutil.copy(local_package_file, dst_model_file) - - return unzip_package_path, dst_model_file - - def package_download_progress(self, count, blksize, filesize): - self.check_runner_stop_event() - - downloaded = count * blksize - downloaded = filesize if downloaded > filesize else downloaded - progress = (downloaded / filesize * 100) if filesize != 0 else 0 - progress_int = int(progress) - downloaded_kb = format(downloaded / 1024, '.2f') - - # since this hook function is stateless, we need a state to avoid printing progress repeatedly - if count == 0: - self.prev_download_progress = 0 - if progress_int != self.prev_download_progress and progress_int % 5 == 0: - self.prev_download_progress = progress_int - logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) - - def build_dynamic_constrain_variables(self, run_id, run_config): - pass - - def update_local_fedml_config(self, run_id, model_config, model_config_parameters): - model_name = model_config["model_name"] - model_storage_url = model_config["model_storage_url"] - - # Retrieve model package or model binary file. - unzip_package_path, model_bin_file = self.retrieve_and_unzip_package(model_name, model_storage_url) - - # Load the config to memory - fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml") - - # Inject the config from UI to pkg yaml - package_conf_object = model_config_parameters - - # Save the config to local - with open(fedml_local_config_file, "w") as f: - yaml.dump(package_conf_object, f) - - logging.info("The package_conf_object is {}".format(package_conf_object)) - - return unzip_package_path, model_bin_file, package_conf_object - - def build_dynamic_args(self, run_config, package_conf_object, base_dir): - pass - - def download_model_package(self, package_name, package_url): - # Copy config file from the client - unzip_package_path = self.retrieve_and_unzip_package( - package_name, package_url - ) - - return unzip_package_path - - def run(self, process_event, completed_event): - # print(f"Model worker runner process id {os.getpid()}, run id {self.run_id}") - - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - self.run_process_completed_event = completed_event - run_id = self.request_json.get("end_point_id") - - try: - FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir()) - FedMLModelDatabase.get_instance().create_table() - - MLOpsUtils.set_ntp_offset(self.ntp_offset) - self.setup_client_mqtt_mgr() - - if not self.run_impl(): - logging.info( - f"[endpoint/device][{run_id}/{self.edge_id}] " - f"Failed to run the model deployment. 
run_impl return False.") - - # This if condition only happens when run_impl return False in a controllable way - # Under this condition, the run_impl itself should have handled the cleanup - # So no need to self.release_gpu_ids(run_id) - except RunnerError: - logging.error( - f"[endpoint/device][{run_id}/{self.edge_id}] " - f"Failed due to RunnerError {traceback.format_exc()}") - self.release_gpu_ids(run_id) - - self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED) - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - except RunnerCompletedError: - logging.error( - f"[endpoint/device][{run_id}/{self.edge_id}] " - f"Failed due to RunnerCompletedError {traceback.format_exc()}") - self.release_gpu_ids(run_id) - - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - except Exception as e: - logging.error( - f"[endpoint/device][{run_id}/{self.edge_id}] " - f"Failed due to exception {traceback.format_exc()}") - - self.cleanup_run_when_starting_failed() - self.mlops_metrics.client_send_exit_train_msg( - run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - - self.release_gpu_ids(run_id) - - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - time.sleep(2) - sys.exit(1) - finally: - logging.info("[Worker] Release resources after deployment.") - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - if self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - self.release_client_mqtt_mgr() - - def release_gpu_ids(self, run_id): - JobRunnerUtils.get_instance().release_gpu_ids(run_id, self.edge_id) - - def check_runner_stop_event(self): - if self.run_process_event.is_set(): - logging.info("Received stopping event.") - raise RunnerError("Runner stopped") - - if self.run_process_completed_event is not None and self.run_process_completed_event.is_set(): - logging.info("Received completed event.") - raise RunnerCompletedError("Runner completed") - - def run_impl(self): - # Get deployment params - run_id = self.request_json["end_point_id"] - end_point_name = self.request_json["end_point_name"] - device_ids = self.request_json["device_ids"] - master_ip = self.request_json["master_node_ip"] - model_config = self.request_json["model_config"] - model_name = model_config["model_name"] - model_id = model_config["model_id"] - model_version = model_config["model_version"] - model_config_parameters = self.request_json["parameters"] - inference_port = model_config_parameters.get("worker_internal_port", - ClientConstants.MODEL_INFERENCE_DEFAULT_PORT) - inference_port_external = model_config_parameters.get("worker_external_port", inference_port) - inference_engine = model_config_parameters.get("inference_engine", - ClientConstants.INFERENCE_ENGINE_TYPE_INT_DEFAULT) - inference_end_point_id = run_id - - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - logging.info(f"[Worker] Received model deployment request from master for endpoint {run_id}.") - if self.replica_handler is not None: - logging.info(f"=================Worker replica Handler ======================" - f"Reconcile with num diff {self.replica_handler.replica_num_diff} " - f"and version diff {self.replica_handler.replica_version_diff}." 
- f"=============================================================") - else: - logging.error(f"[Worker] Replica handler is None.") - return False - - self.check_runner_stop_event() - - # Report the deployment status to mlops - self.mlops_metrics.report_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING, - is_from_model=True, running_json=json.dumps(self.request_json), run_id=run_id) - self.mlops_metrics.report_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, - is_from_model=True, run_id=run_id) - - self.check_runner_stop_event() - - # Reconcile the replica number (op: add, remove) - prev_rank, op, op_num = self.replica_handler.reconcile_num_replica() - - # Reconcile the replica version (op: update) - replica_rank_to_update = [] - if not op: - replica_rank_to_update, op = self.replica_handler.reconcile_replica_version() - - if not op: - logging.info("[Worker] No need to reconcile.") - return True - - logging.info( - f"================Worker Reconcile Operations ======================\n" - f" op: {op}; op num: {op_num}.\n" - f"==================================================================\n") - - # If not rollback, download package from MLOps; otherwise, use the backup package - if op != "rollback": - logging.info("Download and unzip model to local...") - unzip_package_path, _, _ = \ - self.update_local_fedml_config(run_id, model_config, model_config_parameters) - if unzip_package_path is None: - logging.info("Failed to update local fedml config.") - self.check_runner_stop_event() - self.cleanup_run_when_starting_failed() - self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - return False - - if not os.path.exists(unzip_package_path): - logging.info("Failed to unzip file.") - self.check_runner_stop_event() - self.cleanup_run_when_starting_failed() - self.mlops_metrics.client_send_exit_train_msg(run_id, self.edge_id, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - return False - else: - logging.info("Try to use backup package to rollback...") - # Find folder under "~/.fedml/fedml-model-client/fedml/model_packages \ - # /${end_point_id}_${end_point_name}_${model_name}_${model_version}" - backup_folder_full_path = None - models_root_dir = ClientConstants.get_model_package_dir() - - # Find the version (notified by master) to rollback - version_diff_dict = self.request_json["replica_version_diff"][str(self.edge_id)] - version_rollback_to = None - for replica_no, rollback_ops in version_diff_dict.items(): - version_rollback_to = rollback_ops["new_version"] # Note that new_version is the version to rollback - break - if version_rollback_to is None: - logging.error(f"No old version found for run_id: {self.run_id} " - f"edge_id: {self.edge_id}, rollback failed. 
No old version found in request_json.") - return False - model_version = version_rollback_to - - # Format the version to match the folder name - model_version_formatted = version_rollback_to.replace(" ", "-") - model_version_formatted = model_version_formatted.replace(":", "-") - - last_run_folder_sub_fd = f"{run_id}_{end_point_name}_{model_name}_{model_version_formatted}" - for folder in os.listdir(models_root_dir): - if last_run_folder_sub_fd in folder: - backup_folder_full_path = os.path.join(models_root_dir, folder) - break - if backup_folder_full_path is None: - logging.error(f"No backup folder found for run_id: {self.run_id} edge_id: {self.edge_id} " - f"under {models_root_dir} with sub folder {last_run_folder_sub_fd}, rollback failed.") - return False - - # Inside backup folder, find unzipped package with prefix unzip_fedml_run - unzip_package_path_parent = None - for folder in os.listdir(backup_folder_full_path): - if folder.startswith("unzip_fedml_run"): - unzip_package_path_parent = os.path.join(backup_folder_full_path, folder) - break - - # Inside unzip folder, find the unzipped package, should be the only one - unzip_package_path = None - for folder in os.listdir(unzip_package_path_parent): - if os.path.isdir(os.path.join(unzip_package_path_parent, folder)): - unzip_package_path = os.path.join(unzip_package_path_parent, folder) - break - - if unzip_package_path is None: - logging.error(f"No unzipped package found for run_id: {self.run_id} edge_id: {self.edge_id} " - f"under {backup_folder_full_path}, rollback failed.") - return False - - self.check_runner_stop_event() - - running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ - "", "", model_version, {}, {} - - if op == "add": - worker_ip = self.get_ip_address(self.request_json) - for rank in range(prev_rank + 1, prev_rank + 1 + op_num): - try: - running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ - start_deployment( - end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id, - model_version=model_version, model_storage_local_path=unzip_package_path, - inference_model_name=model_name, inference_engine=inference_engine, - infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id, - master_device_id=device_ids[0], replica_rank=rank, - gpu_per_replica=int(self.replica_handler.gpu_per_replica) - ) - except Exception as e: - inference_output_url = "" - logging.error(f"[Worker] Exception at deployment: {traceback.format_exc()}") - - if inference_output_url == "": - logging.error("[Worker] Failed to deploy the model.") - - # Release the gpu occupancy - FedMLModelCache.get_instance().set_redis_params() - replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids( - run_id, end_point_name, model_name, self.edge_id, rank + 1) - logging.info(f"Release gpu ids {replica_occupied_gpu_ids_str} for " - f"failed deployment of replica no {rank + 1}.") - - if replica_occupied_gpu_ids_str is not None: - replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str) - JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, - self.edge_id, replica_occupied_gpu_ids) - - # Send failed result back to master - result_payload = self.send_deployment_results( - end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, - model_id, model_name, inference_output_url, inference_model_version, inference_port, - inference_engine, model_metadata, model_config) - - 
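The failure branch above depends on the per-replica GPU bookkeeping: each replica records its occupied GPU ids as a JSON-encoded list keyed by (run, endpoint name, model name, device id, replica number), so a failed deployment can hand back exactly its own share without disturbing sibling replicas. A sketch of that decode-and-release step, where cache and scheduler are hypothetical stand-ins for FedMLModelCache and JobRunnerUtils:

    import json

    def release_replica_gpus(cache, scheduler, run_id, end_point_name,
                             model_name, edge_id, replica_no):
        # The cache stores the ids as a JSON string such as "[0, 1]"; a missing
        # entry means this replica never acquired GPUs, so nothing to release.
        gpu_ids_str = cache.get_replica_gpu_ids(
            run_id, end_point_name, model_name, edge_id, replica_no)
        if gpu_ids_str is None:
            return
        # Release only this replica's share of the job's GPUs.
        scheduler.release_partial_job_gpu(run_id, edge_id, json.loads(gpu_ids_str))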
self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.broadcast_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=self.run_id) - - self.mlops_metrics.client_send_exit_train_msg( - run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - - return False - else: - # Deployment succeeded; send the successful result back to the master - logging.info("Finished deployment, continue to send results to master...") - result_payload = self.send_deployment_results( - end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, - model_id, model_name, inference_output_url, model_version, inference_port_external, - inference_engine, model_metadata, model_config, replica_no=rank + 1) - - if inference_port_external != inference_port: - # Save internal port to local db - logging.info("inference_port_external {} != inference_port {}".format( - inference_port_external, inference_port)) - result_payload = self.construct_deployment_results( - end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, - model_id, model_name, inference_output_url, model_version, inference_port, - inference_engine, model_metadata, model_config, replica_no=rank + 1) - - FedMLModelDatabase.get_instance().set_deployment_result( - run_id, end_point_name, model_name, model_version, self.edge_id, - json.dumps(result_payload), replica_no=rank + 1) - - logging.info(f"Deploy replica {rank + 1} / {prev_rank + 1 + op_num} successfully.") - time.sleep(5) - - time.sleep(1) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.broadcast_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - is_from_model=True, run_id=self.run_id) - return True - elif op == "remove": - for rank_to_delete in range(prev_rank, prev_rank - op_num, -1): - self.replica_handler.remove_replica(rank_to_delete) - - FedMLModelCache.get_instance().set_redis_params() - replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids( - run_id, end_point_name, model_name, self.edge_id, rank_to_delete + 1) - - replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str) - - JobRunnerUtils.get_instance().release_partial_job_gpu(run_id, self.edge_id, replica_occupied_gpu_ids) - - FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank( - run_id, end_point_name, model_name, self.edge_id, rank_to_delete) - - # Report the deletion msg to the master - result_payload = self.send_deployment_results( - end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED, - model_id, model_name, inference_output_url, model_version, inference_port_external, - inference_engine, model_metadata, model_config, replica_no=rank_to_delete + 1) - - time.sleep(1) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.broadcast_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - is_from_model=True, run_id=self.run_id) - - # TODO: If all replicas are deleted, also delete the job and related resources - if rank_to_delete == 0: - pass - return True - elif op == "update" or op == "rollback": - # Update is a combination of delete and add - worker_ip = self.get_ip_address(self.request_json) - for rank in replica_rank_to_update: - # Delete a replica (container) if it exists - self.replica_handler.remove_replica(rank) - - FedMLModelCache.get_instance().set_redis_params() - replica_occupied_gpu_ids_str =
FedMLModelCache.get_instance().get_replica_gpu_ids( - run_id, end_point_name, model_name, self.edge_id, rank + 1) - - replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str) - logging.info(f"Release gpu ids {replica_occupied_gpu_ids} for update / rollback.") - - # TODO (Raphael) check if this will allow another job to seize the gpu during high concurrency: - try: - JobRunnerUtils.get_instance().release_partial_job_gpu( - run_id, self.edge_id, replica_occupied_gpu_ids) - except Exception as e: - if op == "rollback": - pass - else: - logging.error(f"Failed to release gpu ids {replica_occupied_gpu_ids} for update.") - return False - - # Delete the deployment result from local db - FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id_and_rank( - run_id, end_point_name, model_name, self.edge_id, rank) - - logging.info(f"Delete replica with no {rank + 1} successfully.") - time.sleep(1) - - # Add a replica (container) - # TODO: Reduce the duplicated code - logging.info(f"Start to deploy the model with replica no {rank + 1} ...") - try: - running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ - start_deployment( - end_point_id=inference_end_point_id, end_point_name=end_point_name, model_id=model_id, - model_version=model_version, model_storage_local_path=unzip_package_path, - inference_model_name=model_name, inference_engine=inference_engine, - infer_host=worker_ip, master_ip=master_ip, edge_id=self.edge_id, - master_device_id=device_ids[0], replica_rank=rank, - gpu_per_replica=int(self.replica_handler.gpu_per_replica) - ) - except Exception as e: - inference_output_url = "" - logging.error(f"Exception at deployment: {traceback.format_exc()}") - - if inference_output_url == "": - logging.error("Failed to deploy the model...") - - # If update failed, should release this replica's gpu - FedMLModelCache.get_instance().set_redis_params() - replica_occupied_gpu_ids_str = FedMLModelCache.get_instance().get_replica_gpu_ids( - run_id, end_point_name, model_name, self.edge_id, rank + 1) - - replica_occupied_gpu_ids = json.loads(replica_occupied_gpu_ids_str) - - JobRunnerUtils.get_instance().release_partial_job_gpu( - run_id, self.edge_id, replica_occupied_gpu_ids) - - result_payload = self.send_deployment_results( - end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED, - model_id, model_name, inference_output_url, inference_model_version, inference_port, - inference_engine, model_metadata, model_config) - - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.broadcast_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=self.run_id) - - self.mlops_metrics.client_send_exit_train_msg( - run_id, self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - - return False - else: - logging.info("Finished deployment, continue to send results to master...") - result_payload = self.send_deployment_results( - end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, - model_id, model_name, inference_output_url, model_version, inference_port_external, - inference_engine, model_metadata, model_config, replica_no=rank + 1) - - if inference_port_external != inference_port: # Save internal port to local db - logging.info("inference_port_external {} != inference_port {}".format( - inference_port_external, inference_port)) - result_payload = self.construct_deployment_results( - end_point_name, self.edge_id, 
ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, - model_id, model_name, inference_output_url, model_version, inference_port, - inference_engine, model_metadata, model_config, replica_no=rank + 1) - - FedMLModelDatabase.get_instance().set_deployment_result( - run_id, end_point_name, model_name, model_version, self.edge_id, - json.dumps(result_payload), replica_no=rank + 1) - - logging.info(f"Update replica with no {rank + 1} successfully. Op num {op_num}") - time.sleep(5) - time.sleep(1) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.broadcast_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - is_from_model=True, run_id=self.run_id) - return True - - else: - # The delete op will be handled by callback_delete_deployment - logging.error(f"Unsupported op {op} with op num {op_num}") - return False - - def construct_deployment_results(self, end_point_name, device_id, model_status, - model_id, model_name, model_inference_url, - model_version, inference_port, inference_engine, - model_metadata, model_config, replica_no=1): - deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name, - "model_id": model_id, "model_name": model_name, - "model_url": model_inference_url, "model_version": model_version, - "port": inference_port, - "inference_engine": inference_engine, - "model_metadata": model_metadata, - "model_config": model_config, - "model_status": model_status, - "inference_port": inference_port, - "replica_no": replica_no, - } - return deployment_results_payload - - def construct_deployment_status(self, end_point_name, device_id, - model_id, model_name, model_version, - model_inference_url, model_status, - inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT, - replica_no=1, # start from 1 - ): - deployment_status_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name, - "device_id": device_id, - "model_id": model_id, "model_name": model_name, - "model_version": model_version, - "model_url": model_inference_url, "model_status": model_status, - "inference_port": inference_port, - "replica_no": replica_no, - } - return deployment_status_payload - - def send_deployment_results(self, end_point_name, device_id, model_status, - model_id, model_name, model_inference_url, - model_version, inference_port, inference_engine, - model_metadata, model_config, replica_no=1): - deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format( - self.run_id, device_id) - - deployment_results_payload = self.construct_deployment_results( - end_point_name, device_id, model_status, - model_id, model_name, model_inference_url, - model_version, inference_port, inference_engine, - model_metadata, model_config, replica_no=replica_no) - - logging.info("[client] send_deployment_results: topic {}, payload {}.".format(deployment_results_topic, - deployment_results_payload)) - self.client_mqtt_mgr.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload)) - return deployment_results_payload - - def send_deployment_status(self, end_point_name, device_id, - model_id, model_name, model_version, - model_inference_url, model_status, - inference_port=ClientConstants.MODEL_INFERENCE_DEFAULT_PORT, - replica_no=1, # start from 1 - ): - # Deprecated - pass - - def reset_devices_status(self, edge_id, status): - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = edge_id - self.mlops_metrics.broadcast_client_training_status( - edge_id, status, 
is_from_model=True, run_id=self.run_id) - - def cleanup_run_when_starting_failed(self): - logging.info("Cleanup run successfully when starting failed.") - - self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED) - - time.sleep(2) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - def cleanup_run_when_finished(self): - logging.info("Cleanup run successfully when finished.") - - self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED) - - time.sleep(2) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - def on_client_mqtt_disconnected(self, mqtt_client_object): - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_lock.acquire() - self.client_mqtt_is_connected = False - self.client_mqtt_lock.release() - - def on_client_mqtt_connected(self, mqtt_client_object): - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - - self.mlops_metrics.set_messenger(self.client_mqtt_mgr) - self.mlops_metrics.run_id = self.run_id - - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_lock.acquire() - self.client_mqtt_is_connected = True - self.client_mqtt_lock.release() - - def setup_client_mqtt_mgr(self): - if self.client_mqtt_mgr is not None: - return - - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_mgr = MqttManager( - self.agent_config["mqtt_config"]["BROKER_HOST"], - self.agent_config["mqtt_config"]["BROKER_PORT"], - self.agent_config["mqtt_config"]["MQTT_USER"], - self.agent_config["mqtt_config"]["MQTT_PWD"], - self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"], - "FedML_ModelClientAgent_Metrics_@{}@_{}_{}_{}".format(self.user_name, self.args.current_device_id, - str(os.getpid()), - str(uuid.uuid4())) - ) - - self.client_mqtt_mgr.add_connected_listener(self.on_client_mqtt_connected) - self.client_mqtt_mgr.add_disconnected_listener(self.on_client_mqtt_disconnected) - self.client_mqtt_mgr.connect() - self.client_mqtt_mgr.loop_start() - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.client_mqtt_mgr) - self.mlops_metrics.run_id = self.run_id - - def release_client_mqtt_mgr(self): - try: - if self.client_mqtt_mgr is not None: - self.client_mqtt_mgr.loop_stop() - self.client_mqtt_mgr.disconnect() - - self.client_mqtt_lock.acquire() - if self.client_mqtt_mgr is not None: - self.client_mqtt_is_connected = False - self.client_mqtt_mgr = None - self.client_mqtt_lock.release() - except Exception: - pass - - def ota_upgrade(self, payload, request_json): - run_id = request_json["end_point_id"] - force_ota = False - ota_version = None - - try: - parameters = request_json.get("parameters", None) - common_args = parameters.get("common_args", None) - force_ota = common_args.get("force_ota", False) - ota_version = common_args.get("ota_version", None) - except Exception as e: - pass - - if force_ota and ota_version is not None: - should_upgrade = True if ota_version != fedml.__version__ else False - upgrade_version = ota_version - else: - try: - fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version) - except Exception as e: - return - - should_upgrade = False if fedml_is_latest_version else True - upgrade_version = remote_ver - - if should_upgrade: - FedMLClientDataInterface.get_instance(). 
\ - save_started_job(run_id, self.edge_id, time.time(), - ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, - payload) - - logging.info(f"Upgrade to version {upgrade_version} ...") - - sys_utils.do_upgrade(self.version, upgrade_version) - - raise Exception("Restarting after upgraded...") - - def callback_start_deployment(self, topic, payload): - # Get deployment params - request_json = json.loads(payload) - run_id = request_json["end_point_id"] - inference_end_point_id = run_id - - try: - MLOpsConfigs.fetch_all_configs() - except Exception as e: - pass - - # Start log processor for current run - run_id = inference_end_point_id - self.args.run_id = run_id - self.args.edge_id = self.edge_id - MLOpsRuntimeLog(args=self.args).init_logs() - MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source( - ClientConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT) - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id) - - # self.ota_upgrade(payload, request_json) - - # Start client with multiprocessing mode - request_json["run_id"] = run_id - run_id_str = str(run_id) - self.request_json = request_json - self.running_request_json[run_id_str] = request_json - client_runner = FedMLClientRunner( - self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id - ) - client_runner.infer_host = self.get_ip_address(request_json) - self.run_process_event_map[run_id_str] = multiprocessing.Event() - self.run_process_event_map[run_id_str].clear() - client_runner.run_process_event = self.run_process_event_map[run_id_str] - self.run_process_completed_event_map[run_id_str] = multiprocessing.Event() - self.run_process_completed_event_map[run_id_str].clear() - client_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str] - self.model_runner_mapping[run_id_str] = client_runner - - # Replica Handler will be init for every deployment - replica_handler = FedMLDeviceReplicaHandler(self.edge_id, self.request_json) - client_runner.replica_handler = replica_handler - - self.run_id = run_id - self.run_process_map[run_id_str] = Process(target=client_runner.run, args=( - self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str] - )) - - self.run_process_map[run_id_str].start() - ClientConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - ClientConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id) - - def set_runner_stopped_event(self, run_id): - run_id_str = str(run_id) - client_runner = self.model_runner_mapping.get(run_id_str, None) - if client_runner is not None: - if client_runner.run_process_event is not None: - client_runner.run_process_event.set() - self.model_runner_mapping.pop(run_id_str) - - def set_runner_completed_event(self, run_id): - run_id_str = str(run_id) - client_runner = self.model_runner_mapping.get(run_id_str, None) - if client_runner is not None: - if client_runner.run_process_completed_event is not None: - client_runner.run_process_completed_event.set() - self.model_runner_mapping.pop(run_id_str) - - def callback_delete_deployment(self, topic, payload): - logging.info("[Worker] callback_delete_deployment") - - # Parse payload as the model message object. 
- model_msg_object = FedMLModelMsgObject(topic, payload) - - # Delete all replicas on this device - try: - ClientConstants.remove_deployment( - model_msg_object.end_point_name, model_msg_object.model_name, model_msg_object.model_version, - model_msg_object.run_id, model_msg_object.model_id, edge_id=self.edge_id) - except Exception as e: - logging.info(f"Exception when removing deployment {traceback.format_exc()}") - pass - - self.set_runner_stopped_event(model_msg_object.run_id) - - logging.info(f"[endpoint/device][{model_msg_object.run_id}/{self.edge_id}] " - f"Release gpu resource when the worker deployment is deleted.") - JobRunnerUtils.get_instance().release_gpu_ids(model_msg_object.run_id, self.edge_id) - - if self.running_request_json.get(str(model_msg_object.run_id)) is not None: - try: - self.running_request_json.pop(str(model_msg_object.run_id)) - except Exception as e: - logging.error(f"Error when removing running_request_json: {traceback.format_exc()}") - pass - - FedMLClientDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) - FedMLModelDatabase.get_instance().delete_deployment_result_with_device_id( - model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, - self.edge_id) - - # Delete FEDML_GLOBAL_ENDPOINT_RUN_ID_MAP_TAG-${run_id} both in redis and local db - ComputeCacheManager.get_instance().gpu_cache.delete_endpoint_run_id_map(str(model_msg_object.run_id)) - - # Delete FEDML_EDGE_ID_MODEL_DEVICE_ID_MAP_TAG-${run_id} both in redis and local db - ComputeCacheManager.get_instance().gpu_cache.delete_edge_model_id_map(str(model_msg_object.run_id)) - - # Delete FEDML_GLOBAL_DEVICE_RUN_GPU_IDS_TAG-${run_id}-${device_id} both in redis and local db - ComputeCacheManager.get_instance().gpu_cache.delete_device_run_gpu_ids(str(self.edge_id), - str(model_msg_object.run_id)) - - # Delete FEDML_GLOBAL_DEVICE_RUN_NUM_GPUS_TAG-${run_id}-${device_id} both in redis and local db - ComputeCacheManager.get_instance().gpu_cache.delete_device_run_num_gpus(str(self.edge_id), - str(model_msg_object.run_id)) - - # Delete FEDML_MODEL_REPLICA_GPU_IDS_TAG-${run_id}-${end_point_name}-${model_name}-${device_id}-* - FedMLModelCache.get_instance().set_redis_params() - FedMLModelCache.get_instance().delete_all_replica_gpu_ids(model_msg_object.run_id, - model_msg_object.end_point_name, - model_msg_object.model_name, self.edge_id) - - def exit_run_with_exception_entry(self): - try: - self.setup_client_mqtt_mgr() - self.exit_run_with_exception() - except Exception as e: - self.release_client_mqtt_mgr() - sys.exit(1) - finally: - self.release_client_mqtt_mgr() - - def exit_run_with_exception(self): - logging.info("Exiting the run due to an exception.") - - ClientConstants.cleanup_learning_process(self.run_id) - ClientConstants.cleanup_run_process(self.run_id) - - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - is_from_model=True, run_id=self.run_id) - - time.sleep(1) - - def callback_exit_train_with_exception(self, topic, payload): - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json.get("runId", None) - if run_id is None: - run_id = request_json.get("run_id", None) - if run_id is None: - run_id = request_json.get("id", None) - - if run_id is None: - return - - # Stop client with multiprocessing mode - self.request_json = request_json - client_runner = FedMLClientRunner( - self.args, edge_id=self.edge_id, request_json=request_json,
agent_config=self.agent_config, run_id=run_id - ) - try: - Process(target=client_runner.exit_run_with_exception_entry).start() - except Exception as e: - pass - - def cleanup_client_with_status(self): - self.setup_client_mqtt_mgr() - - if self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED: - self.cleanup_run_when_finished() - elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED: - self.cleanup_run_when_starting_failed() - - self.release_client_mqtt_mgr() - - def callback_runner_id_status(self, topic, payload): - # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload)) - - request_json = json.loads(payload) - run_id = request_json["run_id"] - edge_id = request_json["edge_id"] - status = request_json["status"] - - self.save_training_status(edge_id, status) - - if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED: - # Stop client with multiprocessing mode - self.request_json = request_json - client_runner = FedMLClientRunner( - self.args, - edge_id=self.edge_id, - request_json=request_json, - agent_config=self.agent_config, - run_id=run_id, - ) - client_runner.device_status = status - status_process = Process(target=client_runner.cleanup_client_with_status) - status_process.start() - status_process.join(15) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id) - - def callback_report_current_status(self, topic, payload): - self.send_agent_active_msg() - - @staticmethod - def process_ota_upgrade_msg(): - os.system("pip install -U fedml") - - def callback_client_ota_msg(self, topic, payload): - request_json = json.loads(payload) - cmd = request_json["cmd"] - - if cmd == ClientConstants.FEDML_OTA_CMD_UPGRADE: - FedMLClientRunner.process_ota_upgrade_msg() - # Process(target=FedMLClientRunner.process_ota_upgrade_msg).start() - raise Exception("After upgraded, restart runner...") - elif cmd == ClientConstants.FEDML_OTA_CMD_RESTART: - raise Exception("Restart runner...") - - def save_training_status(self, edge_id, training_status): - self.current_training_status = training_status - ClientConstants.save_training_infos(edge_id, training_status) - - @staticmethod - def get_device_id(): - device_file_path = os.path.join(ClientConstants.get_data_dir(), - ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME) - file_for_device_id = os.path.join(device_file_path, "devices.id") - if not os.path.exists(device_file_path): - os.makedirs(device_file_path) - elif os.path.exists(file_for_device_id): - with open(file_for_device_id, 'r', encoding='utf-8') as f: - device_id_from_file = f.readline() - if device_id_from_file is not None and device_id_from_file != "": - return device_id_from_file - - if platform.system() == "Darwin": - cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \ - "|awk -F':' '{print $2}' " - device_id = os.popen(cmd_get_serial_num).read() - device_id = device_id.replace('\n', '').replace(' ', '') - if device_id is None or device_id == "": - device_id = hex(uuid.getnode()) - else: - device_id = "0x" + device_id - else: - if "nt" in os.name: - - def get_uuid(): - guid = "" - try: - cmd = "wmic csproduct get uuid" - guid = str(subprocess.check_output(cmd)) - pos1 = guid.find("\\n") + 2 - guid = guid[pos1:-15] - except Exception as ex: - pass - return str(guid) - - device_id = str(get_uuid()) - logging.info(device_id) - elif "posix" in 
os.name: - device_id = sys_utils.get_device_id_in_docker() - if device_id is None: - device_id = hex(uuid.getnode()) - else: - device_id = sys_utils.run_subprocess_open( - "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split() - ) - device_id = hex(device_id) - - if device_id is not None and device_id != "": - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - else: - device_id = hex(uuid.uuid4()) - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - - return device_id - - def get_ip_address(self, request_json): - # OPTION 1: Use local ip - ip = ClientConstants.get_local_ip() - - # OPTION 2: Auto detect public ip - if "parameters" in request_json and \ - ClientConstants.AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \ - request_json["parameters"][ClientConstants.AUTO_DETECT_PUBLIC_IP]: - ip = ClientConstants.get_public_ip() - logging.info("Auto detect public ip for worker: " + ip) - - # OPTION 3: Use user indicated ip - if self.infer_host is not None and self.infer_host != "127.0.0.1" and self.infer_host != "localhost": - ip = self.infer_host - - return ip - - def bind_account_and_device_id(self, url, account_id, device_id, os_name, role="md.on_premise_device"): - ip = requests.get('https://checkip.amazonaws.com').text.strip() - fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \ - cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \ - gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info() - host_name = sys_utils.get_host_name() - json_params = { - "accountid": account_id, - "deviceid": device_id, - "state": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - "type": os_name, - "processor": cpu_info, - "core_type": cpu_info, - "network": "", - "role": role, - "os_ver": os_ver, - "memory": total_mem, - "ip": ip, - "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver, - "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver, - "mpi_installed": mpi_installed, "cpu_usage": cpu_usage, - "available_mem": available_mem, "total_mem": total_mem, - "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name} - } - if gpu_count > 0: - if gpu_total_mem is not None: - json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem - else: - json_params["gpu"] = gpu_info if gpu_info is not None else "" - json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else "" - if gpu_available_mem is not None: - json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem - if gpu_total_mem is not None: - json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem - - json_params["extra_infos"]["gpu_count"] = gpu_count - json_params["extra_infos"]["gpu_vendor"] = gpu_vendor - json_params["extra_infos"]["gpu_device_name"] = gpu_device_name - - gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count) - gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0 - gpu_list = sys_utils.get_gpu_list() - json_params["extra_infos"]["gpu_available_count"] = gpu_available_count - json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list - json_params["extra_infos"]["gpu_list"] = gpu_list - else: - json_params["gpu"] = "None" - json_params["extra_infos"]["gpu_available_count"] = 0 - 
json_params["extra_infos"]["gpu_available_id_list"] = [] - json_params["extra_infos"]["gpu_list"] = [] - - _, cert_path = MLOpsConfigs.get_request_params() - if cert_path is not None: - try: - requests.session().verify = cert_path - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - except requests.exceptions.SSLError as err: - MLOpsConfigs.install_root_ca_file() - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - else: - response = requests.post(url, json=json_params, headers={"Connection": "close"}) - edge_id = -1 - user_name = None - extra_url = None - if response.status_code != 200: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - pass - else: - # print("url = {}, response = {}".format(url, response)) - status_code = response.json().get("code") - if status_code == "SUCCESS": - edge_id = response.json().get("data").get("id") - user_name = response.json().get("data").get("userName", None) - extra_url = response.json().get("data").get("url", None) - if edge_id is None or edge_id <= 0: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - else: - if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR: - raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR) - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - return -1, None, None - return edge_id, user_name, extra_url - - def fetch_configs(self): - return MLOpsConfigs.fetch_all_configs() - - def send_agent_active_msg(self): - active_topic = "flclient_agent/active" - status = MLOpsStatus.get_instance().get_client_agent_status(self.edge_id) - if ( - status is not None - and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE - and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE - ): - return - - try: - current_job = FedMLClientDataInterface.get_instance().get_job_by_id(self.run_id) - except Exception as e: - current_job = None - if current_job is None: - if status is not None and status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE: - status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE - else: - return - else: - status = ClientConstants.get_device_state_from_run_edge_state(current_job.status) - active_msg = {"ID": self.edge_id, "status": status} - MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, status) - self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg)) - - def recover_start_deployment_msg_after_upgrading(self): - try: - current_job = FedMLClientDataInterface.get_instance().get_current_job() - if current_job is not None and \ - current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING: - logging.info("start deployment after upgrading.") - topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id)) - self.callback_start_deployment(topic_start_deployment, current_job.running_json) - except Exception as e: - logging.info("recover starting deployment message after upgrading: {}".format(traceback.format_exc())) - - def on_agent_mqtt_connected(self, mqtt_client_object): - # The MQTT message topic format is as follows: // - - # Setup MQTT message listener for starting deployment - topic_start_deployment = 
"model_ops/model_device/start_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_start_deployment, self.callback_start_deployment) - - # Setup MQTT message listener for delete deployment - topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_delete_deployment, self.callback_delete_deployment) - - # Setup MQTT message listener for running failed - topic_exit_train_with_exception = "flserver_agent/" + str(self.edge_id) + "/exit_train_with_exception" - self.mqtt_mgr.add_message_listener(topic_exit_train_with_exception, self.callback_exit_train_with_exception) - - # Setup MQTT message listener for client status switching - topic_client_status = "fl_client/flclient_agent_" + str(self.edge_id) + "/status" - self.mqtt_mgr.add_message_listener(topic_client_status, self.callback_runner_id_status) - - # Setup MQTT message listener to report current device status. - topic_report_status = "mlops/report_device_status" - self.mqtt_mgr.add_message_listener(topic_report_status, self.callback_report_current_status) - - # Setup MQTT message listener to OTA messages from the MLOps. - topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota" - self.mqtt_mgr.add_message_listener(topic_ota_msg, self.callback_client_ota_msg) - - if self.mqtt_inference_obj is None: - self.mqtt_inference_obj = FedMLMqttInference(agent_config=self.agent_config, mqtt_mgr=self.mqtt_mgr) - self.mqtt_inference_obj.setup_listener_for_endpoint_inference_request(self.edge_id) - - # Subscribe topics for starting deployment, stopping deployment and fetching client status. - mqtt_client_object.subscribe(topic_start_deployment, qos=2) - mqtt_client_object.subscribe(topic_delete_deployment, qos=2) - mqtt_client_object.subscribe(topic_client_status, qos=2) - mqtt_client_object.subscribe(topic_report_status, qos=2) - mqtt_client_object.subscribe(topic_exit_train_with_exception, qos=2) - mqtt_client_object.subscribe(topic_ota_msg, qos=2) - - self.subscribed_topics.clear() - self.subscribed_topics.append(topic_start_deployment) - self.subscribed_topics.append(topic_delete_deployment) - self.subscribed_topics.append(topic_client_status) - self.subscribed_topics.append(topic_report_status) - self.subscribed_topics.append(topic_exit_train_with_exception) - self.subscribed_topics.append(topic_ota_msg) - - # Broadcast the first active message. 
- self.send_agent_active_msg() - - # Echo results - # print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!") - # print( - # "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is " - # + str(self.unique_device_id) - # + "\n" - # ) - - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - def on_agent_mqtt_disconnected(self, mqtt_client_object): - MLOpsStatus.get_instance().set_client_agent_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE - ) - - try: - if self.mqtt_inference_obj is not None: - self.mqtt_inference_obj.remove_listener_for_endpoint_inference_request(self.edge_id) - except Exception as e: - pass - - def setup_agent_mqtt_connection(self, service_config): - # Setup MQTT connection - self.mqtt_mgr = MqttManager( - service_config["mqtt_config"]["BROKER_HOST"], - service_config["mqtt_config"]["BROKER_PORT"], - service_config["mqtt_config"]["MQTT_USER"], - service_config["mqtt_config"]["MQTT_PWD"], - service_config["mqtt_config"]["MQTT_KEEPALIVE"], - "FedML_ModelClientAgent_Daemon_@" + self.user_name + "@_" + self.args.current_device_id + str(uuid.uuid4()), - "flclient_agent/last_will_msg", - json.dumps({"ID": self.edge_id, "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE}) - ) - self.agent_config = service_config - - # Init local database - FedMLClientDataInterface.get_instance().create_job_table() - try: - FedMLModelDatabase.get_instance().set_database_base_dir(ClientConstants.get_database_dir()) - FedMLModelDatabase.get_instance().create_table() - except Exception as e: - pass - - client_api_cmd = "fedml.computing.scheduler.model_scheduler.device_client_api:api" - client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd) - if client_api_pids is None or len(client_api_pids) <= 0: - # Start local API services - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - python_program = get_python_program() - self.local_api_process = ClientConstants.exec_console_with_script( - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - python_program, client_api_cmd, - ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir - ), - should_capture_stdout=False, - should_capture_stderr=False - ) - # if self.local_api_process is not None and self.local_api_process.pid is not None: - # print(f"Model worker local API process id {self.local_api_process.pid}") - - # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor() - - # Setup MQTT connected listener - self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected) - self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected) - self.mqtt_mgr.connect() - - self.setup_client_mqtt_mgr() - self.mlops_metrics.report_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, is_from_model=True) - MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE) - - self.recover_start_deployment_msg_after_upgrading() - - def stop_agent(self): - if self.run_process_event is not None: - self.run_process_event.set() - - if self.mqtt_mgr is not None: - try: - for topic in self.subscribed_topics: - self.mqtt_mgr.unsubscribe_msg(topic) - except Exception as e: - pass - - self.mqtt_mgr.loop_stop() - self.mqtt_mgr.disconnect() - - self.release_client_mqtt_mgr() - - def start_agent_mqtt_loop(self, 
should_exit_sys=False): - # Start MQTT message loop - try: - self.mqtt_mgr.loop_forever() - except Exception as e: - if str(e) == "Restarting after upgraded...": - logging.info("Restarting after upgraded...") - else: - logging.info("Client tracing: {}".format(traceback.format_exc())) - finally: - self.stop_agent() - - if should_exit_sys: - time.sleep(5) - sys.exit(1) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py b/python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py deleted file mode 100755 index 4bcac6d2db..0000000000 --- a/python/fedml/computing/scheduler/model_scheduler/device_server_runner_deprecated.py +++ /dev/null @@ -1,2022 +0,0 @@ -import copy -import json -import logging -import multiprocessing -import platform -import sys - -from multiprocessing import Process -import os -import shutil -import subprocess -import threading - -import time -import traceback -import urllib -import uuid -import zipfile -from os import listdir - -import requests -import torch - -import fedml -from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils -from fedml.core.mlops.mlops_runtime_log import MLOpsFormatter - -from ..comm_utils import sys_utils -from .device_server_data_interface import FedMLServerDataInterface -from ..scheduler_core.endpoint_sync_protocol import FedMLEndpointSyncProtocol -from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog - -from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager -from ..comm_utils.yaml_utils import load_yaml_config -from .device_client_constants import ClientConstants -from .device_server_constants import ServerConstants - -from ....core.mlops.mlops_metrics import MLOpsMetrics - -from ....core.mlops.mlops_configs import MLOpsConfigs -from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon -from ....core.mlops.mlops_status import MLOpsStatus -from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program -from .device_model_cache import FedMLModelCache -from .device_model_msg_object import FedMLModelMsgObject -from ....core.mlops.mlops_utils import MLOpsUtils -from ..comm_utils.constants import SchedulerConstants -from .device_model_db import FedMLModelDatabase -from .device_replica_controller import FedMLDeviceReplicaController - - -class RunnerError(BaseException): - """ Runner failed. """ - pass - - -class RunnerCompletedError(Exception): - """ Runner completed. 
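Raised by check_runner_stop_event() when the run's completed event is set, so the runner can unwind and release resources.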
""" - pass - - -class FedMLServerRunner: - FEDML_CLOUD_SERVER_PREFIX = "fedml-server-run-" - - def __init__(self, args, run_id=0, request_json=None, agent_config=None, edge_id=0): - self.inference_gateway_process = None - self.local_api_process = None - self.run_process_event = None - self.run_process_event_map = dict() - self.run_process_completed_event = None - self.run_process_completed_event_map = dict() - self.run_as_cloud_agent = False - self.run_as_cloud_server = False - self.run_as_edge_server_and_agent = False - self.run_as_cloud_server_and_agent = False - self.fedml_packages_base_dir = None - self.fedml_packages_unzip_dir = None - self.mqtt_mgr = None - self.running_request_json = dict() - self.run_id = run_id - self.client_mqtt_mgr = None - self.client_mqtt_is_connected = False - self.client_mqtt_lock = None - self.unique_device_id = None - self.edge_id = edge_id - self.server_agent_id = 0 - if request_json is not None: - self.server_agent_id = request_json.get("server_id", 0) - self.process = None - self.args = args - self.request_json = copy.deepcopy(request_json) - self.version = args.version - self.device_id = args.device_id - self.cur_dir = os.path.split(os.path.realpath(__file__))[0] - if args.current_running_dir is not None: - self.cur_dir = args.current_running_dir - - self.agent_config = agent_config - self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data") - self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data") - self.fedml_data_dir = self.fedml_data_base_package_dir - self.fedml_config_dir = os.path.join("/", "fedml", "conf") - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = {} - - self.mlops_metrics = None - self.run_status = None - self.infer_host = "127.0.0.1" - self.redis_addr = "local" - self.redis_port = "6379" - self.redis_password = "fedml_default" - - self.slave_deployment_statuses_mapping = dict() - self.slave_deployment_results_mapping = dict() - self.slave_update_result_mapping = dict() - - self.model_runner_mapping = dict() - self.ntp_offset = MLOpsUtils.get_ntp_offset() - - self.subscribed_topics = list() - self.user_name = None - - self.replica_controller = None - self.deployed_replica_payload = None - - self.autoscaler_launcher = None - - def build_dynamic_constrain_variables(self, run_id, run_config): - pass - - def unzip_file(self, zip_file, unzip_file_path): - unziped_file_name = "" - if zipfile.is_zipfile(zip_file): - with zipfile.ZipFile(zip_file, "r") as zipf: - zipf.extractall(unzip_file_path) - unziped_file_name = zipf.namelist()[0] - - return unziped_file_name - - def package_download_progress(self, count, blksize, filesize): - self.check_runner_stop_event() - - downloaded = count * blksize - downloaded = filesize if downloaded > filesize else downloaded - progress = (downloaded / filesize * 100) if filesize != 0 else 0 - progress_int = int(progress) - downloaded_kb = format(downloaded / 1024, '.2f') - - # since this hook function is stateless, we need a state to avoid printing progress repeatedly - if count == 0: - self.prev_download_progress = 0 - if progress_int != self.prev_download_progress and progress_int % 5 == 0: - self.prev_download_progress = progress_int - logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) - - def retrieve_and_unzip_package(self, package_name, package_url): - local_package_path = ServerConstants.get_model_package_dir() - if not os.path.exists(local_package_path): - os.makedirs(local_package_path, exist_ok=True) - 
local_package_file = "{}.zip".format(os.path.join(local_package_path, package_name)) - if os.path.exists(local_package_file): - os.remove(local_package_file) - - # Download without renaming - urllib.request.urlretrieve(package_url, filename=None, reporthook=self.package_download_progress) - - unzip_package_path = ServerConstants.get_model_dir() - self.fedml_packages_base_dir = unzip_package_path - try: - shutil.rmtree( - os.path.join(unzip_package_path, package_name), ignore_errors=True - ) - except Exception as e: - pass - logging.info("local_package_file {}, unzip_package_path {}".format( - local_package_file, unzip_package_path)) - package_name = self.unzip_file(local_package_file, unzip_package_path) - unzip_package_path = os.path.join(unzip_package_path, package_name) - return unzip_package_path - - def update_local_fedml_config(self, run_id, run_config): - model_config = run_config - model_name = model_config["model_name"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - inference_engine = model_config.get("inference_engine", 0) - inference_end_point_id = run_id - - # Copy config file from the client - unzip_package_path = self.retrieve_and_unzip_package( - model_name, model_storage_url - ) - fedml_local_config_file = os.path.join(unzip_package_path, "fedml_model_config.yaml") - - # Load the above config to memory - package_conf_object = {} - if os.path.exists(fedml_local_config_file): - package_conf_object = load_yaml_config(fedml_local_config_file) - - return unzip_package_path, package_conf_object - - def get_usr_indicated_token(self, request_json) -> str: - usr_indicated_token = "" - if "parameters" in request_json and "authentication_token" in request_json["parameters"]: - usr_indicated_token = request_json["parameters"]["authentication_token"] - return usr_indicated_token - - def build_dynamic_args(self, run_config, package_conf_object, base_dir): - pass - - def run(self, process_event, completed_event): - # print(f"Model master runner process id {os.getpid()}, run id {self.run_id}") - - if platform.system() != "Windows": - os.setsid() - - os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - self.run_process_completed_event = completed_event - run_id = self.request_json.get("end_point_id") - - try: - MLOpsUtils.set_ntp_offset(self.ntp_offset) - - self.setup_client_mqtt_mgr() - - self.run_impl() - except RunnerError: - logging.info("Runner stopped.") - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_KILLED, - is_from_model=True, edge_id=self.edge_id) - except RunnerCompletedError: - logging.info("Runner completed.") - except Exception as e: - logging.error("Runner exits with exceptions.") - logging.error(traceback.format_exc()) - logging.error(e) - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, - is_from_model=True, edge_id=self.edge_id) - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - if self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - sys.exit(1) - finally: - logging.info("[Master] Deployment finished, release resources.") - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - if 
self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - if not self.run_as_cloud_server: - self.release_client_mqtt_mgr() - - def parse_model_run_params(self, running_json): - run_id = running_json["end_point_id"] - end_point_name = running_json["end_point_name"] - token = running_json["token"] - user_id = running_json["user_id"] - user_name = running_json["user_name"] - device_ids = running_json["device_ids"] - device_objs = running_json["device_objs"] - - model_config = running_json["model_config"] - model_name = model_config["model_name"] - model_id = model_config["model_id"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - inference_engine = model_config.get("inference_engine", 0) - model_is_from_open = model_config["is_from_open"] - inference_end_point_id = run_id - use_gpu = "gpu" # TODO: Get GPU from device infos - memory_size = "256m" # TODO: Get Memory size for each instance - model_version = model_config["model_version"] - model_config_parameters = running_json.get("parameters", {}) - - inference_port = model_config_parameters.get("server_internal_port", # Internal port is for the gateway - ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) - inference_port_external = model_config_parameters.get("server_external_port", inference_port) - - return run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ - model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ - inference_end_point_id, use_gpu, memory_size, model_version, inference_port - - def inference_run(self): - # run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ - # model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ - # inference_end_point_id, use_gpu, memory_size, model_version, inference_port = - # self.parse_model_run_params(self.request_json) - # - # inference_server = FedMLModelServingServer(self.args, - # end_point_name, - # model_name, - # model_version, - # inference_request=self.request_json) - # inference_server.run() - pass - - def run_impl(self): - run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ - model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ - inference_end_point_id, use_gpu, memory_size, model_version, inference_port = self.parse_model_run_params( - self.request_json) - - # TODO(Raphael): This measurement is for the host machine. 
Change to container's metrics - self.mlops_metrics.report_sys_perf(self.args, self.agent_config["mqtt_config"], run_id=run_id) - - self.check_runner_stop_event() - - # Send stage: MODEL_DEPLOYMENT_STAGE4 = "ForwardRequest2Slave" - self.send_deployment_stages(self.run_id, model_name, model_id, - "", - ServerConstants.MODEL_DEPLOYMENT_STAGE4["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"], - ServerConstants.MODEL_DEPLOYMENT_STAGE4["text"]) - - self.args.run_id = self.run_id - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - # Report server running status - self.check_runner_stop_event() - self.mlops_metrics.report_server_training_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_STARTING, - is_from_model=True, running_json=json.dumps(self.request_json), edge_id=self.edge_id) - self.send_deployment_status(self.run_id, end_point_name, - model_name, "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING) - - # Start unified inference gateway if it has not started - self.start_device_inference_gateway( - run_id, end_point_name, model_id, model_name, model_version, inference_port=inference_port) - - # (re)Start inference monitor server - self.stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version) - self.start_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version) - - # Changed the master's status to "IDLE" - self.mlops_metrics.broadcast_server_training_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, - is_from_model=True, edge_id=self.edge_id) - - # Forward deployment request to slave devices - self.check_runner_stop_event() - - # Handle "op:add" && "op:remove" - devices_sent_add_or_remove_msg = self.send_deployment_start_request_to_edges() - - # Handle "op:update" - try: - devices_sent_update_remove_msg = self.send_first_scroll_update_msg() - - if len(devices_sent_add_or_remove_msg) == 0 and len(devices_sent_update_remove_msg) == 0: - # No device is added, updated or removed - logging.info("No device is added, updated or removed. No action needed for reconciliation.") - ip = self.get_ip_address(self.request_json) - master_port = os.getenv("FEDML_MASTER_PORT", None) - if master_port is not None: - inference_port = int(master_port) - model_inference_port = inference_port - if ip.startswith("http://") or ip.startswith("https://"): - model_inference_url = "{}/api/v1/predict".format(ip) - else: - model_inference_url = "http://{}:{}/api/v1/predict".format(ip, model_inference_port) - - self.set_runner_completed_event(run_id) - - self.send_deployment_status(run_id, end_point_name, - model_name, - model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED) - - # Set setting to "DEPLOYED" for autoscaling service reference - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - update_user_setting_replica_num(end_point_id=run_id, state="DEPLOYED") - - return - except Exception as e: - logging.error(f"Failed to send first scroll update message due to {e}.") - logging.error(f"Exception traceback {traceback.format_exc()}.") - - logging.info("Start waiting for result callback from workers ...") - - while True: - # Wait for all devices to finish the add / delete / update operation - self.check_runner_stop_event() - time.sleep(3) - - def check_runner_stop_event(self): - if self.run_process_event is not None and self.run_process_event.is_set(): - logging.info("Received stopping event.") - raise RunnerError("Runner stopped") - - if self.run_process_completed_event is not None and self.run_process_completed_event.is_set(): - logging.info("Received completed event.") - raise RunnerCompletedError("Runner completed") - - def start_device_inference_gateway( - self, run_id, end_point_name, model_id, - model_name, model_version, inference_port=ServerConstants.MODEL_INFERENCE_DEFAULT_PORT): - # start unified inference server - running_model_name = ServerConstants.get_running_model_name(end_point_name, - model_name, model_version, run_id, model_id) - python_program = get_python_program() - master_port = os.getenv("FEDML_MASTER_PORT", None) - if master_port is not None: - inference_port = int(master_port) - if not ServerConstants.is_running_on_k8s(): - logging.info(f"start the model inference gateway, end point {run_id}, " - f"model name {model_name} at port {inference_port}...") - self.check_runner_stop_event() - - use_mqtt_inference = os.getenv("FEDML_USE_MQTT_INFERENCE", "False") - use_mqtt_inference = True if use_mqtt_inference.lower() == 'true' else False - use_worker_gateway = os.getenv("FEDML_USE_WORKER_GATEWAY", "False") - use_worker_gateway = True if use_worker_gateway.lower() == 'true' else False - inference_gw_cmd = "fedml.computing.scheduler.model_scheduler.device_model_inference:api" - inference_gateway_pids = RunProcessUtils.get_pid_from_cmd_line(inference_gw_cmd) - if inference_gateway_pids is None or len(inference_gateway_pids) <= 0: - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - connect_str = "@FEDML@" - ext_info = sys_utils.random1( - self.agent_config["mqtt_config"]["BROKER_HOST"] + connect_str + - str(self.agent_config["mqtt_config"]["BROKER_PORT"]) + connect_str + - self.agent_config["mqtt_config"]["MQTT_USER"] + connect_str + - self.agent_config["mqtt_config"]["MQTT_PWD"] + connect_str + - str(self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"]), "FEDML@9999GREAT") - self.inference_gateway_process = ServerConstants.exec_console_with_script( - "REDIS_ADDR=\"{}\" REDIS_PORT=\"{}\" REDIS_PASSWORD=\"{}\" " - "END_POINT_NAME=\"{}\" " - "MODEL_NAME=\"{}\" MODEL_VERSION=\"{}\" MODEL_INFER_URL=\"{}\" VERSION=\"{}\" " - "USE_MQTT_INFERENCE={} USE_WORKER_GATEWAY={} EXT_INFO={} " - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - self.redis_addr, self.redis_port, self.redis_password, - end_point_name, - model_name, model_version, "", self.args.version, - use_mqtt_inference, use_worker_gateway, ext_info, - python_program, inference_gw_cmd, str(inference_port), fedml_base_dir - ), - should_capture_stdout=False, - should_capture_stderr=False - ) - - def start_device_inference_monitor(self, run_id, end_point_name, - model_id, model_name, model_version, check_stopped_event=True): - # start inference monitor server - # Will report 
the qps related metrics to the MLOps - logging.info(f"start the model inference monitor, end point {run_id}, model name {model_name}...") - if check_stopped_event: - self.check_runner_stop_event() - run_id_str = str(run_id) - pip_source_dir = os.path.dirname(__file__) - monitor_file = os.path.join(pip_source_dir, "device_model_monitor.py") - python_program = get_python_program() - running_model_name = ServerConstants.get_running_model_name(end_point_name, - model_name, model_version, run_id, model_id) - self.monitor_process = ServerConstants.exec_console_with_shell_script_list( - [ - python_program, - monitor_file, - "-v", - self.args.version, - "-ep", - run_id_str, - "-epn", - str(end_point_name), - "-mi", - str(model_id), - "-mn", - model_name, - "-mv", - model_version, - "-iu", - "infer_url", - "-ra", - self.redis_addr, - "-rp", - self.redis_port, - "-rpw", - self.redis_password - ], - should_capture_stdout=False, - should_capture_stderr=False - ) - - def stop_device_inference_monitor(self, run_id, end_point_name, model_id, model_name, model_version): - # stop inference monitor server - logging.info(f"stop the model inference monitor, end point {run_id}, model name {model_name}...") - sys_utils.cleanup_model_monitor_processes(run_id, end_point_name, - model_id, model_name, model_version) - - def cleanup_run_when_finished(self): - logging.info("Cleanup run successfully when finished.") - - self.mlops_metrics.broadcast_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED, - is_from_model=True, edge_id=self.edge_id - ) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - try: - local_package_path = ServerConstants.get_package_download_dir() - for package_file in listdir(local_package_path): - if os.path.basename(package_file).startswith("run_" + str(self.run_id)): - shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True) - except Exception as e: - pass - - def cleanup_run_when_starting_failed(self): - logging.info("Cleanup run successfully when starting failed.") - - self.mlops_metrics.broadcast_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, - is_from_model=True, edge_id=self.edge_id) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - pass - - time.sleep(1) - - try: - local_package_path = ServerConstants.get_package_download_dir() - for package_file in listdir(local_package_path): - if os.path.basename(package_file).startswith("run_" + str(self.run_id)): - shutil.rmtree(os.path.join(local_package_path, package_file), ignore_errors=True) - except Exception as e: - pass - - def cleanup_run_when_deploy_failed(self): - topic = f"model_ops/model_device/delete_deployment/{self.edge_id}" - self.callback_delete_deployment(topic, payload=json.dumps(self.request_json)) - - def callback_deployment_result_message(self, topic=None, payload=None): - """ - This method is called when a deployment result is received from a worker device. 
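- The worker device id is parsed from the last topic segment; the payload carries the endpoint id/name, model info, model_status and replica_no.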
- """ - # Save deployment result to local cache - topic_splits = str(topic).split('/') - device_id = topic_splits[-1] - payload_json = json.loads(payload) - end_point_id = payload_json["end_point_id"] - end_point_name = payload_json["end_point_name"] - model_id = payload_json["model_id"] - model_name = payload_json["model_name"] - model_version = payload_json["model_version"] - model_status = payload_json["model_status"] - replica_no = payload_json.get("replica_no", None) # "no" Idx start from 1 - run_id_str = str(end_point_id) - - # HotFix(Raphael): logging service cross talk - # Change the handler since each handler need to write to different log files - try: - # Remove the existing file handler - root_logger = logging.getLogger() - for handler in root_logger.handlers: - if isinstance(handler, logging.FileHandler): - root_logger.removeHandler(handler) - - # Correct log path: ~/.fedml/fedml-model-server/fedml/logs/fedml-run-$rid-edge-$eid.log - log_file = os.path.join(ServerConstants.get_log_file_dir(), - f"fedml-run-{run_id_str}-edge-{self.edge_id}.log") - - filehandler = logging.FileHandler(log_file, "a") - - program_prefix = "FedML-Server @device-id-{}".format(self.edge_id) - formatter = MLOpsFormatter(fmt="[" + program_prefix + "] [%(asctime)s] [%(levelname)s] " - "[%(filename)s:%(lineno)d:%(funcName)s] %(" - "message)s") - - filehandler.setFormatter(formatter) - root_logger.addHandler(filehandler) - except Exception as e: - logging.warning(f"Failed to change the logging handler due to {e}.") - - assert run_id_str in self.model_runner_mapping, (f"Run id {run_id_str} is not in the model runner mapping." - f"Current mapping {self.model_runner_mapping}.") - - logging.info("========== callback_deployment_result_message ==========\n") - # Identify the operation for this run (add, remove, update) - if run_id_str not in self.running_request_json: - logging.error(f"Run id {run_id_str} is not in the running request json.") - return - - # The rolling update and scale out / in operation should not happen at the same time - assert not ("replica_num_diff" in self.running_request_json[run_id_str] and - len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0 and - "replica_version_diff" in self.running_request_json[run_id_str]) - - if "replica_version_diff" in self.running_request_json[run_id_str]: - run_operation = "UPDATE" - elif "replica_num_diff" in self.running_request_json[run_id_str] and \ - len(self.running_request_json[run_id_str]["replica_num_diff"]) > 0: - run_operation = "ADD_OR_REMOVE" - else: - logging.error(f"Unsupported operation for run id {run_id_str}. 
and request json " - f"{self.running_request_json[run_id_str]}") - return - - logging.info(f"End point {end_point_id}; Device {device_id}; replica {replica_no}; " - f"run_operation {run_operation} model status {model_status}.") - - # OPTIONAL DEBUG PARAMS - # this_run_controller = self.model_runner_mapping[run_id_str].replica_controller - # logging.info(f"The current replica controller state is " - # f"Total version diff num {this_run_controller.total_replica_version_diff_num}") - # logging.info(f"self.request_json now {self.request_json}") # request_json will be deprecated - # this_run_request_json = self.running_request_json.get(run_id_str, None) - # logging.info(f"self.running_request_json now {this_run_request_json}") - - # Set redis + sqlite deployment result - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - - # Deal with different model status - if model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DELETED: - # remove - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - delete_deployment_result_with_device_id_and_replica_no( - end_point_id, end_point_name, model_name, device_id, replica_no) - elif model_status == ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED: - # add or update or update-failed-rollback - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_deployment_result(end_point_id, end_point_name, - model_name, model_version, - device_id, payload, replica_no) - - # Note: To display the result in the UI, we need to save successful deployment result to the database - self.model_runner_mapping[run_id_str].deployed_replica_payload = copy.deepcopy(payload_json) - else: - if model_status != ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED: - logging.error(f"Unsupported model status {model_status}.") - - # Avoid endless loop, if the rollback also failed, we should report the failure to the MLOps - if self.model_runner_mapping[run_id_str].replica_controller.under_rollback: - self.send_deployment_status( - end_point_id, end_point_name, payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - return - - # Failure handler, send the rollback message to the worker devices only if it has not been rollback - if run_operation == "ADD_OR_REMOVE": - # During Scale out / in, - # the worker that already been scaled out / in should be sent the rollback message - rollback_dict = self.model_runner_mapping[run_id_str].replica_controller.rollback_add_or_remove_replica( - device_id=device_id, replica_no=replica_no, op_type=run_operation - ) - self.model_runner_mapping[run_id_str].replica_controller.under_rollback = True - - if rollback_dict is not None and len(rollback_dict) > 0: - self.send_deployment_status( - end_point_id, end_point_name, payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING) - self.send_rollback_add_remove_op(run_id_str, rollback_dict) - return - else: - # This is the last worker that failed, so we should continue to "ABORTED" status - model_config_parameters = self.running_request_json[run_id_str]["parameters"] - inference_port = model_config_parameters.get("server_internal_port", - ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) - inference_port_external = model_config_parameters.get("server_external_port", inference_port) - ip = self.get_ip_address(self.running_request_json[run_id_str]) - if ip.startswith("http://") or ip.startswith("https://"): - model_inference_url = 
"{}/inference/{}".format(ip, end_point_id) - else: - model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, - end_point_id) - - self.send_deployment_status(end_point_id, end_point_name, - payload_json["model_name"], - model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED) - - # For auto-scaling, should update the state to "DEPLOYED" - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED") - - self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False - - return - elif run_operation == "UPDATE": - # Overwrite the json with the rollback version diff - rollback_version_diff = \ - self.model_runner_mapping[run_id_str].replica_controller.rollback_get_replica_version_diff( - device_id_trigger=device_id, replica_no_trigger=replica_no) - - # Change the target version to the start version - self.model_runner_mapping[run_id_str].replica_controller.rollback_setback_target_replica_version() - - self.running_request_json[run_id_str]["replica_version_diff"] = copy.deepcopy(rollback_version_diff) - - # Send the rollback message to the worker devices - self.send_rollback_msg(run_id_str) - - # Set the deployment status to ABORTING - self.send_deployment_status( - end_point_id, end_point_name, payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTING) - - # TODO(Raphael): Check if resource left not cleaned up - return - else: - logging.error(f"Unsupported operation {run_operation}.") - return - - # Move to the next state (rolling update, finish the deployment, etc.) - # Notify the replica number controller - (self.model_runner_mapping[run_id_str]. - replica_controller.callback_update_curr_replica_num_state(device_id, replica_no, model_status)) - - # Notify the replica version controller, which might trigger the next rolling update - self.send_next_scroll_update_msg(run_id_str, device_id, replica_no) - - # Update the global deployment result mapping - if run_id_str not in self.slave_deployment_results_mapping: - self.slave_deployment_results_mapping[run_id_str] = dict() - if str(device_id) not in self.slave_deployment_results_mapping[run_id_str]: - self.slave_deployment_results_mapping[run_id_str][str(device_id)] = dict() - self.slave_deployment_results_mapping[run_id_str][str(device_id)][str(replica_no)] = model_status - - logging.info("callback_deployment_result_message: topic {}, payload {}, result mapping {}.".format( - topic, payload, self.slave_deployment_results_mapping[run_id_str])) - - request_json = self.running_request_json.get(run_id_str, None) - if request_json is None: - logging.error(f"The endpoint {end_point_id} is no longer running.") - self.send_deployment_status( - end_point_id, end_point_name, payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - return - - # Wait for all replica-level's result, not device-level - if (self.model_runner_mapping[run_id_str].replica_controller.is_all_replica_num_reconciled() and - self.model_runner_mapping[run_id_str].replica_controller.is_all_replica_version_reconciled()): - """ - When all the devices have finished the add / delete / update operation - """ - # Generate one unified inference api - # Note that here we use the gateway port instead of the inference port that is used by the slave device - model_config_parameters = request_json["parameters"] - inference_port = model_config_parameters.get("server_internal_port", - 
ServerConstants.MODEL_INFERENCE_DEFAULT_PORT) - inference_port_external = model_config_parameters.get("server_external_port", inference_port) - ip = self.get_ip_address(request_json) - - if ip.startswith("http://") or ip.startswith("https://"): - model_inference_url = "{}/inference/{}".format(ip, end_point_id) - else: - model_inference_url = "http://{}:{}/inference/{}".format(ip, inference_port_external, end_point_id) - - # Send stage: MODEL_DEPLOYMENT_STAGE5 = "StartInferenceIngress" - self.send_deployment_stages(end_point_id, model_name, model_id, - model_inference_url, - ServerConstants.MODEL_DEPLOYMENT_STAGE5["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE5["text"], - "inference url: {}".format(model_inference_url)) - - # Send the result to MLOps - if self.model_runner_mapping[run_id_str].deployed_replica_payload is not None: - payload_json = self.model_runner_mapping[run_id_str].deployed_replica_payload - model_slave_url = payload_json["model_url"] - payload_json["model_url"] = model_inference_url - payload_json["port"] = inference_port_external - token = FedMLModelCache.get_instance(self.redis_addr, self.redis_port).get_end_point_token( - end_point_id, end_point_name, model_name) - - model_metadata = payload_json["model_metadata"] - model_inputs = model_metadata["inputs"] - ret_inputs = list() - if "type" in model_metadata and model_metadata["type"] == "default": - payload_json["input_json"] = {"end_point_name": end_point_name, - "model_name": model_name, - "token": str(token), - "inputs": model_inputs, - "outputs": []} - payload_json["output_json"] = model_metadata["outputs"] - else: - raise Exception(f"Unsupported model metadata type {model_metadata['type']}") - - self.send_deployment_results_with_payload( - end_point_id, end_point_name, payload_json, - self.model_runner_mapping[run_id_str].replica_controller.target_replica_ids) - - payload_json_saved = payload_json - payload_json_saved["model_slave_url"] = model_slave_url - FedMLServerDataInterface.get_instance().save_job_result(end_point_id, self.edge_id, - json.dumps(payload_json_saved)) - else: - # Arrive here because only contains remove ops, so we do not need to update the model metadata - pass - - # For auto-scaling, should update the state to "DEPLOYED" - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - update_user_setting_replica_num(end_point_id=end_point_id, state="DEPLOYED") - - if self.model_runner_mapping[run_id_str].replica_controller.under_rollback: - # If first time failed (Still might need rollback), then send failed message to the MLOps - if not (FedMLModelCache.get_instance(self.redis_addr, self.redis_port). - get_end_point_activation(end_point_id)): - self.send_deployment_status( - end_point_id, end_point_name, payload_json["model_name"], "", - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_FAILED) - else: - self.send_deployment_status(end_point_id, end_point_name, - payload_json["model_name"], - model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_ABORTED) - self.model_runner_mapping[run_id_str].replica_controller.under_rollback = False - else: - # Set the end point activation status to True, for scaling out / in and rolling update - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - set_end_point_activation(end_point_id, end_point_name, True) - - self.send_deployment_status(end_point_id, end_point_name, - payload_json["model_name"], - model_inference_url, - ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED) - - self.slave_deployment_results_mapping[run_id_str] = dict() - - time.sleep(3) - self.set_runner_completed_event(end_point_id) - - def callback_deployment_status_message(self, topic=None, payload=None): - # [Deprecated] Merge the logic into callback_deployment_result_message - logging.info("[Deprecated] callback_deployment_status_message: topic {}, payload {}.".format( - topic, payload)) - pass - - def send_deployment_start_request_to_edges(self, in_request_json=None): - if in_request_json is not None: - self.request_json = in_request_json - - # Iterate through replica_num_diff, both add and replace should be sent to the edge devices - if "replica_num_diff" not in self.request_json or self.request_json["replica_num_diff"] is None: - return [] - - edge_id_list = [] - for device_id in self.request_json["replica_num_diff"].keys(): - edge_id_list.append(device_id) - - self.request_json["master_node_ip"] = self.get_ip_address(self.request_json) - should_added_devices = [] - for edge_id in edge_id_list: - if edge_id == self.edge_id: - continue - should_added_devices.append(edge_id) - # send start deployment request to each device - self.send_deployment_start_request_to_edge(edge_id, self.request_json) - return should_added_devices - - def send_deployment_start_request_to_edge(self, edge_id, res_json): - topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(edge_id)) - logging.info("start_deployment: send topic " + topic_start_deployment + f" to client {edge_id}...") - self.client_mqtt_mgr.send_message_json(topic_start_deployment, json.dumps(res_json)) - - def get_ip_address(self, request_json): - # OPTION 1: Use local ip - ip = ServerConstants.get_local_ip() - - # OPTION 2: Auto detect public ip - if "parameters" in request_json and \ - ServerConstants.AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \ - request_json["parameters"][ServerConstants.AUTO_DETECT_PUBLIC_IP]: - ip = ServerConstants.get_public_ip() - - # OPTION 3: Use user indicated ip - if self.infer_host is not None and self.infer_host != "127.0.0.1" and self.infer_host != "localhost": - ip = self.infer_host - - return ip - - def send_deployment_delete_request_to_edges(self, payload, model_msg_object): - edge_id_list_to_delete = model_msg_object.device_ids - - # Remove the model master node id from the list using index 0 - edge_id_list_to_delete = edge_id_list_to_delete[1:] - - logging.info("Device ids to be deleted: " + str(edge_id_list_to_delete)) - - for edge_id in edge_id_list_to_delete: - if edge_id == self.edge_id: - continue - # send delete deployment request to each model device - topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(edge_id)) - logging.info("delete_deployment: send topic " + topic_delete_deployment + " to client...") - self.client_mqtt_mgr.send_message_json(topic_delete_deployment, payload) - - def ota_upgrade(self, payload, request_json): - run_id = request_json["end_point_id"] - force_ota = False - ota_version = None - - try: - parameters = request_json.get("parameters", None) - common_args = parameters.get("common_args", None) - force_ota = common_args.get("force_ota", False) - ota_version = common_args.get("ota_version", None) - except Exception as e: - pass - - if force_ota and ota_version is not None: - 
should_upgrade = True if ota_version != fedml.__version__ else False - upgrade_version = ota_version - else: - try: - fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version) - except Exception as e: - return - - should_upgrade = False if fedml_is_latest_version else True - upgrade_version = remote_ver - - if should_upgrade: - job_obj = FedMLServerDataInterface.get_instance().get_job_by_id(run_id) - if job_obj is None: - FedMLServerDataInterface.get_instance(). \ - save_started_job(run_id, self.edge_id, time.time(), - ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, - ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING, - payload) - - logging.info(f"Upgrade to version {upgrade_version} ...") - - sys_utils.do_upgrade(self.version, upgrade_version) - - raise Exception("Restarting after upgraded...") - - def callback_start_deployment(self, topic, payload): - try: - MLOpsConfigs.fetch_all_configs() - except Exception as e: - pass - - # Get deployment params - request_json = json.loads(payload) - run_id = request_json["end_point_id"] - end_point_name = request_json["end_point_name"] - token = request_json["token"] - user_id = request_json["user_id"] - user_name = request_json["user_name"] - device_ids = request_json["device_ids"] - device_objs = request_json["device_objs"] - - model_config = request_json["model_config"] - model_name = model_config["model_name"] - model_version = model_config["model_version"] - model_id = model_config["model_id"] - model_storage_url = model_config["model_storage_url"] - scale_min = model_config.get("instance_scale_min", 0) - scale_max = model_config.get("instance_scale_max", 0) - inference_engine = model_config.get("inference_engine", 0) - enable_auto_scaling = request_json.get("enable_auto_scaling", False) - desired_replica_num = request_json.get("desired_replica_num", 1) - - target_queries_per_replica = request_json.get("target_queries_per_replica", 10) - aggregation_window_size_seconds = request_json.get("aggregation_window_size_seconds", 60) - scale_down_delay_seconds = request_json.get("scale_down_delay_seconds", 120) - - inference_end_point_id = run_id - - logging.info("[Master] received start deployment request for end point {}.".format(run_id)) - - # Set redis config - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - - # Save the user setting (about replica number) of this run to Redis, if existed, update it - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_user_setting_replica_num( - end_point_id=run_id, end_point_name=end_point_name, model_name=model_name, model_version=model_version, - replica_num=desired_replica_num, enable_auto_scaling=enable_auto_scaling, - scale_min=scale_min, scale_max=scale_max, state="DEPLOYING", - aggregation_window_size_seconds=aggregation_window_size_seconds, - target_queries_per_replica=target_queries_per_replica, - scale_down_delay_seconds=int(scale_down_delay_seconds) - ) - - # Start log processor for current run - self.args.run_id = run_id - self.args.edge_id = self.edge_id - MLOpsRuntimeLog(args=self.args).init_logs() - MLOpsRuntimeLogDaemon.get_instance(self.args).set_log_source( - ServerConstants.FEDML_LOG_SOURCE_TYPE_MODEL_END_POINT) - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor(run_id, self.edge_id) - - # # Deprecated - # self.ota_upgrade(payload, request_json) - - # Add additional parameters to the request_json - run_id = inference_end_point_id - self.args.run_id = run_id - 
self.run_id = run_id - request_json["run_id"] = run_id - self.request_json = request_json - run_id_str = str(run_id) - self.running_request_json[run_id_str] = request_json - self.request_json["master_node_ip"] = self.get_ip_address(self.request_json) - - # Set the target status of the devices to redis - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_device_info(request_json["end_point_id"], end_point_name, json.dumps(device_objs)) - - # Setup Token - usr_indicated_token = self.get_usr_indicated_token(request_json) - if usr_indicated_token != "": - logging.info(f"Change Token from {token} to {usr_indicated_token}") - token = usr_indicated_token - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_token(run_id, end_point_name, model_name, token) - - self.subscribe_slave_devices_message(request_json) - - # Report stage to mlops: MODEL_DEPLOYMENT_STAGE1 = "Received" - self.send_deployment_stages(self.run_id, model_name, model_id, - "", - ServerConstants.MODEL_DEPLOYMENT_STAGE1["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE1["text"], - "Received request for endpoint {}".format(run_id)) - - # Report stage to mlops: MODEL_DEPLOYMENT_STAGE2 = "Initializing" - self.send_deployment_stages(self.run_id, model_name, model_id, - "", - ServerConstants.MODEL_DEPLOYMENT_STAGE2["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"], - ServerConstants.MODEL_DEPLOYMENT_STAGE2["text"]) - - ServerConstants.save_runner_infos(self.args.device_id + "." + self.args.os_name, self.edge_id, run_id=run_id) - - if self.run_as_edge_server_and_agent: - # Replica Controller is per deployment - replica_controller = FedMLDeviceReplicaController(self.edge_id, self.request_json) - - # Prepare num diff - new_request_with_num_diff = replica_controller.generate_diff_to_request_json() - self.running_request_json[run_id_str] = new_request_with_num_diff - request_json = new_request_with_num_diff - - # Listen to extra worker topics, especially for the case where a worker's replicas are removed down to zero. - # In that case the backend will NOT send those worker ids to the master, but we still need to listen to them; see the sketch below. 
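For reference, the per-device diff that the block below iterates over has roughly the following shape, inferred from how this file indexes it (a sketch, not an authoritative schema):

    # Keys are worker edge ids; each value describes one reconciliation op.
    replica_num_diff = {
        "96684": {"op": "remove", "curr_num": 1, "target_num": 0},  # scale this worker down to zero
        "96685": {"op": "add", "curr_num": 1, "target_num": 2},  # add one replica on this worker
    }

    # Workers whose replica count goes to zero still need their result topic subscribed:
    for device_id, diff in replica_num_diff.items():
        if diff["op"] == "remove" and diff["target_num"] == 0:
            print(f"subscribe to the result topic of device {device_id}")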
- if "replica_num_diff" in request_json and len(request_json["replica_num_diff"]) > 0: - for device_id in request_json["replica_num_diff"].keys(): - # {"op": "remove", "curr_num": 1, "target_num": 0} - if request_json["replica_num_diff"][device_id]["op"] == "remove" and \ - request_json["replica_num_diff"][device_id]["target_num"] == 0: - self.subscribe_spec_device_message(run_id, device_id) - - # Prepare version diff - new_request_with_version_diff = replica_controller.init_first_update_device_replica_mapping() - self.running_request_json[run_id_str] = new_request_with_version_diff - request_json = new_request_with_version_diff - - # Init the model runner - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=request_json, agent_config=self.agent_config - ) - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - server_runner.edge_id = self.edge_id - server_runner.infer_host = self.infer_host - server_runner.redis_addr = self.redis_addr - server_runner.redis_port = self.redis_port - server_runner.redis_password = self.redis_password - server_runner.replica_controller = replica_controller - - logging.info(f"[Master] new request for id {run_id_str}") - logging.info(f"[Master] model runner mapping before: {self.model_runner_mapping.items()}") - - self.run_process_event_map[run_id_str] = multiprocessing.Event() - self.run_process_event_map[run_id_str].clear() - server_runner.run_process_event = self.run_process_event_map[run_id_str] - self.run_process_completed_event_map[run_id_str] = multiprocessing.Event() - self.run_process_completed_event_map[run_id_str].clear() - server_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str] - self.model_runner_mapping[run_id_str] = server_runner - - logging.info(f"[Master] model runner mapping after: {self.model_runner_mapping.items()}") - - # This subprocess will copy the server_runner and run it, but they are not the same object - server_process = Process(target=server_runner.run, args=( - self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str] - )) - server_process.start() - ServerConstants.save_run_process(run_id, server_process.pid) - - # Send stage: MODEL_DEPLOYMENT_STAGE3 = "StartRunner" - self.send_deployment_stages(self.run_id, model_name, model_id, - "", - ServerConstants.MODEL_DEPLOYMENT_STAGE3["index"], - ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"], - ServerConstants.MODEL_DEPLOYMENT_STAGE3["text"]) - - def send_first_scroll_update_msg(self): - """ - Replica-level rolling update. 
- Delete the record of the replaced device and send the deployment msg to the devices - """ - if "replica_version_diff" not in self.request_json or self.request_json["replica_version_diff"] is None: - return [] - - first_chunk_dict = self.request_json["replica_version_diff"] - - # Delete the record of the replaced device - self.delete_device_replica_info_on_master( - self.request_json["end_point_id"], self.request_json["end_point_name"], - self.request_json["model_config"]["model_name"], first_chunk_dict) - - logging.info(f"Send the first scroll update msg to the device {first_chunk_dict} ") - - # Send the deployment msg to the devices, (we reuse the start_deployment msg) - for edge_id in first_chunk_dict.keys(): - if edge_id == self.edge_id: - continue - # send start deployment request to each device - self.send_deployment_start_request_to_edge(edge_id, self.request_json) - return list(first_chunk_dict.keys()) - - def send_rollback_msg(self, run_id_str): - # Avoid using the old request_json - self.delete_device_replica_info_on_master( - self.running_request_json[run_id_str]["end_point_id"], - self.running_request_json[run_id_str]["end_point_name"], - self.running_request_json[run_id_str]["model_config"]["model_name"], - self.running_request_json[run_id_str]["replica_version_diff"]) - - # Send the deployment msg to the devices, (we reuse the start_deployment msg) - for edge_id in self.running_request_json[run_id_str]["replica_version_diff"].keys(): - if edge_id == self.edge_id: - continue - # send start deployment request to each device - self.send_deployment_start_request_to_edge(edge_id, self.running_request_json[run_id_str]) - - def delete_device_replica_info_on_master(self, endpoint_id, endpoint_name, model_name, edge_id_replica_no_dict): - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - # Remove the record of the replaced device - # [Deprecated] deployment status & device info - # Delete the result in deployment result list in Redis / SQLite - device_result_list = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_deployment_result_list(endpoint_id, endpoint_name, model_name) - - delete_device_result_list = [] - for device_result in device_result_list: - device_result_dict = json.loads(device_result) - if (str(device_result_dict["cache_device_id"]) in edge_id_replica_no_dict.keys() and - str(device_result_dict["cache_replica_no"]) in - edge_id_replica_no_dict[str(device_result_dict["cache_device_id"])]): - delete_device_result_list.append(device_result) - - for delete_item in delete_device_result_list: - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).delete_deployment_result( - delete_item, endpoint_id, endpoint_name, model_name - ) - - logging.info(f"Deleted the replica record on master: {edge_id_replica_no_dict}") - - def send_next_scroll_update_msg(self, run_id_str, device_id, replica_no): - """ - Send the next scroll update msg to the devices if needed. - If there is no need for the next scroll update, directly return. 
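- The next chunk of devices/replicas to update is taken from the replica controller's current updating window.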
- """ - if replica_no is None: - return - - replica_controller = self.model_runner_mapping[run_id_str].replica_controller - - if replica_controller.total_replica_version_diff_num == 0: - return - - if replica_controller.under_rollback: - replica_controller.intermediate_replica_version[device_id][replica_no] = replica_controller.start_version - return - - logging.info(f"Curr updating window: {replica_controller.curr_replica_updating_window} " - f"Curr version diff num: {replica_controller.total_replica_version_diff_num}") - - replica_controller.callback_update_updating_window(device_id, replica_no) - - # Decide whether to send the next scroll update - next_chunk_dict = replica_controller.get_next_chunk_devices_replica() - - if next_chunk_dict: - logging.info(f"The next scroll update for end point {run_id_str} is {next_chunk_dict}") - # Update curr updating window - replica_controller.curr_replica_updating_window = copy.deepcopy(next_chunk_dict) - - # Use global deployment result mapping to decide whether to send the next scroll update - self.running_request_json[run_id_str]["replica_version_diff"] = next_chunk_dict - - # Avoid using the old request_json - self.delete_device_replica_info_on_master( - self.running_request_json[run_id_str]["end_point_id"], - self.running_request_json[run_id_str]["end_point_name"], - self.running_request_json[run_id_str]["model_config"]["model_name"], - next_chunk_dict) - - # Send the deployment msg to the devices, (we reuse the start_deployment msg) - for edge_id in next_chunk_dict.keys(): - if edge_id == self.edge_id: - continue - # send start deployment request to each device - self.send_deployment_start_request_to_edge(edge_id, self.running_request_json[run_id_str]) - return - - def send_rollback_add_remove_op(self, run_id, rollback_replica_dict): - """ - This method is used when the original add op failed, we need to rollback by delete the existed replicas - Input example: - rollback_replica_dict = {'96684': {'curr_num': 2, 'op': 'remove', 'target_num': 1}} - """ - existed_request_json = self.running_request_json[str(run_id)] - updated_request_json = copy.deepcopy(existed_request_json) - - # Reverse the replica_num_diff - updated_request_json["replica_num_diff"] = rollback_replica_dict - - self.send_deployment_start_request_to_edges(in_request_json=updated_request_json) - - def callback_activate_deployment(self, topic, payload): - logging.info("callback_activate_deployment: topic = %s, payload = %s" % (topic, payload)) - - # Parse payload as the model message object. - model_msg_object = FedMLModelMsgObject(topic, payload) - - # Get the previous deployment status. - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_end_point_status(model_msg_object.inference_end_point_id) - if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED: - return - - # Set end point as activated status - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation( - model_msg_object.inference_end_point_id, model_msg_object.end_point_name, True) - - def callback_deactivate_deployment(self, topic, payload): - logging.info("callback_deactivate_deployment: topic = %s, payload = %s" % (topic, payload)) - - # Parse payload as the model message object. 
- model_msg_object = FedMLModelMsgObject(topic, payload) - - # Get the endpoint status - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - endpoint_status = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_end_point_status(model_msg_object.inference_end_point_id) - if endpoint_status != ServerConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED: - return - - # Set end point as deactivated status - FedMLModelCache.get_instance(self.redis_addr, self.redis_port).set_end_point_activation( - model_msg_object.inference_end_point_id, model_msg_object.model_name, False) - - def set_runner_stopped_event(self, run_id): - run_id_str = str(run_id) - server_runner = self.model_runner_mapping.get(run_id_str, None) - if server_runner is not None: - if server_runner.run_process_event is not None: - server_runner.run_process_event.set() - self.model_runner_mapping.pop(run_id_str) - - def set_runner_completed_event(self, run_id): - run_id_str = str(run_id) - server_runner = self.model_runner_mapping.get(run_id_str, None) - if server_runner is not None: - if server_runner.run_process_completed_event is not None: - server_runner.run_process_completed_event.set() - self.model_runner_mapping.pop(run_id_str) - - def callback_delete_deployment(self, topic, payload): - logging.info("[Master] callback_delete_deployment") - # Parse payload as the model message object. - model_msg_object = FedMLModelMsgObject(topic, payload) - - # Delete SQLite records - FedMLServerDataInterface.get_instance().delete_job_from_db(model_msg_object.run_id) - FedMLModelDatabase.get_instance().delete_deployment_result( - model_msg_object.run_id, model_msg_object.end_point_name, model_msg_object.model_name, - model_version=model_msg_object.model_version) - FedMLModelDatabase.get_instance().delete_deployment_run_info( - end_point_id=model_msg_object.inference_end_point_id) - - # Delete Redis Records - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - set_end_point_activation(model_msg_object.inference_end_point_id, - model_msg_object.end_point_name, False) - FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - delete_end_point(model_msg_object.inference_end_point_id, model_msg_object.end_point_name, - model_msg_object.model_name, model_msg_object.model_version) - - # Send delete deployment request to the edge devices - self.send_deployment_delete_request_to_edges(payload, model_msg_object) - - # Stop processes on master - self.set_runner_stopped_event(model_msg_object.run_id) - self.stop_device_inference_monitor(model_msg_object.run_id, model_msg_object.end_point_name, - model_msg_object.model_id, model_msg_object.model_name, - model_msg_object.model_version) - - def send_deployment_results_with_payload(self, end_point_id, end_point_name, payload, replica_id_list=None): - self.send_deployment_results(end_point_id, end_point_name, - payload["model_name"], payload["model_url"], - payload["model_version"], payload["port"], - payload["inference_engine"], - payload["model_metadata"], - payload["model_config"], - payload["input_json"], - payload["output_json"], - replica_id_list=replica_id_list) - - def send_deployment_results(self, end_point_id, end_point_name, - model_name, model_inference_url, - model_version, inference_port, inference_engine, - model_metadata, model_config, input_json, output_json, replica_id_list=None): - deployment_results_topic_prefix = "model_ops/model_device/return_deployment_result" - deployment_results_topic = "{}/{}".format(deployment_results_topic_prefix, end_point_id) - deployment_results_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name, - "model_name": model_name, "model_url": model_inference_url, - "version": model_version, "port": inference_port, - "inference_engine": inference_engine, - "model_metadata": model_metadata, - "model_config": model_config, - "input_json": input_json, - "output_json": output_json, - "timestamp": int(format(time.time_ns() / 1000.0, '.0f')), - "replica_ids": replica_id_list} - logging.info(f"[Master] deployment_results_payload is sent to mlops: {deployment_results_payload}") - - self.client_mqtt_mgr.send_message_json(deployment_results_topic, json.dumps(deployment_results_payload)) - self.client_mqtt_mgr.send_message_json(deployment_results_topic_prefix, json.dumps(deployment_results_payload)) - - def send_deployment_status(self, end_point_id, end_point_name, model_name, model_inference_url, model_status): - deployment_status_topic_prefix = "model_ops/model_device/return_deployment_status" - deployment_status_topic = "{}/{}".format(deployment_status_topic_prefix, end_point_id) - deployment_status_payload = {"end_point_id": end_point_id, "end_point_name": end_point_name, - "model_name": model_name, - "model_url": model_inference_url, - "model_status": model_status, - "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} - logging.info(f"[Master] deployment_status_payload is sent to mlops: {deployment_status_payload}") - - self.client_mqtt_mgr.send_message_json(deployment_status_topic, json.dumps(deployment_status_payload)) - self.client_mqtt_mgr.send_message_json(deployment_status_topic_prefix, json.dumps(deployment_status_payload)) - - def send_deployment_stages(self, end_point_id, model_name, model_id, model_inference_url, - model_stages_index, model_stages_title, model_stage_detail): - deployment_stages_topic_prefix = "model_ops/model_device/return_deployment_stages" - deployment_stages_topic = "{}/{}".format(deployment_stages_topic_prefix, end_point_id) - deployment_stages_payload = {"model_name": model_name, - "model_id": model_id, - "model_url": model_inference_url, - "end_point_id": end_point_id, - 
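
# The senders above publish every payload twice: once on a per-endpoint topic and
# once on the bare prefix, stamped with microseconds since the epoch
# (time.time_ns() / 1000.0). A rough sketch of that convention; _StubMessenger
# stands in for the real MqttManager and is not part of the FedML API.
import json
import time

def publish_to_endpoint_and_prefix(messenger, topic_prefix, end_point_id, payload):
    payload = {**payload, "timestamp": int(time.time_ns() / 1000.0)}  # microseconds
    body = json.dumps(payload)
    messenger.send_message_json("{}/{}".format(topic_prefix, end_point_id), body)
    messenger.send_message_json(topic_prefix, body)

class _StubMessenger:
    def send_message_json(self, topic, payload):
        print(topic, "->", payload)

publish_to_endpoint_and_prefix(
    _StubMessenger(), "model_ops/model_device/return_deployment_status",
    end_point_id=100, payload={"model_status": "DEPLOYED"})
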
"model_stage_index": model_stages_index, - "model_stage_title": model_stages_title, - "model_stage_detail": model_stage_detail, - "timestamp": int(format(time.time_ns() / 1000.0, '.0f'))} - - self.client_mqtt_mgr.send_message_json(deployment_stages_topic, json.dumps(deployment_stages_payload)) - self.client_mqtt_mgr.send_message_json(deployment_stages_topic_prefix, json.dumps(deployment_stages_payload)) - - logging.info(f"-------- Stages has been sent to mlops with stage {model_stages_index} and " - f"payload {deployment_stages_payload}") - time.sleep(2) - - def on_client_mqtt_disconnected(self, mqtt_client_object): - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_lock.acquire() - self.client_mqtt_is_connected = False - self.client_mqtt_lock.release() - - logging.info("on_client_mqtt_disconnected: {}.".format(self.client_mqtt_is_connected)) - - def on_client_mqtt_connected(self, mqtt_client_object): - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - - self.mlops_metrics.set_messenger(self.client_mqtt_mgr) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = self.edge_id - self.mlops_metrics.server_agent_id = self.server_agent_id - - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - self.client_mqtt_lock.acquire() - self.client_mqtt_is_connected = True - self.client_mqtt_lock.release() - - # logging.info("on_client_mqtt_connected: {}.".format(self.client_mqtt_is_connected)) - - def setup_client_mqtt_mgr(self): - if self.client_mqtt_mgr is not None: - return - - if self.client_mqtt_lock is None: - self.client_mqtt_lock = threading.Lock() - - # logging.info( - # "server agent config: {},{}".format( - # self.agent_config["mqtt_config"]["BROKER_HOST"], self.agent_config["mqtt_config"]["BROKER_PORT"] - # ) - # ) - - self.client_mqtt_mgr = MqttManager( - self.agent_config["mqtt_config"]["BROKER_HOST"], - self.agent_config["mqtt_config"]["BROKER_PORT"], - self.agent_config["mqtt_config"]["MQTT_USER"], - self.agent_config["mqtt_config"]["MQTT_PWD"], - self.agent_config["mqtt_config"]["MQTT_KEEPALIVE"], - "FedML_ModelServerAgent_Metrics_@{}@_{}_{}_{}".format(self.user_name, self.args.current_device_id, - str(os.getpid()), - str(uuid.uuid4())) - ) - self.client_mqtt_mgr.add_connected_listener(self.on_client_mqtt_connected) - self.client_mqtt_mgr.add_disconnected_listener(self.on_client_mqtt_disconnected) - self.client_mqtt_mgr.connect() - self.client_mqtt_mgr.loop_start() - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.client_mqtt_mgr) - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = self.edge_id - self.mlops_metrics.server_agent_id = self.server_agent_id - - def release_client_mqtt_mgr(self): - try: - if self.client_mqtt_mgr is not None: - self.client_mqtt_mgr.loop_stop() - self.client_mqtt_mgr.disconnect() - - self.client_mqtt_lock.acquire() - if self.client_mqtt_mgr is not None: - self.client_mqtt_is_connected = False - self.client_mqtt_mgr = None - self.client_mqtt_lock.release() - except Exception: - pass - - def send_deployment_stop_request_to_edges(self, edge_id_list, payload): - for edge_id in edge_id_list: - topic_stop_deployment = "model_ops/model_device/stop_deployment/{}".format(str(self.edge_id)) - logging.info("stop_deployment: send topic " + topic_stop_deployment) - self.client_mqtt_mgr.send_message_json(topic_stop_deployment, payload) - - def 
send_exit_train_with_exception_request_to_edges(self, edge_id_list, payload): - for edge_id in edge_id_list: - topic_exit_train = "flserver_agent/" + str(edge_id) + "/exit_train_with_exception" - logging.info("exit_train_with_exception: send topic " + topic_exit_train) - self.client_mqtt_mgr.send_message_json(topic_exit_train, payload) - - def exit_run_with_exception_entry(self): - try: - self.setup_client_mqtt_mgr() - self.exit_run_with_exception() - except Exception as e: - self.release_client_mqtt_mgr() - sys_utils.cleanup_all_fedml_server_login_processes( - ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False) - sys.exit(1) - finally: - self.release_client_mqtt_mgr() - - def exit_run_with_exception(self): - logging.info("Exit run successfully.") - - ServerConstants.cleanup_learning_process(self.run_id) - ServerConstants.cleanup_run_process(self.run_id) - - self.mlops_metrics.report_server_id_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, edge_id=self.edge_id) - - time.sleep(1) - - def callback_exit_train_with_exception(self, topic, payload): - # logging.info("callback_exit_train_with_exception: topic = %s, payload = %s" % (topic, payload)) - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json.get("runId", None) - if run_id is None: - run_id = request_json.get("run_id", None) - if run_id is None: - run_id = request_json.get("id", None) - - if run_id is None: - return - - edge_ids = request_json.get("edgeids", None) - - self.send_exit_train_with_exception_request_to_edges(edge_ids, payload) - - # Stop server with multiprocessing mode - self.request_json = request_json - server_runner = FedMLServerRunner( - self.args, edge_id=self.edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id - ) - try: - Process(target=server_runner.exit_run_with_exception_entry).start() - except Exception as e: - pass - - def callback_client_exit_train_with_exception(self, topic, payload): - # logging.info("callback_client_exit_train_with_exception: topic = %s, payload = %s" % (topic, payload)) - - request_json = json.loads(payload) - run_id = request_json.get("run_id", None) - edge_id = request_json.get("edge_id", None) - if run_id is None: - logging.info("callback_client_exit_train_with_exception run id is none") - return - - job = FedMLServerDataInterface.get_instance().get_job_by_id(run_id) - if job is not None and job.running_json is not None and job.running_json != "": - job_json_obj = json.loads(job.running_json) - edge_ids = job_json_obj.get("edgeids", None) - - self.mlops_metrics.broadcast_server_training_status( - run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED, - is_from_model=True, edge_id=edge_id) - - self.send_exit_train_with_exception_request_to_edges(edge_ids, job.running_json) - - self.exit_run_with_exception() - - def callback_runner_id_status(self, topic, payload): - logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload)) - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json["run_id"] - status = request_json["status"] - edge_id = request_json["edge_id"] - run_id_str = str(run_id) - - if ( - status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED - or status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED - ): - # Stop server with multiprocessing mode - stop_request_json = self.running_request_json.get(run_id_str, None) - 
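
# The exception callbacks above accept several historical key names for the run
# id and fan the same payload out to one exit topic per slave device. A hedged,
# self-contained sketch of both helpers:
import json

def extract_run_id(payload):
    request = json.loads(payload)
    for key in ("runId", "run_id", "id"):
        if request.get(key) is not None:
            return request[key]
    return None

def exit_topics_for_edges(edge_ids):
    return ["flserver_agent/{}/exit_train_with_exception".format(e) for e in edge_ids]

assert extract_run_id('{"run_id": 7}') == 7
assert exit_topics_for_edges([1, 2]) == [
    "flserver_agent/1/exit_train_with_exception",
    "flserver_agent/2/exit_train_with_exception",
]
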
if stop_request_json is None: - stop_request_json = request_json - if self.run_as_edge_server_and_agent: - server_runner = FedMLServerRunner( - self.args, run_id=run_id, request_json=stop_request_json, agent_config=self.agent_config - ) - server_runner.edge_id = self.edge_id - server_runner.run_as_edge_server_and_agent = self.run_as_edge_server_and_agent - server_runner.run_status = status - status_process = Process(target=server_runner.cleanup_client_with_status) - status_process.start() - status_process.join(10) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, self.edge_id) - - def cleanup_client_with_status(self): - if self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FINISHED: - logging.info("received to finished status.") - self.cleanup_run_when_finished() - elif self.run_status == ServerConstants.MSG_MLOPS_SERVER_STATUS_FAILED: - logging.info("received to failed status.") - self.cleanup_run_when_starting_failed() - - def callback_report_current_status(self, topic, payload): - request_json = json.loads(payload) - if self.run_as_edge_server_and_agent: - self.send_agent_active_msg() - elif self.run_as_cloud_agent: - self.send_agent_active_msg() - elif self.run_as_cloud_server: - pass - - @staticmethod - def process_ota_upgrade_msg(): - os.system("pip install -U fedml") - - def callback_server_ota_msg(self, topic, payload): - request_json = json.loads(payload) - cmd = request_json["cmd"] - - if cmd == ServerConstants.FEDML_OTA_CMD_UPGRADE: - try: - self.process_ota_upgrade_msg() - # Process(target=FedMLServerRunner.process_ota_upgrade_msg).start() - raise Exception("After upgraded, restart runner...") - except Exception as e: - pass - elif cmd == ServerConstants.FEDML_OTA_CMD_RESTART: - raise Exception("Restart runner...") - - @staticmethod - def get_device_id(): - device_file_path = os.path.join(ServerConstants.get_data_dir(), ServerConstants.LOCAL_RUNNER_INFO_DIR_NAME) - file_for_device_id = os.path.join(device_file_path, "devices.id") - if not os.path.exists(device_file_path): - os.makedirs(device_file_path) - elif os.path.exists(file_for_device_id): - with open(file_for_device_id, 'r', encoding='utf-8') as f: - device_id_from_file = f.readline() - if device_id_from_file is not None and device_id_from_file != "": - return device_id_from_file - - if platform.system() == "Darwin": - cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \ - "|awk -F':' '{print $2}' " - device_id = os.popen(cmd_get_serial_num).read() - device_id = device_id.replace('\n', '').replace(' ', '') - if device_id is None or device_id == "": - device_id = hex(uuid.getnode()) - else: - device_id = "0x" + device_id - else: - if "nt" in os.name: - - def get_uuid(): - guid = "" - try: - cmd = "wmic csproduct get uuid" - guid = str(subprocess.check_output(cmd)) - pos1 = guid.find("\\n") + 2 - guid = guid[pos1:-15] - except Exception as ex: - pass - return str(guid) - - device_id = str(get_uuid()) - elif "posix" in os.name: - device_id = sys_utils.get_device_id_in_docker() - if device_id is None: - device_id = hex(uuid.getnode()) - else: - device_id = sys_utils.run_subprocess_open( - "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split() - ) - device_id = hex(device_id) - - if device_id is not None and device_id != "": - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - else: - device_id = hex(uuid.uuid4()) - with 
open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - - return device_id - - def bind_account_and_device_id(self, url, account_id, device_id, os_name): - role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_ON_PREMISE_MASTER_INDEX] - if self.run_as_edge_server_and_agent: - role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_ON_PREMISE_MASTER_INDEX] - elif self.run_as_cloud_agent: - role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_FEDML_CLOUD_MASTER_INDEX] - elif self.run_as_cloud_server: - role = ServerConstants.login_role_list[ServerConstants.LOGIN_MODE_INFERENCE_INSTANCE_INDEX] - - ip = requests.get('https://checkip.amazonaws.com').text.strip() - fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \ - cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \ - gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info() - host_name = sys_utils.get_host_name() - json_params = { - "accountid": account_id, - "deviceid": device_id, - "type": os_name, - "state": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, - "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, - "processor": cpu_info, - "core_type": cpu_info, - "network": "", - "role": role, - "os_ver": os_ver, - "memory": total_mem, - "ip": ip, - "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver, - "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver, - "mpi_installed": mpi_installed, "cpu_usage": cpu_usage, - "available_mem": available_mem, "total_mem": total_mem, - "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name} - } - if gpu_count > 0: - if gpu_total_mem is not None: - json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem - else: - json_params["gpu"] = gpu_info if gpu_info is not None else "" - json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else "" - if gpu_available_mem is not None: - json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem - if gpu_total_mem is not None: - json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem - - json_params["extra_infos"]["gpu_count"] = gpu_count - json_params["extra_infos"]["gpu_vendor"] = gpu_vendor - json_params["extra_infos"]["gpu_device_name"] = gpu_device_name - - gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count) - gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0 - gpu_list = sys_utils.get_gpu_list() - json_params["extra_infos"]["gpu_available_count"] = gpu_available_count - json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list - json_params["extra_infos"]["gpu_list"] = gpu_list - else: - json_params["gpu"] = "None" - json_params["extra_infos"]["gpu_available_count"] = 0 - json_params["extra_infos"]["gpu_available_id_list"] = [] - json_params["extra_infos"]["gpu_list"] = [] - - _, cert_path = MLOpsConfigs.get_request_params() - if cert_path is not None: - try: - requests.session().verify = cert_path - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - except requests.exceptions.SSLError as err: - MLOpsConfigs.install_root_ca_file() - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - else: - response = requests.post(url, json=json_params, 
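
# The binding call above tries a certificate-verified POST first and, on an SSL
# failure, installs the bundled root CA once and retries. A simplified sketch of
# that retry shape; `install_ca` stands in for MLOpsConfigs.install_root_ca_file,
# and the URL and params are whatever the caller supplies.
import requests

def post_binding(url, json_params, cert_path=None, install_ca=None):
    headers = {"content-type": "application/json", "Connection": "close"}
    if cert_path is None:
        # No CA bundle known: plain POST, as in the else-branch above.
        return requests.post(url, json=json_params, headers={"Connection": "close"})
    try:
        return requests.post(url, json=json_params, verify=True, headers=headers)
    except requests.exceptions.SSLError:
        if install_ca is not None:
            install_ca()  # refresh the root CA, then retry once
        return requests.post(url, json=json_params, verify=True, headers=headers)
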
headers={"Connection": "close"}) - edge_id = -1 - user_name = None - extra_url = None - if response.status_code != 200: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - pass - else: - # print("url = {}, response = {}".format(url, response)) - status_code = response.json().get("code") - if status_code == "SUCCESS": - edge_id = response.json().get("data").get("id") - user_name = response.json().get("data").get("userName", None) - extra_url = response.json().get("data").get("url", None) - if edge_id is None or edge_id <= 0: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - else: - if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR: - raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR) - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - return -1, None, None - return edge_id, user_name, extra_url - - def fetch_configs(self): - return MLOpsConfigs.fetch_all_configs() - - def send_agent_active_msg(self): - active_topic = "flserver_agent/active" - status = MLOpsStatus.get_instance().get_server_agent_status(self.edge_id) - if ( - status is not None - and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE - and status != ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - ): - return - - status = ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - active_msg = {"ID": self.edge_id, "status": status} - MLOpsStatus.get_instance().set_server_agent_status(self.edge_id, status) - self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg)) - - def subscribe_slave_devices_message(self, request_json): - if request_json is None: - return - run_id = request_json["run_id"] - edge_id_list = request_json["device_ids"] - for edge_id in edge_id_list: - if str(edge_id) == str(self.edge_id): - continue - - # subscribe deployment result message for each model device - deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format( - run_id, edge_id) - - self.mqtt_mgr.add_message_listener(deployment_results_topic, self.callback_deployment_result_message) - self.mqtt_mgr.subscribe_msg(deployment_results_topic) - - def subscribe_spec_device_message(self, run_id, device_id): - if device_id == self.edge_id: - return - - # subscribe deployment result message for each model device - deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format( - run_id, device_id) - - self.mqtt_mgr.add_message_listener(deployment_results_topic, self.callback_deployment_result_message) - self.mqtt_mgr.subscribe_msg(deployment_results_topic) - - def on_agent_mqtt_connected(self, mqtt_client_object): - # The MQTT message topic format is as follows: // - - # Setup MQTT message listener for starting deployment - server_agent_id = self.edge_id - topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_start_deployment, self.callback_start_deployment) - - # Setup MQTT message listener for activating deployment - topic_activate_deployment = "model_ops/model_device/activate_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_activate_deployment, self.callback_activate_deployment) - - # Setup MQTT message listener for deactivating deployment - topic_deactivate_deployment = 
"model_ops/model_device/deactivate_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_deactivate_deployment, self.callback_deactivate_deployment) - - # Setup MQTT message listener for delete deployment - topic_delete_deployment = "model_ops/model_device/delete_deployment/{}".format(str(self.edge_id)) - self.mqtt_mgr.add_message_listener(topic_delete_deployment, self.callback_delete_deployment) - - # Setup MQTT message listener for server status switching - topic_server_status = "fl_server/flserver_agent_" + str(server_agent_id) + "/status" - self.mqtt_mgr.add_message_listener(topic_server_status, self.callback_runner_id_status) - - # Setup MQTT message listener to report current device status. - topic_report_status = "mlops/report_device_status" - self.mqtt_mgr.add_message_listener(topic_report_status, self.callback_report_current_status) - - # Setup MQTT message listener to OTA messages from the MLOps. - topic_ota_msg = "mlops/flserver_agent_" + str(server_agent_id) + "/ota" - self.mqtt_mgr.add_message_listener(topic_ota_msg, self.callback_server_ota_msg) - - # Subscribe topics for starting train, stopping train and fetching client status. - mqtt_client_object.subscribe(topic_start_deployment, qos=2) - mqtt_client_object.subscribe(topic_activate_deployment, qos=2) - mqtt_client_object.subscribe(topic_deactivate_deployment, qos=2) - mqtt_client_object.subscribe(topic_delete_deployment, qos=2) - mqtt_client_object.subscribe(topic_server_status, qos=2) - mqtt_client_object.subscribe(topic_report_status, qos=2) - mqtt_client_object.subscribe(topic_ota_msg, qos=2) - - self.subscribed_topics.clear() - self.subscribed_topics.append(topic_start_deployment) - self.subscribed_topics.append(topic_activate_deployment) - self.subscribed_topics.append(topic_deactivate_deployment) - self.subscribed_topics.append(topic_delete_deployment) - self.subscribed_topics.append(topic_server_status) - self.subscribed_topics.append(topic_report_status) - self.subscribed_topics.append(topic_ota_msg) - - self.endpoint_sync_protocol = FedMLEndpointSyncProtocol(agent_config=self.agent_config, mqtt_mgr=self.mqtt_mgr) - self.endpoint_sync_protocol.setup_listener_for_sync_device_info(self.edge_id) - - # Broadcast the first active message. 
- self.send_agent_active_msg() - - # Echo results - # print("\n\nCongratulations, your device is connected to the FedML MLOps platform successfully!") - # print( - # "Your FedML Edge ID is " + str(self.edge_id) + ", unique device ID is " - # + str(self.unique_device_id) - # + "\n" - # ) - - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - def on_agent_mqtt_disconnected(self, mqtt_client_object): - MLOpsStatus.get_instance().set_server_agent_status( - self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE - ) - - def recover_inference_and_monitor(self): - try: - history_jobs = FedMLServerDataInterface.get_instance().get_history_jobs() - for job in history_jobs.job_list: - if job.running_json is None: - continue - - if job.deployment_result == "": - continue - - run_id, end_point_name, token, user_id, user_name, device_ids, device_objs, model_config, model_name, \ - model_id, model_storage_url, scale_min, scale_max, inference_engine, model_is_from_open, \ - inference_end_point_id, use_gpu, memory_size, model_version, inference_port = \ - self.parse_model_run_params(json.loads(job.running_json)) - - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - is_activated = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). \ - get_end_point_activation(run_id) - if not is_activated: - continue - - self.start_device_inference_gateway(run_id, end_point_name, model_id, model_name, model_version, - inference_port=inference_port) - - self.stop_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version) - self.start_device_inference_monitor(run_id, end_point_name, model_id, model_name, model_version) - except Exception as e: - logging.info("recover inference and monitor: {}".format(traceback.format_exc())) - - def recover_start_deployment_msg_after_upgrading(self): - try: - current_job = FedMLServerDataInterface.get_instance().get_current_job() - if current_job is not None and \ - current_job.status == ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING: - FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) - is_activated = FedMLModelCache.get_instance(self.redis_addr, self.redis_port). 
\ - get_end_point_activation(current_job.job_id) - if not is_activated: - return - logging.info("start deployment after upgrading.") - topic_start_deployment = "model_ops/model_device/start_deployment/{}".format(str(self.edge_id)) - self.callback_start_deployment(topic_start_deployment, current_job.running_json) - except Exception as e: - logging.info("recover starting deployment message after upgrading: {}".format(traceback.format_exc())) - - def setup_agent_mqtt_connection(self, service_config): - # Setup MQTT connection - self.mqtt_mgr = MqttManager( - service_config["mqtt_config"]["BROKER_HOST"], - service_config["mqtt_config"]["BROKER_PORT"], - service_config["mqtt_config"]["MQTT_USER"], - service_config["mqtt_config"]["MQTT_PWD"], - service_config["mqtt_config"]["MQTT_KEEPALIVE"], - "FedML_ModelServerAgent_Daemon_@" + self.user_name + "@_" + self.args.current_device_id + str(uuid.uuid4()), - "flserver_agent/last_will_msg", - json.dumps({"ID": self.edge_id, "status": ServerConstants.MSG_MLOPS_SERVER_STATUS_OFFLINE}) - ) - self.agent_config = service_config - - # Init local database - FedMLServerDataInterface.get_instance().create_job_table() - try: - FedMLModelDatabase.get_instance().set_database_base_dir(ServerConstants.get_database_dir()) - FedMLModelDatabase.get_instance().create_table() - except Exception as e: - pass - - server_api_cmd = "fedml.computing.scheduler.model_scheduler.device_server_api:api" - server_api_pids = RunProcessUtils.get_pid_from_cmd_line(server_api_cmd) - if server_api_pids is None or len(server_api_pids) <= 0: - # Start local API services - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - python_program = get_python_program() - self.local_api_process = ServerConstants.exec_console_with_script( - "{} -m uvicorn {} --host 0.0.0.0 --port {} --reload --reload-delay 3 --reload-dir {} " - "--log-level critical".format( - python_program, server_api_cmd, ServerConstants.LOCAL_SERVER_API_PORT, - fedml_base_dir - ), - should_capture_stdout=False, - should_capture_stderr=False - ) - # if self.local_api_process is not None and self.local_api_process.pid is not None: - # print(f"Model master local API process id {self.local_api_process.pid}") - - self.recover_inference_and_monitor() - - # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor() - - # Setup MQTT connected listener - self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected) - self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected) - self.mqtt_mgr.connect() - - self.setup_client_mqtt_mgr() - self.mlops_metrics.report_server_training_status( - self.run_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE, - is_from_model=True, edge_id=self.edge_id) - MLOpsStatus.get_instance().set_server_agent_status( - self.edge_id, ServerConstants.MSG_MLOPS_SERVER_STATUS_IDLE - ) - - self.recover_start_deployment_msg_after_upgrading() - - def stop_agent(self): - if self.run_process_event is not None: - self.run_process_event.set() - - if self.mqtt_mgr is not None: - try: - for topic in self.subscribed_topics: - self.mqtt_mgr.unsubscribe_msg(topic) - except Exception as e: - pass - - self.mqtt_mgr.loop_stop() - self.mqtt_mgr.disconnect() - - self.release_client_mqtt_mgr() - - def start_agent_mqtt_loop(self, should_exit_sys=True): - # Start MQTT message loop - try: - self.mqtt_mgr.loop_forever() - except Exception as e: - if str(e) == "Restarting after upgraded...": - logging.info("Restarting after upgraded...") - else: - 
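
# recover_start_deployment_msg_after_upgrading above replays the cached
# start_deployment message only for a job interrupted mid-upgrade whose endpoint
# is still activated. The status constant below is a stand-in for
# ServerConstants.MSG_MLOPS_SERVER_STATUS_UPGRADING.
UPGRADING = "UPGRADING"

def should_replay_start_deployment(job_status, is_activated):
    return job_status == UPGRADING and bool(is_activated)

assert should_replay_start_deployment(UPGRADING, True)
assert not should_replay_start_deployment("FINISHED", True)
assert not should_replay_start_deployment(UPGRADING, False)
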
print("Server tracing: {}".format(traceback.format_exc())) - finally: - self.stop_agent() - if should_exit_sys: - pass - """ - # Deprecated, will kill the process by the parent process. - time.sleep(5) - sys_utils.cleanup_all_fedml_server_login_processes( - ServerConstants.SERVER_LOGIN_PROGRAM, clean_process_group=False) - sys.exit(1) - """ - diff --git a/python/fedml/computing/scheduler/slave/client_runner_deprecated.py b/python/fedml/computing/scheduler/slave/client_runner_deprecated.py deleted file mode 100755 index 79b5697728..0000000000 --- a/python/fedml/computing/scheduler/slave/client_runner_deprecated.py +++ /dev/null @@ -1,1872 +0,0 @@ -import json -import logging -import multiprocessing -import sys - -from multiprocessing import Process -import os -import platform -import shutil -import subprocess -import threading - -import time -import traceback -import urllib -import uuid -import zipfile -from urllib.parse import urljoin, urlparse - -import requests - -import fedml -from ..comm_utils.constants import SchedulerConstants -from ..comm_utils.job_cleanup import JobCleanup -from ..comm_utils.job_utils import JobRunnerUtils, DockerArgs -from ..comm_utils.run_process_utils import RunProcessUtils -from ..scheduler_entry.constants import Constants -from ....core.mlops.mlops_device_perfs import MLOpsDevicePerfStats -from ....core.mlops.mlops_runtime_log import MLOpsRuntimeLog - -from ....core.distributed.communication.mqtt.mqtt_manager import MqttManager -from ..comm_utils.yaml_utils import load_yaml_config -from .client_constants import ClientConstants - -from ....core.mlops.mlops_metrics import MLOpsMetrics - -from ....core.mlops.mlops_configs import MLOpsConfigs -from ....core.mlops.mlops_runtime_log_daemon import MLOpsRuntimeLogDaemon -from ....core.mlops.mlops_status import MLOpsStatus -from ..comm_utils.sys_utils import get_sys_runner_info, get_python_program -from .client_data_interface import FedMLClientDataInterface -from ..comm_utils import sys_utils -from ....core.mlops.mlops_utils import MLOpsUtils -from ..model_scheduler.model_device_client import FedMLModelDeviceClientRunner -from ..model_scheduler.model_device_server import FedMLModelDeviceServerRunner -from ..comm_utils import security_utils -from ..scheduler_core.compute_cache_manager import ComputeCacheManager -from ..scheduler_core.message_center import FedMLMessageCenter -import ssl - - -class RunnerError(Exception): - """ Runner stopped. """ - pass - - -class RunnerCompletedError(Exception): - """ Runner completed. 
""" - pass - - -class FedMLClientRunner(FedMLMessageCenter): - - def __init__(self, args, edge_id=0, request_json=None, agent_config=None, run_id=0, - cuda_visible_gpu_ids_str=None): - super().__init__() - self.model_device_server_id = None - self.model_device_client_edge_id_list = None - self.disable_client_login = False - self.model_device_server = None - self.model_device_client_list = None - self.run_process_event = None - self.run_process_event_map = dict() - self.run_process_completed_event = None - self.run_process_completed_event_map = dict() - self.run_process = None - self.run_process_map = dict() - self.running_request_json = dict() - self.local_api_process = None - self.start_request_json = None - self.device_status = None - self.current_training_status = None - self.mqtt_mgr = None - self.edge_id = edge_id - self.edge_user_name = None - self.edge_extra_url = None - self.run_id = run_id - self.unique_device_id = None - self.args = args - self.request_json = request_json - self.version = args.version - self.device_id = args.device_id - self.cur_dir = os.path.split(os.path.realpath(__file__))[0] - if args.current_running_dir is not None: - self.cur_dir = args.current_running_dir - self.sudo_cmd = "" - self.is_mac = False - if platform.system() == "Darwin": - self.is_mac = True - - self.agent_config = agent_config - self.fedml_data_base_package_dir = os.path.join("/", "fedml", "data") - self.fedml_data_local_package_dir = os.path.join("/", "fedml", "fedml-package", "fedml", "data") - self.fedml_data_dir = self.fedml_data_base_package_dir - self.fedml_config_dir = os.path.join("/", "fedml", "conf") - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = { - "${FEDSYS.RUN_ID}": "", - "${FEDSYS.PRIVATE_LOCAL_DATA}": "", - "${FEDSYS.CLIENT_ID_LIST}": "", - "${FEDSYS.SYNTHETIC_DATA_URL}": "", - "${FEDSYS.IS_USING_LOCAL_DATA}": "", - "${FEDSYS.CLIENT_NUM}": "", - "${FEDSYS.CLIENT_INDEX}": "", - "${FEDSYS.CLIENT_OBJECT_LIST}": "", - "${FEDSYS.LOG_SERVER_URL}": "", - } - - self.mlops_metrics = None - self.client_active_list = dict() - self.ntp_offset = MLOpsUtils.get_ntp_offset() - self.server_id = None - self.computing_started_time = 0 - self.fedml_config_object = None - self.package_type = SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT - self.cuda_visible_gpu_ids_str = cuda_visible_gpu_ids_str - # logging.info("Current directory of client agent: " + self.cur_dir) - self.subscribed_topics = list() - self.user_name = None - self.general_edge_id = None - self.message_center = None - - def __repr__(self): - return "<{klass} @{id:x} {attrs}>".format( - klass=self.__class__.__name__, - id=id(self) & 0xFFFFFF, - attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), - ) - - def copy_runner(self): - copy_runner = FedMLClientRunner(self.args) - copy_runner.disable_client_login = self.disable_client_login - copy_runner.model_device_server = self.model_device_server - copy_runner.model_device_client_list = self.model_device_client_list - copy_runner.run_process_event = self.run_process_event - copy_runner.run_process_event_map = self.run_process_event_map - copy_runner.run_process_completed_event = self.run_process_completed_event - copy_runner.run_process_completed_event_map = self.run_process_completed_event_map - copy_runner.run_process = self.run_process - copy_runner.run_process_map = self.run_process_map - copy_runner.running_request_json = self.running_request_json - copy_runner.local_api_process = self.local_api_process - copy_runner.start_request_json = self.start_request_json - 
copy_runner.device_status = self.device_status - copy_runner.current_training_status = self.current_training_status - copy_runner.mqtt_mgr = self.mqtt_mgr - copy_runner.edge_id = self.edge_id - copy_runner.edge_user_name = self.edge_user_name - copy_runner.edge_extra_url = self.edge_extra_url - copy_runner.run_id = self.run_id - copy_runner.unique_device_id = self.unique_device_id - copy_runner.args = self.args - copy_runner.request_json = self.request_json - copy_runner.version =self.version - copy_runner.device_id = self.device_id - copy_runner.cur_dir = self.cur_dir - copy_runner.cur_dir = self.cur_dir - copy_runner.sudo_cmd = self.sudo_cmd - copy_runner.is_mac = self.is_mac - - copy_runner.agent_config = self.agent_config - copy_runner.fedml_data_base_package_dir = self.fedml_data_base_package_dir - copy_runner.fedml_data_local_package_dir = self.fedml_data_local_package_dir - copy_runner.fedml_data_dir = self.fedml_data_dir - copy_runner.fedml_config_dir = self.fedml_config_dir - - copy_runner.FEDML_DYNAMIC_CONSTRAIN_VARIABLES = self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES - - copy_runner.mlops_metrics = self.mlops_metrics - copy_runner.client_active_list = self.client_active_list - copy_runner.ntp_offset = self.ntp_offset - copy_runner.server_id = self.server_id - copy_runner.computing_started_time = self.computing_started_time - copy_runner.fedml_config_object = self.fedml_config_object - copy_runner.package_type = self.package_type - copy_runner.cuda_visible_gpu_ids_str = self.cuda_visible_gpu_ids_str - copy_runner.subscribed_topics = self.subscribed_topics - copy_runner.user_name = self.user_name - copy_runner.general_edge_id = self.general_edge_id - copy_runner.message_center = self.message_center - - return copy_runner - - def build_dynamic_constrain_variables(self, run_id, run_config): - data_config = run_config.get("data_config", {}) - server_edge_id_list = self.request_json["edgeids"] - local_edge_id_list = list() - local_edge_id_list.append(int(self.edge_id)) - is_using_local_data = 0 - private_data_dir = data_config.get("privateLocalData", "") - synthetic_data_url = data_config.get("syntheticDataUrl", "") - edges = self.request_json["edges"] - # if private_data_dir is not None \ - # and len(str(private_data_dir).strip(' ')) > 0: - # is_using_local_data = 1 - if private_data_dir is None or len(str(private_data_dir).strip(" ")) <= 0: - params_config = run_config.get("parameters", None) - private_data_dir = ClientConstants.get_data_dir() - if synthetic_data_url is None or len(str(synthetic_data_url)) <= 0: - synthetic_data_url = private_data_dir - - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.RUN_ID}"] = run_id - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.PRIVATE_LOCAL_DATA}"] = private_data_dir.replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_ID_LIST}"] = str(local_edge_id_list).replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.SYNTHETIC_DATA_URL}"] = synthetic_data_url.replace(" ", "") - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.IS_USING_LOCAL_DATA}"] = str(is_using_local_data) - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_NUM}"] = len(server_edge_id_list) - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = 1 - for cur_index, id_value in enumerate(server_edge_id_list): - if str(id_value) == str(self.edge_id): - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_INDEX}"] = cur_index + 1 - break - client_objects = str(json.dumps(edges)) - client_objects = client_objects.replace(" ", 
"").replace("\n", "").replace('"', '\\"') - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.CLIENT_OBJECT_LIST}"] = client_objects - self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES["${FEDSYS.LOG_SERVER_URL}"] = self.agent_config["ml_ops_config"][ - "LOG_SERVER_URL" - ] - - def unzip_file(self, zip_file, unzip_file_path) -> str: - if zipfile.is_zipfile(zip_file): - with zipfile.ZipFile(zip_file, "r") as zipf: - zipf.extractall(unzip_file_path) - unzipped_file_name = zipf.namelist()[0] - else: - raise Exception("Invalid zip file {}".format(zip_file)) - - return unzipped_file_name - - def package_download_progress(self, count, blksize, filesize): - self.check_runner_stop_event() - - downloaded = count * blksize - downloaded = filesize if downloaded > filesize else downloaded - progress = (downloaded / filesize * 100) if filesize != 0 else 0 - progress_int = int(progress) - downloaded_kb = format(downloaded / 1024, '.2f') - - # since this hook funtion is stateless, we need a state to avoid print progress repeatly - if count == 0: - self.prev_download_progress = 0 - if progress_int != self.prev_download_progress and progress_int % 5 == 0: - self.prev_download_progress = progress_int - logging.info("package downloaded size {} KB, progress {}%".format(downloaded_kb, progress_int)) - - def retrieve_and_unzip_package(self, package_name, package_url): - local_package_path = ClientConstants.get_package_download_dir() - os.makedirs(local_package_path, exist_ok=True) - filename, filename_without_extension, file_extension = ClientConstants.get_filename_and_extension(package_url) - local_package_file = os.path.join(local_package_path, f"fedml_run_{self.run_id}_{filename_without_extension}") - if os.path.exists(local_package_file): - os.remove(local_package_file) - ssl._create_default_https_context = ssl._create_unverified_context - urllib.request.urlretrieve(package_url, local_package_file, - reporthook=self.package_download_progress) - unzip_package_path = os.path.join(ClientConstants.get_package_unzip_dir(), - f"unzip_fedml_run_{self.run_id}_{filename_without_extension}") - try: - shutil.rmtree(unzip_package_path, ignore_errors=True) - except Exception as e: - logging.error( - f"Failed to remove directory {unzip_package_path}, Exception: {e}, Traceback: {traceback.format_exc()}") - pass - - package_dir_name = self.unzip_file(local_package_file, unzip_package_path) # Using unziped folder name - unzip_package_full_path = os.path.join(unzip_package_path, package_dir_name) - - logging.info("local_package_file {}, unzip_package_path {}, unzip file full path {}".format( - local_package_file, unzip_package_path, unzip_package_full_path)) - - return unzip_package_full_path - - def update_local_fedml_config(self, run_id, run_config): - packages_config = run_config["packages_config"] - - # Copy config file from the client - unzip_package_path = self.retrieve_and_unzip_package( - packages_config["linuxClient"], packages_config["linuxClientUrl"] - ) - fedml_local_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") - - # Load the above config to memory - config_from_container = load_yaml_config(fedml_local_config_file) - container_entry_file_config = config_from_container["entry_config"] - container_dynamic_args_config = config_from_container["dynamic_args"] - entry_file = container_entry_file_config["entry_file"] - conf_file = container_entry_file_config["conf_file"] - self.package_type = container_entry_file_config.get("package_type", SchedulerConstants.JOB_PACKAGE_TYPE_DEFAULT) - full_conf_path = 
os.path.join(unzip_package_path, "fedml", "config", os.path.basename(conf_file)) - - # Dynamically build constrain variable with realtime parameters from server - self.build_dynamic_constrain_variables(run_id, run_config) - - # Update entry arguments value with constrain variable values with realtime parameters from server - # currently we support the following constrain variables: - # ${FEDSYS_RUN_ID}: a run id represented one entire Federated Learning flow - # ${FEDSYS_PRIVATE_LOCAL_DATA}: private local data path in the Federated Learning client - # ${FEDSYS_CLIENT_ID_LIST}: client list in one entire Federated Learning flow - # ${FEDSYS_SYNTHETIC_DATA_URL}: synthetic data url from server, - # if this value is not null, the client will download data from this URL to use it as - # federated training data set - # ${FEDSYS_IS_USING_LOCAL_DATA}: whether use private local data as federated training data set - # container_dynamic_args_config["data_cache_dir"] = "${FEDSYS.PRIVATE_LOCAL_DATA}" - for constrain_variable_key, constrain_variable_value in self.FEDML_DYNAMIC_CONSTRAIN_VARIABLES.items(): - for argument_key, argument_value in container_dynamic_args_config.items(): - if argument_value is not None and str(argument_value).find(constrain_variable_key) == 0: - replaced_argument_value = str(argument_value).replace( - constrain_variable_key, str(constrain_variable_value) - ) - container_dynamic_args_config[argument_key] = replaced_argument_value - - # Merge all container new config sections as new config dictionary - package_conf_object = dict() - package_conf_object["entry_config"] = container_entry_file_config - package_conf_object["dynamic_args"] = container_dynamic_args_config - package_conf_object["dynamic_args"]["config_version"] = self.args.config_version - container_dynamic_args_config["mqtt_config_path"] = os.path.join( - unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["mqtt_config_path"]) - ) - container_dynamic_args_config["s3_config_path"] = os.path.join( - unzip_package_path, "fedml", "config", os.path.basename(container_dynamic_args_config["s3_config_path"]) - ) - log_file_dir = ClientConstants.get_log_file_dir() - os.makedirs(log_file_dir, exist_ok=True) - package_conf_object["dynamic_args"]["log_file_dir"] = log_file_dir - - # Save new config dictionary to local file - fedml_updated_config_file = os.path.join(unzip_package_path, "conf", "fedml.yaml") - ClientConstants.generate_yaml_doc(package_conf_object, fedml_updated_config_file) - - # Build dynamic arguments and set arguments to fedml config object - self.build_dynamic_args(run_id, run_config, package_conf_object, unzip_package_path) - return unzip_package_path, package_conf_object - - def build_dynamic_args(self, run_id, run_config, package_conf_object, base_dir): - fedml_conf_file = package_conf_object["entry_config"]["conf_file"] - fedml_conf_file_processed = str(fedml_conf_file).replace('\\', os.sep).replace('/', os.sep) - fedml_conf_path = os.path.join(base_dir, "fedml", "config", - os.path.basename(fedml_conf_file_processed)) - fedml_conf_object = load_yaml_config(fedml_conf_path) - run_params = run_config.get("parameters", {}) - job_yaml = run_params.get("job_yaml", {}) - - # Replace local fedml config objects with parameters from MLOps web - parameters_object = run_config.get("parameters", None) - if parameters_object is not None: - for config_k, config_v in fedml_conf_object.items(): - parameter_v = parameters_object.get(config_k, None) - if parameter_v is not None: - 
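
# The loop above rewrites a dynamic arg only when its value starts with a
# ${FEDSYS.*} placeholder. A self-contained sketch of that substitution pass:
def substitute_fedsys_vars(dynamic_args, fedsys_vars):
    out = dict(dynamic_args)
    for placeholder, value in fedsys_vars.items():
        for arg_key, arg_value in out.items():
            if arg_value is not None and str(arg_value).find(placeholder) == 0:
                out[arg_key] = str(arg_value).replace(placeholder, str(value))
    return out

args = {"data_cache_dir": "${FEDSYS.PRIVATE_LOCAL_DATA}/cache", "rank": 1}
fedsys = {"${FEDSYS.PRIVATE_LOCAL_DATA}": "/fedml/data"}
assert substitute_fedsys_vars(args, fedsys)["data_cache_dir"] == "/fedml/data/cache"
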
fedml_conf_object[config_k] = parameter_v - parameters_object.pop(config_k) - - for config_k, config_v in parameters_object.items(): - fedml_conf_object[config_k] = config_v - - package_dynamic_args = package_conf_object["dynamic_args"] - if fedml_conf_object.get("comm_args", None) is not None: - fedml_conf_object["comm_args"]["mqtt_config_path"] = package_dynamic_args["mqtt_config_path"] - fedml_conf_object["comm_args"]["s3_config_path"] = package_dynamic_args["s3_config_path"] - fedml_conf_object["common_args"]["using_mlops"] = True - if fedml_conf_object.get("train_args", None) is not None: - fedml_conf_object["train_args"]["run_id"] = package_dynamic_args["run_id"] - fedml_conf_object["train_args"]["client_id_list"] = package_dynamic_args["client_id_list"] - fedml_conf_object["train_args"]["client_num_in_total"] = int(package_dynamic_args["client_num_in_total"]) - fedml_conf_object["train_args"]["client_num_per_round"] = int(package_dynamic_args["client_num_in_total"]) - fedml_conf_object["train_args"]["client_id"] = self.edge_id - fedml_conf_object["train_args"]["server_id"] = self.request_json.get("server_id", "0") - if fedml_conf_object.get("device_args", None) is not None: - fedml_conf_object["device_args"]["worker_num"] = int(package_dynamic_args["client_num_in_total"]) - # fedml_conf_object["data_args"]["data_cache_dir"] = package_dynamic_args["data_cache_dir"] - data_args = fedml_conf_object.get("data_args") - if data_args is not None: - data_cache_dir = fedml_conf_object["data_args"].get("data_cache_dir") - if data_cache_dir is not None: - data_cache_dir = os.path.join(data_cache_dir, str(self.edge_id)) - fedml_conf_object["data_args"]["data_cache_dir"] = data_cache_dir - if fedml_conf_object.get("tracking_args", None) is not None: - fedml_conf_object["tracking_args"]["log_file_dir"] = package_dynamic_args["log_file_dir"] - fedml_conf_object["tracking_args"]["log_server_url"] = package_dynamic_args["log_server_url"] - - fedml_conf_object["dynamic_args"] = package_dynamic_args - self.fedml_config_object = fedml_conf_object.copy() - ClientConstants.generate_yaml_doc(fedml_conf_object, fedml_conf_path) - - def run_bootstrap_script(self, bootstrap_cmd_list, bootstrap_script_file): - try: - logging.info("Bootstrap commands are being executed...") - process, error_list = ClientConstants.execute_commands_with_live_logs(bootstrap_cmd_list, - callback=self.callback_run_bootstrap) - - ret_code, out, err = process.returncode, None, None - if ret_code is None or ret_code <= 0: - if error_list is not None and len(error_list) > 0: - is_bootstrap_run_ok = False - else: - if out is not None: - out_str = sys_utils.decode_our_err_result(out) - if out_str != "": - logging.info("{}".format(out_str)) - - sys_utils.log_return_info(bootstrap_script_file, 0) - - is_bootstrap_run_ok = True - else: - if err is not None: - err_str = sys_utils.decode_our_err_result(err) - if err_str != "": - logging.error("{}".format(err_str)) - - sys_utils.log_return_info(bootstrap_script_file, ret_code) - - is_bootstrap_run_ok = False - except Exception as e: - logging.error(f"Bootstrap script error: Exception: {e}, Traceback: {traceback.format_exc()}") - is_bootstrap_run_ok = False - return is_bootstrap_run_ok - - def callback_run_bootstrap(self, job_pid): - ClientConstants.save_bootstrap_process(self.run_id, job_pid) - - def run(self, process_event, completed_event, message_center_queue): - print(f"Client runner process id {os.getpid()}, run id {self.run_id}") - - if platform.system() != "Windows": - os.setsid() - - 
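
# run_bootstrap_script above decides success from two signals: the process exit
# code and the error lines collected by the live-log callback. A deliberately
# simplified sketch of that test (the real code also tolerates negative return
# codes):
def bootstrap_succeeded(return_code, error_list):
    if return_code not in (None, 0):
        return False
    return not error_list

assert bootstrap_succeeded(0, [])
assert not bootstrap_succeeded(0, ["line 3: command not found"])
assert not bootstrap_succeeded(2, [])
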
os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' - os.environ.setdefault('PYTHONWARNINGS', 'ignore:semaphore_tracker:UserWarning') - - self.run_process_event = process_event - self.run_process_completed_event = completed_event - try: - MLOpsUtils.set_ntp_offset(self.ntp_offset) - self.rebuild_message_center(message_center_queue) - self.run_impl() - except RunnerError: - logging.info("Runner stopped.") - self.reset_devices_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED) - except RunnerCompletedError: - logging.info("Runner completed.") - except Exception as e: - logging.error(f"Runner exited with errors. Exception: {e}, Traceback {traceback.format_exc()}") - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - server_id=self.server_id, run_id=self.run_id) - finally: - if self.mlops_metrics is not None: - computing_ended_time = MLOpsUtils.get_ntp_time() - self.mlops_metrics.report_edge_job_computing_cost(self.run_id, self.edge_id, - self.computing_started_time, computing_ended_time, - self.args.user, self.args.api_key) - logging.info("Release resources.") - self.cleanup_containers_and_release_gpus(self.run_id, self.edge_id) - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(self.run_id, self.edge_id) - if self.mlops_metrics is not None: - self.mlops_metrics.stop_sys_perf() - time.sleep(3) - ClientConstants.cleanup_learning_process(self.run_id) - ClientConstants.cleanup_run_process(self.run_id) - - def check_runner_stop_event(self): - if self.run_process_event.is_set(): - logging.info("Received stopping event.") - raise RunnerError("Runner stopped") - - if self.run_process_completed_event.is_set(): - logging.info("Received completed event.") - raise RunnerCompletedError("Runner completed") - - def run_impl(self): - run_id = self.request_json["runId"] - run_config = self.request_json["run_config"] - data_config = run_config.get("data_config", {}) - packages_config = run_config["packages_config"] - - self.computing_started_time = MLOpsUtils.get_ntp_time() - self.mlops_metrics.report_edge_job_computing_cost(run_id, self.edge_id, - self.computing_started_time, 0, - self.args.user, self.args.api_key) - - self.check_runner_stop_event() - - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_INITIALIZING, - running_json=self.start_request_json, run_id=run_id) - - # get training params - private_local_data_dir = data_config.get("privateLocalData", "") - is_using_local_data = 0 - # if private_local_data_dir is not None and len(str(private_local_data_dir).strip(' ')) > 0: - # is_using_local_data = 1 - - # start a run according to the hyper-parameters - # fedml_local_data_dir = self.cur_dir + "/fedml_data/run_" + run_id_str + "_edge_" + str(edge_id) - fedml_local_data_dir = os.path.join(self.cur_dir, "fedml_data") - fedml_local_config_dir = os.path.join(self.cur_dir, "fedml_config") - if is_using_local_data: - fedml_local_data_dir = private_local_data_dir - self.fedml_data_dir = self.fedml_data_local_package_dir - - self.check_runner_stop_event() - - logging.info("Download packages") - - # update local config with real time parameters from server and dynamically replace variables value - unzip_package_path, fedml_config_object = self.update_local_fedml_config(run_id, run_config) - # if unzip_package_path is None or fedml_config_object is None: - # 
logging.info("failed to update local fedml config.") - # self.check_runner_stop_event() - # # Send failed msg when exceptions. - # self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION) - # return - - logging.info("Check downloaded packages...") - - entry_file_config = fedml_config_object["entry_config"] - dynamic_args_config = fedml_config_object["dynamic_args"] - entry_file = str(entry_file_config["entry_file"]).replace('\\', os.sep).replace('/', os.sep) - entry_file = os.path.basename(entry_file) - conf_file = entry_file_config["conf_file"] - conf_file = str(conf_file).replace('\\', os.sep).replace('/', os.sep) - ##### - # ClientConstants.cleanup_learning_process(run_id) - # ClientConstants.cleanup_bootstrap_process(run_id) - ##### - - if not os.path.exists(unzip_package_path): - logging.info("failed to unzip file.") - self.check_runner_stop_event() - # Send failed msg when exceptions. - self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION) - return - os.chdir(os.path.join(unzip_package_path, "fedml")) - - self.check_runner_stop_event() - - logging.info("starting the user process...") - - entry_file_full_path = os.path.join(unzip_package_path, "fedml", entry_file) - conf_file_full_path = os.path.join(unzip_package_path, "fedml", conf_file) - logging.info("waiting the user process to finish...") - logging.info(" ") - logging.info(" ") - logging.info("====Your Run Logs Begin===") - - process, is_launch_task, error_list = self.execute_job_task(unzip_package_path=unzip_package_path, - entry_file_full_path=entry_file_full_path, - conf_file_full_path=conf_file_full_path, - dynamic_args_config=dynamic_args_config, - fedml_config_object=self.fedml_config_object) - - logging.info("====Your Run Logs End===") - logging.info(" ") - logging.info(" ") - - ret_code, out, err = process.returncode if process else None, None, None - is_run_ok = sys_utils.is_runner_finished_normally(process.pid) - if is_launch_task: - is_run_ok = True - if error_list is not None and len(error_list) > 0: - is_run_ok = False - if ret_code is None or ret_code <= 0: - self.check_runner_stop_event() - - if is_run_ok: - if out is not None: - out_str = sys_utils.decode_our_err_result(out) - if out_str != "": - logging.info("{}".format(out_str)) - - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - server_id=self.server_id, run_id=run_id) - - if is_launch_task: - sys_utils.log_return_info(f"job {run_id}", ret_code) - else: - sys_utils.log_return_info(entry_file, ret_code) - else: - is_run_ok = False - - if not is_run_ok: - # If the run status is killed or finished, then return with the normal state. - current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id) - if current_job is not None and (current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or - current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED): - return - - self.check_runner_stop_event() - - logging.error("failed to run the learning process...") - - if err is not None: - err_str = sys_utils.decode_our_err_result(err) - if err_str != "": - logging.error("{}".format(err_str)) - - if is_launch_task: - sys_utils.log_return_info(f"job {run_id}", ret_code) - else: - sys_utils.log_return_info(entry_file, ret_code) - - # Send failed msg when exceptions. 
- self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, - server_id=self.server_id, run_id=run_id) - - def execute_job_task(self, unzip_package_path, entry_file_full_path, conf_file_full_path, dynamic_args_config, - fedml_config_object): - run_config = self.request_json["run_config"] - run_params = run_config.get("parameters", {}) - client_rank = self.request_json.get("client_rank", 1) - job_yaml = run_params.get("job_yaml", {}) - job_yaml_default_none = run_params.get("job_yaml", None) - job_api_key = job_yaml.get("run_api_key", None) - job_api_key = job_yaml.get("fedml_run_dynamic_params", None) if job_api_key is None else job_api_key - assigned_gpu_ids = run_params.get("gpu_ids", None) - job_type = job_yaml.get("job_type", None) - containerize = fedml_config_object.get("containerize", None) - image_pull_policy = fedml_config_object.get("image_pull_policy", Constants.IMAGE_PULL_POLICY_ALWAYS) - # TODO: Can we remove task_type? - job_type = job_yaml.get("task_type", Constants.JOB_TASK_TYPE_TRAIN) if job_type is None else job_type - conf_file_object = load_yaml_config(conf_file_full_path) - entry_args_dict = conf_file_object.get("fedml_entry_args", {}) - entry_args = entry_args_dict.get("arg_items", None) - scheduler_match_info = self.request_json.get("scheduler_match_info", {}) - if job_type == Constants.JOB_TASK_TYPE_TRAIN: - containerize = True if containerize is None else containerize - - # Bootstrap Info - bootstrap_script_path, bootstrap_script_dir, bootstrap_script_file = [None] * 3 - env_args = fedml_config_object.get("environment_args", None) - - if env_args is not None: - bootstrap_script_file = env_args.get("bootstrap", None) - if bootstrap_script_file is not None: - bootstrap_script_file = str(bootstrap_script_file).replace('\\', os.sep).replace('/', os.sep) - if platform.system() == 'Windows': - bootstrap_script_file = bootstrap_script_file.rstrip('.sh') + '.bat' - if bootstrap_script_file is not None: - bootstrap_script_dir = os.path.join(unzip_package_path, "fedml", - os.path.dirname(bootstrap_script_file)) - bootstrap_script_path = os.path.join( - bootstrap_script_dir, bootstrap_script_dir, os.path.basename(bootstrap_script_file) - ) - - bootstrap_cmd_list = list() - if bootstrap_script_path: - logging.info("Bootstrap commands are being generated...") - bootstrap_cmd_list = JobRunnerUtils.generate_bootstrap_commands(bootstrap_script_path=bootstrap_script_path, - bootstrap_script_dir=bootstrap_script_dir, - bootstrap_script_file=bootstrap_script_file) - logging.info(f"Generated following Bootstrap commands: {bootstrap_cmd_list}") - - if not containerize: - if len(bootstrap_cmd_list) and not (job_type == Constants.JOB_TASK_TYPE_DEPLOY or - job_type == Constants.JOB_TASK_TYPE_SERVE): - bootstrapping_successful = self.run_bootstrap_script(bootstrap_cmd_list=bootstrap_cmd_list, - bootstrap_script_file=bootstrap_script_file) - - if not bootstrapping_successful: - logging.info("failed to update local fedml config.") - self.check_runner_stop_event() - # Send failed msg when exceptions. 
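
# execute_job_task above normalizes the bootstrap script path per-OS and defaults
# training jobs to containerized execution when no flag is set. A sketch of both
# rules; the suffix swap uses slicing since str.rstrip strips a character set,
# not a suffix, and `train_type` is a placeholder for Constants.JOB_TASK_TYPE_TRAIN.
import os
import platform

def normalize_bootstrap_script(path):
    path = str(path).replace("\\", os.sep).replace("/", os.sep)
    if platform.system() == "Windows" and path.endswith(".sh"):
        path = path[:-len(".sh")] + ".bat"
    return path

def effective_containerize(job_type, containerize_flag, train_type="train"):
    if containerize_flag is None:
        return job_type == train_type
    return containerize_flag

assert effective_containerize("train", None) is True
assert effective_containerize("deploy", None) is False
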
- self.cleanup_run_when_starting_failed(status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION) - raise Exception(f"Failed to execute following bootstrap commands: {bootstrap_cmd_list}") - - logging.info("cleanup the previous learning process and bootstrap process...") - ClientConstants.cleanup_learning_process(self.request_json["runId"]) - ClientConstants.cleanup_bootstrap_process(self.request_json["runId"]) - - executable_interpreter = ClientConstants.CLIENT_SHELL_PS \ - if platform.system() == ClientConstants.PLATFORM_WINDOWS else ClientConstants.CLIENT_SHELL_BASH - - if job_yaml_default_none is None: - # Generate the job executing commands for previous federated learning (Compatibility) - python_program = get_python_program() - logging.info("Run the client: {} {} --cf {} --rank {} --role client".format( - python_program, entry_file_full_path, conf_file_full_path, str(dynamic_args_config.get("rank", 1)))) - rank = str(dynamic_args_config.get("rank", 1)) - entry_command = f"{python_program} {entry_file_full_path} --cf " \ - f"{conf_file_full_path} --rank {rank} --role client" - shell_cmd_list = [entry_command] - - # Run the job executing commands for previous federated learning (Compatibility) - process, error_list = ClientConstants.execute_commands_with_live_logs( - shell_cmd_list, callback=self.callback_start_fl_job, should_write_log_file=False) - is_launch_task = False - else: - self.check_runner_stop_event() - - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_RUNNING, run_id=self.run_id) - - # Generate the job executing commands - job_executing_commands = JobRunnerUtils.generate_job_execute_commands( - self.run_id, self.edge_id, self.version, - self.package_type, executable_interpreter, entry_file_full_path, - conf_file_object, entry_args, assigned_gpu_ids, - job_api_key, client_rank, scheduler_match_info=scheduler_match_info, - cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str) - - if containerize is not None and containerize is True: - docker_args = fedml_config_object.get("docker", {}) - docker_args = JobRunnerUtils.create_instance_from_dict(DockerArgs, docker_args) - try: - job_executing_commands = JobRunnerUtils.generate_launch_docker_command(docker_args=docker_args, - run_id=self.run_id, - edge_id=self.edge_id, - unzip_package_path=unzip_package_path, - executable_interpreter=executable_interpreter, - entry_file_full_path=entry_file_full_path, - bootstrap_cmd_list=bootstrap_cmd_list, - cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str, - image_pull_policy=image_pull_policy) - except Exception as e: - logging.error(f"Error occurred while generating containerized launch commands. 
" - f"Exception: {e}, Traceback: {traceback.format_exc()}") - return None, None, None - - if not job_executing_commands: - raise Exception("Failed to generate docker execution command") - - # Run the job executing commands - logging.info(f"Run the client job with job id {self.run_id}, device id {self.edge_id}.") - process, error_list = ClientConstants.execute_commands_with_live_logs( - job_executing_commands, callback=self.start_job_perf, error_processor=self.job_error_processor, - should_write_log_file=False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True) - is_launch_task = False if job_type == Constants.JOB_TASK_TYPE_FEDERATE else True - - return process, is_launch_task, error_list - - def callback_start_fl_job(self, job_pid): - ClientConstants.save_learning_process(self.run_id, job_pid) - self.mlops_metrics.report_sys_perf( - self.args, self.agent_config["mqtt_config"], job_process_id=job_pid) - - def start_job_perf(self, job_pid): - ClientConstants.save_learning_process(self.run_id, job_pid) - self.mlops_metrics.report_job_perf(self.args, self.agent_config["mqtt_config"], job_pid) - - def job_error_processor(self, error_list): - self.check_runner_stop_event() - - error_str = "\n".join(error_list) - error_message = f"Error occurred when running the job... {error_str}" - logging.error(error_message) - raise Exception(error_message) - - def reset_devices_status(self, edge_id, status, should_send_client_id_status=True): - self.mlops_metrics.run_id = self.run_id - self.mlops_metrics.edge_id = edge_id - - if should_send_client_id_status: - if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION: - self.mlops_metrics.report_client_id_status( - edge_id, status, server_id=self.server_id, run_id=self.run_id) - - def sync_run_stop_status(self, run_status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED): - try: - if self.run_process_event is not None: - self.run_process_event.set() - - self.mlops_metrics.report_client_id_status( - self.edge_id, run_status, server_id=self.server_id, run_id=self.run_id) - except Exception as e: - logging.error(f"Failed to sync run stop status with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - def cleanup_run_when_starting_failed( - self, status=ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED, should_send_client_id_status=True): - # logging.error("Cleanup run successfully when starting failed.") - - self.reset_devices_status( - self.edge_id, status, should_send_client_id_status=should_send_client_id_status) - - time.sleep(2) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}") - pass - - time.sleep(1) - - try: - ClientConstants.cleanup_learning_process(self.run_id) - ClientConstants.cleanup_bootstrap_process(self.run_id) - ClientConstants.cleanup_run_process(self.run_id) - except Exception as e: - logging.error( - f"Failed to cleanup run when starting failed with Exception {e}. 
Traceback: {traceback.format_exc()}") - pass - - def cleanup_run_when_finished(self): - # logging.info("Cleanup run successfully when finished.") - - self.reset_devices_status(self.edge_id, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED, - should_send_client_id_status=False) - - time.sleep(2) - - try: - self.mlops_metrics.stop_sys_perf() - except Exception as ex: - logging.error(f"Failed to stop sys perf with Exception {ex}. Traceback: {traceback.format_exc()}") - pass - - time.sleep(1) - - try: - ClientConstants.cleanup_learning_process(self.run_id) - ClientConstants.cleanup_bootstrap_process(self.run_id) - ClientConstants.cleanup_run_process(self.run_id) - except Exception as e: - logging.error( - f"Failed to cleanup run when finished with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - def setup_message_center(self): - if self.message_center is not None: - return - - self.message_center = FedMLMessageCenter(agent_config=self.agent_config) - self.message_center.start_sender() - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.message_center) - self.mlops_metrics.run_id = self.run_id - - def rebuild_message_center(self, message_center_queue): - self.message_center = FedMLMessageCenter(message_queue=message_center_queue) - - if self.mlops_metrics is None: - self.mlops_metrics = MLOpsMetrics() - self.mlops_metrics.set_messenger(self.message_center) - self.mlops_metrics.run_id = self.run_id - - def release_message_center(self): - try: - if self.message_center is not None: - self.message_center.stop() - self.message_center = None - - except Exception as e: - logging.error( - f"Failed to release client mqtt manager with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - def ota_upgrade(self, payload, request_json): - run_id = request_json["runId"] - force_ota = False - ota_version = None - - try: - run_config = request_json.get("run_config", None) - parameters = run_config.get("parameters", None) - common_args = parameters.get("common_args", None) - force_ota = common_args.get("force_ota", False) if common_args is not None else False - ota_version = common_args.get("ota_version", None) if common_args is not None else None - except Exception as e: - logging.error( - f"Failed to get ota upgrade parameters with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - if force_ota and ota_version is not None: - should_upgrade = True if ota_version != fedml.__version__ else False - upgrade_version = ota_version - else: - try: - fedml_is_latest_version, local_ver, remote_ver = sys_utils.check_fedml_is_latest_version(self.version) - except Exception as e: - logging.error(f"Failed to check fedml version with Exception {e}. Traceback: {traceback.format_exc()}") - return - - should_upgrade = False if fedml_is_latest_version else True - upgrade_version = remote_ver - - if should_upgrade: - FedMLClientDataInterface.get_instance(). 
\ - save_started_job(run_id, self.edge_id, time.time(), - ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, - ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, - payload) - self.mlops_metrics.report_client_id_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING, run_id=run_id) - - logging.info(f"Upgrade to version {upgrade_version} ...") - - sys_utils.do_upgrade(self.version, upgrade_version) - raise Exception("Restarting after upgraded...") - - def callback_start_train(self, topic, payload): - # Get training params - - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json["runId"] - - # Start log processor for current run - train_edge_id = str(topic).split("/")[-2] - self.args.run_id = run_id - self.args.edge_id = train_edge_id - MLOpsRuntimeLog.get_instance(self.args).init_logs(log_level=logging.INFO) - MLOpsRuntimeLogDaemon.get_instance(self.args).start_log_processor( - run_id, train_edge_id, log_source=SchedulerConstants.get_log_source(request_json)) - logging.info("start the log processor") - - try: - MLOpsConfigs.fetch_all_configs() - except Exception as e: - logging.error(f"Failed to fetch all configs with Exception {e}. Traceback: {traceback.format_exc()}") - pass - - if not FedMLClientDataInterface.get_instance().get_agent_status(): - request_json = json.loads(payload) - run_id = request_json["runId"] - logging.error( - "FedMLDebug - Receive: topic ({}), payload ({}), but the client agent is disabled. {}".format( - topic, payload, traceback.format_exc() - ) - ) - # Send failed msg when exceptions. - self.mlops_metrics.report_client_id_status( - train_edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION, run_id=run_id, - msg=f"the client agent {train_edge_id} is disabled") - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, train_edge_id) - return - - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - # Terminate previous process about starting or stopping run command - logging.info("cleanup and save runner information") - server_agent_id = request_json["cloud_agent_id"] - ClientConstants.save_runner_infos(self.args.device_id + "." 
+ self.args.os_name, train_edge_id, run_id=run_id) - - # OTA upgrade - # self.ota_upgrade(payload, request_json) - - # Occupy GPUs - scheduler_match_info = request_json.get("scheduler_match_info", {}) - matched_gpu_num = scheduler_match_info.get("matched_gpu_num", 0) - model_master_device_id = scheduler_match_info.get("model_master_device_id", None) - model_slave_device_id = scheduler_match_info.get("model_slave_device_id", None) - model_slave_device_id_list = scheduler_match_info.get("model_slave_device_id_list", None) - run_config = request_json.get("run_config", {}) - run_params = run_config.get("parameters", {}) - serving_args = run_params.get("serving_args", {}) - endpoint_id = serving_args.get("endpoint_id", None) - job_yaml = run_params.get("job_yaml", {}) - job_type = job_yaml.get("job_type", SchedulerConstants.JOB_TASK_TYPE_TRAIN) - cuda_visible_gpu_ids_str = None - if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or - job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY): - cuda_visible_gpu_ids_str = JobRunnerUtils.get_instance().occupy_gpu_ids( - run_id, matched_gpu_num, train_edge_id, inner_id=endpoint_id, - model_master_device_id=model_master_device_id, - model_slave_device_id=model_slave_device_id) - logging.info( - f"Run started, available gpu ids: {JobRunnerUtils.get_instance().get_available_gpu_id_list(train_edge_id)}") - - # Start server with multiprocessing mode - self.request_json = request_json - run_id_str = str(run_id) - self.running_request_json[run_id_str] = request_json - client_runner = FedMLClientRunner( - self.args, edge_id=train_edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id, - cuda_visible_gpu_ids_str=cuda_visible_gpu_ids_str - ) - client_runner.start_request_json = payload - self.run_process_event_map[run_id_str] = multiprocessing.Event() - self.run_process_event_map[run_id_str].clear() - client_runner.run_process_event = self.run_process_event_map[run_id_str] - self.run_process_completed_event_map[run_id_str] = multiprocessing.Event() - self.run_process_completed_event_map[run_id_str].clear() - client_runner.run_process_completed_event = self.run_process_completed_event_map[run_id_str] - client_runner.server_id = request_json.get("server_id", "0") - logging.info("start the runner process.") - self.run_process_map[run_id_str] = Process(target=client_runner.run, args=( - self.run_process_event_map[run_id_str], self.run_process_completed_event_map[run_id_str], - self.message_center.get_message_queue())) - self.run_process_map[run_id_str].start() - ClientConstants.save_run_process(run_id, self.run_process_map[run_id_str].pid) - - def callback_stop_train(self, topic, payload): - # logging.info("callback_stop_train: topic = %s, payload = %s" % (topic, payload)) - # logging.info( - # f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - # ) - - train_edge_id = str(topic).split("/")[-2] - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json.get("runId", None) - if run_id is None: - run_id = request_json.get("id", None) - run_status = request_json.get("run_status", ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED) - - # logging.info("Stop run with multiprocessing...") - - # Stop client with multiprocessing mode - run_id_str = str(run_id) - client_runner = FedMLClientRunner( - self.args, edge_id=train_edge_id, request_json=request_json, agent_config=self.agent_config, run_id=run_id - ) - self.cleanup_containers_and_release_gpus(run_id, 
train_edge_id) - client_runner.run_process_event = self.run_process_event_map.get(run_id_str, None) - client_runner.run_process = self.run_process_map.get(run_id_str, None) - client_runner.message_center = self.message_center - client_runner.mlops_metrics = self.mlops_metrics - client_runner.sync_run_stop_status(run_status=run_status) - - def cleanup_containers_and_release_gpus(self, run_id, edge_id): - job_type = JobRunnerUtils.get_job_type_from_run_id(run_id) - - if not job_type: - logging.info(f"Failed to get job type from run id {run_id}. This is not an error as it would usually " - f"happen when the job is not found in the database because job is already finished and " - f"cleaned up. Exiting cleanup_containers_and_release_gpus.") - return - - # Check if the job type is not "serve" or "deploy" - if not (job_type == SchedulerConstants.JOB_TASK_TYPE_SERVE or - job_type == SchedulerConstants.JOB_TASK_TYPE_DEPLOY): - - # Terminate the run docker container if exists - container_name = JobRunnerUtils.get_run_container_name(run_id) - docker_client = JobRunnerUtils.get_docker_client(DockerArgs()) - logging.info(f"Terminating the run docker container {container_name} if exists...") - try: - JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client) - except Exception as e: - logging.error(f"Exception {e} occurred when terminating docker container. " - f"Traceback: {traceback.format_exc()}") - - # Release the GPU ids and update the GPU availability in the persistent store - JobRunnerUtils.get_instance().release_gpu_ids(run_id, edge_id) - - # Send mqtt message reporting the new gpu availability to the backend - MLOpsDevicePerfStats.report_gpu_device_info(self.edge_id, mqtt_mgr=self.mqtt_mgr) - - def cleanup_client_with_status(self): - if self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED: - # logging.info("received to finished status.") - self.cleanup_run_when_finished() - elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED: - # logging.error("received to failed status from the server agent") - self.cleanup_run_when_starting_failed(should_send_client_id_status=False) - elif self.device_status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED: - # logging.error("received to failed status from the server agent") - self.cleanup_run_when_starting_failed(status=self.device_status, should_send_client_id_status=False) - - def callback_runner_id_status(self, topic, payload): - # logging.info("callback_runner_id_status: topic = %s, payload = %s" % (topic, payload)) - # logging.info(f"FedMLDebug - Receive: topic ({topic}), payload ({payload})") - request_json = json.loads(payload) - is_retain = request_json.get("is_retain", False) - if is_retain: - return - run_id = request_json["run_id"] - edge_id = str(topic).split("/")[-2].split('_')[-1] - status = request_json["status"] - run_id_str = str(run_id) - - self.save_training_status( - edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED - if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_EXCEPTION else status) - - if status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FINISHED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_FAILED or \ - status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_KILLED: - completed_event = self.run_process_completed_event_map.get(run_id_str, None) - if completed_event is not None: - completed_event.set() - - # Stop client with multiprocessing mode - client_runner = FedMLClientRunner( - self.args, - edge_id=edge_id, - request_json=request_json, - 
agent_config=self.agent_config, - run_id=run_id, - ) - client_runner.device_status = status - client_runner.message_center = self.message_center - client_runner.mlops_metrics = self.mlops_metrics - client_runner.cleanup_client_with_status() - - running_json = self.running_request_json.get(run_id_str) - if running_json is None: - try: - current_job = FedMLClientDataInterface.get_instance().get_job_by_id(run_id) - running_json = json.loads(current_job.running_json) - except Exception as e: - logging.error(f"Failed to get running json with Exception {e}. Traceback: {traceback.format_exc()}") - - if running_json is not None: - job_type = JobRunnerUtils.parse_job_type(running_json) - if not SchedulerConstants.is_deploy_job(job_type): - logging.info(f"[run/device][{run_id}/{edge_id}] Release gpu resource when run ended.") - self.cleanup_containers_and_release_gpus(run_id, edge_id) - - run_process = self.run_process_map.get(run_id_str, None) - if run_process is not None: - if run_process.pid is not None: - RunProcessUtils.kill_process(run_process.pid) - - # Terminate the run docker container if exists - try: - container_name = JobRunnerUtils.get_run_container_name(run_id) - docker_client = JobRunnerUtils.get_docker_client(DockerArgs()) - logging.info(f"Terminating the run docker container {container_name} if exists...") - JobRunnerUtils.remove_run_container_if_exists(container_name, docker_client) - except Exception as e: - logging.error(f"Error occurred when terminating docker container." - f"Exception: {e}, Traceback: {traceback.format_exc()}.") - - self.run_process_map.pop(run_id_str) - - # Stop log processor for current run - MLOpsRuntimeLogDaemon.get_instance(self.args).stop_log_processor(run_id, edge_id) - - def callback_report_current_status(self, topic, payload): - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - self.send_agent_active_msg() - if self.general_edge_id is not None: - self.send_agent_active_msg(self.general_edge_id) - - @staticmethod - def process_ota_upgrade_msg(): - os.system("pip install -U fedml") - - @staticmethod - def callback_client_ota_msg(topic, payload): - logging.info( - f"FedMLDebug - Receive: topic ({topic}), payload ({payload})" - ) - - request_json = json.loads(payload) - cmd = request_json["cmd"] - - if cmd == ClientConstants.FEDML_OTA_CMD_UPGRADE: - FedMLClientRunner.process_ota_upgrade_msg() - # Process(target=FedMLClientRunner.process_ota_upgrade_msg).start() - raise Exception("After upgraded, restart runner...") - elif cmd == ClientConstants.FEDML_OTA_CMD_RESTART: - raise Exception("Restart runner...") - - def get_all_run_process_list_map(self): - run_process_dict = dict() - for run_id_str, process in self.run_process_map.items(): - cur_run_process_list = ClientConstants.get_learning_process_list(run_id_str) - run_process_dict[run_id_str] = cur_run_process_list - - return run_process_dict - - def response_device_info_to_mlops(self, topic, payload): - payload_json = json.loads(payload) - server_id = payload_json.get("server_id", 0) - run_id = payload_json.get("run_id", 0) - listen_edge_id = str(topic).split("/")[-1] - context = payload_json.get("context", None) - need_gpu_info = payload_json.get("need_gpu_info", False) - need_running_process_list = payload_json.get("need_running_process_list", False) - response_topic = f"deploy/slave_agent/mlops/response_device_info" - if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \ - self.model_device_server_id is not None: - if not 
need_gpu_info: - device_info_json = { - "edge_id": listen_edge_id, - "fedml_version": fedml.__version__, - "user_id": self.args.user - } - else: - total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, gpu_cores_total, \ - gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = sys_utils.get_sys_realtime_stats() - host_ip = sys_utils.get_host_ip() - host_port = sys_utils.get_available_port() - gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(self.edge_id) - gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids) - gpu_cores_available = len(gpu_available_ids) - gpu_list = sys_utils.get_gpu_list() - device_info_json = { - "edge_id": listen_edge_id, - "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2), - "memoryAvailable": round(free_mem * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "cpuUtilization": round(cup_utilization, 2), - "cpuCores": cpu_cores, - "gpuCoresTotal": gpu_cores_total, - "gpuCoresAvailable": gpu_cores_available, - "gpu_available_ids": gpu_available_ids, - "gpu_list": gpu_list, - "node_ip": host_ip, - "node_port": host_port, - "networkTraffic": sent_bytes + recv_bytes, - "updateTime": int(MLOpsUtils.get_ntp_time()), - "fedml_version": fedml.__version__, - "user_id": self.args.user - } - if need_running_process_list: - device_info_json["run_process_list_map"] = self.get_all_run_process_list_map() - salve_device_ids = list() - for model_client_edge_id in self.model_device_client_edge_id_list: - salve_device_ids.append(model_client_edge_id) - response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0], - "slave_device_id_list": salve_device_ids, - "master_device_id": self.model_device_server_id, - "run_id": run_id, "edge_id": listen_edge_id, - "edge_info": device_info_json} - if context is not None: - response_payload["context"] = context - self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id) - - def callback_report_device_info(self, topic, payload): - payload_json = json.loads(payload) - server_id = payload_json.get("server_id", 0) - run_id = payload_json.get("run_id", 0) - listen_edge_id = str(topic).split("/")[-1] - context = payload_json.get("context", None) - need_gpu_info = payload_json.get("need_gpu_info", False) - need_running_process_list = payload_json.get("need_running_process_list", False) - response_topic = f"client/server/response_device_info/{server_id}" - if self.mlops_metrics is not None and self.model_device_client_edge_id_list is not None and \ - self.model_device_server_id is not None: - if not need_gpu_info: - device_info_json = { - "edge_id": listen_edge_id, - "fedml_version": fedml.__version__, - "user_id": self.args.user - } - else: - total_mem, free_mem, total_disk_size, free_disk_size, cup_utilization, cpu_cores, gpu_cores_total, \ - gpu_cores_available, sent_bytes, recv_bytes, gpu_available_ids = sys_utils.get_sys_realtime_stats() - host_ip = sys_utils.get_host_ip() - host_port = sys_utils.get_available_port() - gpu_available_ids = JobRunnerUtils.get_available_gpu_id_list(self.edge_id) - gpu_available_ids = JobRunnerUtils.trim_unavailable_gpu_ids(gpu_available_ids) - gpu_cores_available = len(gpu_available_ids) - gpu_list = sys_utils.get_gpu_list() - device_info_json = { - "edge_id": listen_edge_id, - "memoryTotal": round(total_mem * MLOpsUtils.BYTES_TO_GB, 2), - "memoryAvailable": 
round(free_mem * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceTotal": round(total_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "diskSpaceAvailable": round(free_disk_size * MLOpsUtils.BYTES_TO_GB, 2), - "cpuUtilization": round(cup_utilization, 2), - "cpuCores": cpu_cores, - "gpuCoresTotal": gpu_cores_total, - "gpuCoresAvailable": gpu_cores_available, - "gpu_available_ids": gpu_available_ids, - "gpu_list": gpu_list, - "node_ip": host_ip, - "node_port": host_port, - "networkTraffic": sent_bytes + recv_bytes, - "updateTime": int(MLOpsUtils.get_ntp_time()), - "fedml_version": fedml.__version__, - "user_id": self.args.user - } - if need_running_process_list: - device_info_json["run_process_list_map"] = self.get_all_run_process_list_map() - salve_device_ids = list() - for model_client_edge_id in self.model_device_client_edge_id_list: - salve_device_ids.append(model_client_edge_id) - response_payload = {"slave_device_id": self.model_device_client_edge_id_list[0], - "slave_device_id_list": salve_device_ids, - "master_device_id": self.model_device_server_id, - "run_id": run_id, "edge_id": listen_edge_id, - "edge_info": device_info_json} - if context is not None: - response_payload["context"] = context - self.message_center.send_message(response_topic, json.dumps(response_payload), run_id=run_id) - - def callback_client_logout(self, topic, payload): - payload_json = json.loads(payload) - secret = payload_json.get("auth", None) - if secret is None or str(secret) != "246b1be6-0eeb-4b17-b118-7d74de1975d4": - return - logging.info("Received the logout request.") - if self.run_process_event is not None: - self.run_process_event.set() - if self.run_process_completed_event is not None: - self.run_process_completed_event.set() - self.disable_client_login = True - time.sleep(3) - os.system("fedml logout") - - def save_training_status(self, edge_id, training_status): - self.current_training_status = training_status - ClientConstants.save_training_infos(edge_id, training_status) - - @staticmethod - def get_gpu_machine_id(): - gpu_list = sys_utils.get_gpu_list() - gpu_uuids = "" - if len(gpu_list) > 0: - for gpu in gpu_list: - gpu_uuids += gpu.get("uuid", "") - else: - gpu_uuids = str(uuid.uuid4()) - device_id_combination = \ - f"{FedMLClientRunner.get_machine_id()}-{hex(uuid.getnode())}-{gpu_uuids}" - device_id = security_utils.get_content_hash(device_id_combination) - return device_id - - @staticmethod - def get_device_id(use_machine_id=False): - device_file_path = os.path.join(ClientConstants.get_data_dir(), - ClientConstants.LOCAL_RUNNER_INFO_DIR_NAME) - file_for_device_id = os.path.join(device_file_path, "devices.id") - if not os.path.exists(device_file_path): - os.makedirs(device_file_path, exist_ok=True) - elif os.path.exists(file_for_device_id): - with open(file_for_device_id, 'r', encoding='utf-8') as f: - device_id_from_file = f.readline() - if device_id_from_file is not None and device_id_from_file != "": - return device_id_from_file - - if platform.system() == "Darwin": - cmd_get_serial_num = "system_profiler SPHardwareDataType | grep Serial | awk '{gsub(/ /,\"\")}{print}' " \ - "|awk -F':' '{print $2}' " - device_id = os.popen(cmd_get_serial_num).read() - device_id = device_id.replace('\n', '').replace(' ', '') - if device_id is None or device_id == "": - if not use_machine_id: - device_id = hex(uuid.getnode()) - else: - device_id = FedMLClientRunner.get_gpu_machine_id() - else: - device_id = "0x" + device_id - else: - if "nt" in os.name: - - def get_uuid(): - guid = "" - try: - cmd = "wmic csproduct get 
uuid" - guid = str(subprocess.check_output(cmd)) - pos1 = guid.find("\\n") + 2 - guid = guid[pos1:-15] - except Exception as ex: - logging.error(f"Failed to get uuid with Exception {ex}. Traceback: {traceback.format_exc()}") - pass - return str(guid) - - device_id = str(get_uuid()) - logging.info(device_id) - elif "posix" in os.name: - device_id = sys_utils.get_device_id_in_docker() - if device_id is None: - if not use_machine_id: - device_id = hex(uuid.getnode()) - else: - device_id = device_id = FedMLClientRunner.get_gpu_machine_id() - else: - device_id = sys_utils.run_subprocess_open( - "hal-get-property --udi /org/freedesktop/Hal/devices/computer --key system.hardware.uuid".split() - ) - device_id = hex(device_id) - - if device_id is not None and device_id != "": - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - else: - device_id = hex(uuid.uuid4()) - with open(file_for_device_id, 'w', encoding='utf-8') as f: - f.write(device_id) - - return device_id - - @staticmethod - def get_machine_id(): - try: - import machineid - return machineid.id().replace('\n', '').replace('\r\n', '').strip() - except Exception as e: - logging.error(f"Failed to get machine id with Exception {e}. Traceback: {traceback.format_exc()}") - return hex(uuid.getnode()) - - @staticmethod - def bind_account_and_device_id(url, account_id, device_id, os_name, api_key="", role="client"): - ip = requests.get('https://checkip.amazonaws.com').text.strip() - fedml_ver, exec_path, os_ver, cpu_info, python_ver, torch_ver, mpi_installed, \ - cpu_usage, available_mem, total_mem, gpu_info, gpu_available_mem, gpu_total_mem, \ - gpu_count, gpu_vendor, cpu_count, gpu_device_name = get_sys_runner_info() - host_name = sys_utils.get_host_name() - json_params = { - "accountid": account_id, - "deviceid": device_id, - "type": os_name, - "state": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE, - "processor": cpu_info, - "core_type": cpu_info, - "network": "", - "role": role, - "os_ver": os_ver, - "memory": total_mem, - "ip": ip, - "api_key": api_key, - "extra_infos": {"fedml_ver": fedml_ver, "exec_path": exec_path, "os_ver": os_ver, - "cpu_info": cpu_info, "python_ver": python_ver, "torch_ver": torch_ver, - "mpi_installed": mpi_installed, "cpu_usage": cpu_usage, - "available_mem": available_mem, "total_mem": total_mem, - "cpu_count": cpu_count, "gpu_count": 0, "host_name": host_name} - } - if gpu_count > 0: - if gpu_total_mem is not None: - json_params["gpu"] = gpu_info if gpu_info is not None else "" + ", Total GPU Memory: " + gpu_total_mem - else: - json_params["gpu"] = gpu_info if gpu_info is not None else "" - json_params["extra_infos"]["gpu_info"] = gpu_info if gpu_info is not None else "" - if gpu_available_mem is not None: - json_params["extra_infos"]["gpu_available_mem"] = gpu_available_mem - if gpu_total_mem is not None: - json_params["extra_infos"]["gpu_total_mem"] = gpu_total_mem - - json_params["extra_infos"]["gpu_count"] = gpu_count - json_params["extra_infos"]["gpu_vendor"] = gpu_vendor - json_params["extra_infos"]["gpu_device_name"] = gpu_device_name - - gpu_available_id_list = sys_utils.get_available_gpu_id_list(limit=gpu_count) - gpu_available_count = len(gpu_available_id_list) if gpu_available_id_list is not None else 0 - gpu_list = sys_utils.get_gpu_list() - json_params["extra_infos"]["gpu_available_count"] = gpu_available_count - json_params["extra_infos"]["gpu_available_id_list"] = gpu_available_id_list - 
json_params["extra_infos"]["gpu_list"] = gpu_list - else: - json_params["gpu"] = "None" - json_params["extra_infos"]["gpu_available_count"] = 0 - json_params["extra_infos"]["gpu_available_id_list"] = [] - json_params["extra_infos"]["gpu_list"] = [] - - _, cert_path = MLOpsConfigs.get_request_params() - if cert_path is not None: - try: - requests.session().verify = cert_path - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - except requests.exceptions.SSLError as err: - logging.error( - f"Failed to bind account and device id with error: {err}, traceback: {traceback.format_exc()}") - MLOpsConfigs.install_root_ca_file() - response = requests.post( - url, json=json_params, verify=True, - headers={"content-type": "application/json", "Connection": "close"} - ) - else: - response = requests.post(url, json=json_params, headers={"Connection": "close"}) - edge_id, user_name, extra_url, general_edge_id = -1, None, None, None - if response.status_code != 200: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - pass - else: - # print("url = {}, response = {}".format(url, response)) - status_code = response.json().get("code") - if status_code == "SUCCESS": - edge_id = response.json().get("data").get("id") - user_name = response.json().get("data").get("userName", None) - extra_url = response.json().get("data").get("url", None) - general_edge_id = response.json().get("data").get("general_edge_id", None) - if edge_id is None or edge_id <= 0: - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - else: - if status_code == SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR: - raise SystemExit(SchedulerConstants.BINDING_ACCOUNT_NOT_EXIST_ERROR) - print(f"Binding to MLOps with response.status_code = {response.status_code}, " - f"response.content: {response.content}") - return -1, None, None, None - return edge_id, user_name, extra_url, general_edge_id - - def fetch_configs(self): - return MLOpsConfigs.fetch_all_configs() - - def send_agent_active_msg(self, edge_id): - active_topic = "flclient_agent/active" - status = MLOpsStatus.get_instance().get_client_agent_status(edge_id) - if ( - status is not None - and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE - and status != ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE - ): - return - - try: - current_job = FedMLClientDataInterface.get_instance().get_job_by_id(self.run_id) - except Exception as e: - logging.error(f"Failed to get current job with Exception {e}. 
Traceback: {traceback.format_exc()}") - current_job = None - if current_job is None: - if status is not None and status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE: - status = ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE - else: - return - else: - status = ClientConstants.get_device_state_from_run_edge_state(current_job.status) - active_msg = {"ID": edge_id, "status": status} - MLOpsStatus.get_instance().set_client_agent_status(edge_id, status) - self.mqtt_mgr.send_message_json(active_topic, json.dumps(active_msg)) - logging.info(f"Send agent active msg {active_msg}") - - def recover_start_train_msg_after_upgrading(self): - try: - current_job = FedMLClientDataInterface.get_instance().get_current_job() - if current_job is not None and \ - current_job.status == ClientConstants.MSG_MLOPS_CLIENT_STATUS_UPGRADING: - logging.info("start training after upgrading.") - topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train" - self.callback_start_train(topic_start_train, current_job.running_json) - except Exception as e: - logging.error(f"recover starting train message after upgrading failed with exception {e}, " - f"Traceback {traceback.format_exc()}") - - def on_agent_mqtt_connected(self, mqtt_client_object): - # The MQTT message topic format is as follows: // - - # Setup MQTT message listener for starting training - topic_start_train = "flserver_agent/" + str(self.edge_id) + "/start_train" - self.add_message_listener(topic_start_train, self.callback_start_train) - self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center) - - # Setup MQTT message listener for stopping training - topic_stop_train = "flserver_agent/" + str(self.edge_id) + "/stop_train" - self.add_message_listener(topic_stop_train, self.callback_stop_train) - self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center) - - - # Setup MQTT message listener for client status switching - topic_client_status = "fl_client/flclient_agent_" + str(self.edge_id) + "/status" - self.add_message_listener(topic_client_status, self.callback_runner_id_status) - self.mqtt_mgr.add_message_listener(topic_client_status, self.listener_message_dispatch_center) - - # Setup MQTT message listener to report current device status. - topic_report_status = "mlops/report_device_status" - self.add_message_listener(topic_report_status, self.callback_report_current_status) - self.mqtt_mgr.add_message_listener(topic_report_status, self.listener_message_dispatch_center) - - # Setup MQTT message listener to OTA messages from the MLOps. - topic_ota_msg = "mlops/flclient_agent_" + str(self.edge_id) + "/ota" - self.add_message_listener(topic_ota_msg, self.callback_client_ota_msg) - self.mqtt_mgr.add_message_listener(topic_ota_msg, self.listener_message_dispatch_center) - - # Setup MQTT message listener to OTA messages from the MLOps. 
- topic_request_device_info = "server/client/request_device_info/" + str(self.edge_id) - self.add_message_listener(topic_request_device_info, self.callback_report_device_info) - self.mqtt_mgr.add_message_listener(topic_request_device_info, self.listener_message_dispatch_center) - - topic_request_edge_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.edge_id}" - self.add_message_listener(topic_request_edge_device_info_from_mlops, self.response_device_info_to_mlops) - self.mqtt_mgr.add_message_listener(topic_request_edge_device_info_from_mlops, self.listener_message_dispatch_center) - - topic_request_deploy_master_device_info_from_mlops = None - if self.model_device_server_id is not None: - topic_request_deploy_master_device_info_from_mlops = f"deploy/mlops/master_agent/request_device_info/{self.model_device_server_id}" - self.add_message_listener(topic_request_deploy_master_device_info_from_mlops, self.response_device_info_to_mlops) - self.mqtt_mgr.add_message_listener(topic_request_deploy_master_device_info_from_mlops, self.listener_message_dispatch_center) - - topic_request_deploy_slave_device_info_from_mlops = None - if self.model_device_client_edge_id_list is not None and len(self.model_device_client_edge_id_list) > 0: - topic_request_deploy_slave_device_info_from_mlops = f"deploy/mlops/slave_agent/request_device_info/{self.model_device_client_edge_id_list[0]}" - self.add_message_listener(topic_request_deploy_slave_device_info_from_mlops, self.response_device_info_to_mlops) - self.mqtt_mgr.add_message_listener(topic_request_deploy_slave_device_info_from_mlops, self.listener_message_dispatch_center) - - # Setup MQTT message listener to logout from MLOps. - topic_client_logout = "mlops/client/logout/" + str(self.edge_id) - self.add_message_listener(topic_client_logout, self.callback_client_logout) - self.mqtt_mgr.add_message_listener(topic_client_logout, self.listener_message_dispatch_center) - - # Subscribe topics for starting train, stopping train and fetching client status. 
- mqtt_client_object.subscribe(topic_start_train, qos=2) - mqtt_client_object.subscribe(topic_stop_train, qos=2) - mqtt_client_object.subscribe(topic_client_status, qos=2) - mqtt_client_object.subscribe(topic_report_status, qos=2) - mqtt_client_object.subscribe(topic_ota_msg, qos=2) - mqtt_client_object.subscribe(topic_request_device_info, qos=2) - mqtt_client_object.subscribe(topic_request_edge_device_info_from_mlops, qos=2) - if topic_request_deploy_master_device_info_from_mlops is not None: - mqtt_client_object.subscribe(topic_request_deploy_master_device_info_from_mlops, qos=2) - if topic_request_deploy_slave_device_info_from_mlops is not None: - mqtt_client_object.subscribe(topic_request_deploy_slave_device_info_from_mlops, qos=2) - mqtt_client_object.subscribe(topic_client_logout, qos=2) - - self.subscribed_topics.clear() - self.subscribed_topics.append(topic_start_train) - self.subscribed_topics.append(topic_stop_train) - self.subscribed_topics.append(topic_client_status) - self.subscribed_topics.append(topic_report_status) - self.subscribed_topics.append(topic_ota_msg) - self.subscribed_topics.append(topic_request_device_info) - self.subscribed_topics.append(topic_request_edge_device_info_from_mlops) - if topic_request_deploy_master_device_info_from_mlops is not None: - self.subscribed_topics.append(topic_request_deploy_master_device_info_from_mlops) - if topic_request_deploy_slave_device_info_from_mlops is not None: - self.subscribed_topics.append(topic_request_deploy_slave_device_info_from_mlops) - self.subscribed_topics.append(topic_client_logout) - - # Subscribe the messages for federated learning. - self.subscribe_fl_msgs() - - # Broadcast the first active message. - self.send_agent_active_msg(self.edge_id) - if self.general_edge_id is not None: - self.send_agent_active_msg(self.general_edge_id) - - # Echo results - MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout() - worker_deploy_id_list = [modeld_device_clint.edge_id for index, modeld_device_clint in - enumerate(self.model_device_client_list)] - print("\nCongratulations, your device is connected to the FedML MLOps platform successfully!") - print(f"Your FedML Edge ID is {str(self.edge_id)}, unique device ID is {str(self.unique_device_id)}, " - f"master deploy ID is {str(self.model_device_server.edge_id)}, " - f"worker deploy ID is {worker_deploy_id_list}" - ) - if self.edge_extra_url is not None and self.edge_extra_url != "": - print(f"You may visit the following url to fill in more information with your device.\n" - f"{self.edge_extra_url}") - MLOpsRuntimeLog.get_instance(self.args).enable_show_log_to_stdout(enable=False) - - from fedml.core.mlops import sync_deploy_id - sync_deploy_id( - self.edge_id, self.model_device_server.edge_id, worker_deploy_id_list) - - # Start the message center for listener - self.start_listener(sender_message_queue=self.message_center.get_message_queue(), - agent_config=self.agent_config) - - def subscribe_fl_msgs(self): - if self.general_edge_id is None: - return - - # Setup MQTT message listener for starting training - topic_start_train = "flserver_agent/" + str(self.general_edge_id) + "/start_train" - self.add_message_listener(topic_start_train, self.callback_start_train) - self.mqtt_mgr.add_message_listener(topic_start_train, self.listener_message_dispatch_center) - - # Setup MQTT message listener for stopping training - topic_stop_train = "flserver_agent/" + str(self.general_edge_id) + "/stop_train" - self.add_message_listener(topic_stop_train, self.callback_stop_train) - 
self.mqtt_mgr.add_message_listener(topic_stop_train, self.listener_message_dispatch_center) - - # Setup MQTT message listener for client status switching - topic_client_status = "fl_client/flclient_agent_" + str(self.general_edge_id) + "/status" - self.add_message_listener(topic_client_status, self.callback_runner_id_status) - self.mqtt_mgr.add_message_listener(topic_client_status, self.listener_message_dispatch_center) - - # Setup MQTT message listener to OTA messages from the MLOps. - topic_request_device_info = "server/client/request_device_info/" + str(self.general_edge_id) - self.add_message_listener(topic_request_device_info, self.callback_report_device_info) - self.mqtt_mgr.add_message_listener(topic_request_device_info, self.listener_message_dispatch_center) - - topic_request_device_info_from_mlops = f"deploy/mlops/client_agent/request_device_info/{self.general_edge_id}" - self.add_message_listener(topic_request_device_info_from_mlops, self.response_device_info_to_mlops) - self.mqtt_mgr.add_message_listener(topic_request_device_info_from_mlops, self.listener_message_dispatch_center) - - # Subscribe topics for starting train, stopping train and fetching client status. - self.mqtt_mgr.subscribe_msg(topic_start_train) - self.mqtt_mgr.subscribe_msg(topic_stop_train) - self.mqtt_mgr.subscribe_msg(topic_client_status) - self.mqtt_mgr.subscribe_msg(topic_request_device_info) - self.mqtt_mgr.subscribe_msg(topic_request_device_info_from_mlops) - - self.subscribed_topics.append(topic_start_train) - self.subscribed_topics.append(topic_stop_train) - self.subscribed_topics.append(topic_client_status) - self.subscribed_topics.append(topic_request_device_info) - self.subscribed_topics.append(topic_request_device_info_from_mlops) - - def on_agent_mqtt_disconnected(self, mqtt_client_object): - MLOpsStatus.get_instance().set_client_agent_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE - ) - pass - - def setup_agent_mqtt_connection(self, service_config): - # Setup MQTT connection - self.mqtt_mgr = MqttManager( - service_config["mqtt_config"]["BROKER_HOST"], - service_config["mqtt_config"]["BROKER_PORT"], - service_config["mqtt_config"]["MQTT_USER"], - service_config["mqtt_config"]["MQTT_PWD"], - service_config["mqtt_config"]["MQTT_KEEPALIVE"], - f"FedML_ClientAgent_Daemon_@{self.user_name}@_@{self.args.current_device_id}@_@{str(uuid.uuid4())}@", - "flclient_agent/last_will_msg", - json.dumps({"ID": self.edge_id, "status": ClientConstants.MSG_MLOPS_CLIENT_STATUS_OFFLINE}) - ) - self.agent_config = service_config - - # Init local database - FedMLClientDataInterface.get_instance().create_job_table() - - # Start the message center to process edge related messages. 
- self.setup_message_center() - - # Start local API services - client_api_cmd = "fedml.computing.scheduler.slave.client_api:api" - client_api_pids = RunProcessUtils.get_pid_from_cmd_line(client_api_cmd) - if client_api_pids is None or len(client_api_pids) <= 0: - python_program = get_python_program() - cur_dir = os.path.dirname(__file__) - fedml_base_dir = os.path.dirname(os.path.dirname(os.path.dirname(cur_dir))) - self.local_api_process = ClientConstants.exec_console_with_script( - "{} -m uvicorn {} --host 0.0.0.0 --port {} " - "--reload --reload-delay 3 --reload-dir {} --log-level critical".format( - python_program, client_api_cmd, ClientConstants.LOCAL_CLIENT_API_PORT, fedml_base_dir), - should_capture_stdout=False, - should_capture_stderr=False - ) - # if self.local_api_process is not None and self.local_api_process.pid is not None: - # print(f"Client local API process id {self.local_api_process.pid}") - - # Setup MQTT connected listener - self.mqtt_mgr.add_connected_listener(self.on_agent_mqtt_connected) - self.mqtt_mgr.add_disconnected_listener(self.on_agent_mqtt_disconnected) - self.mqtt_mgr.connect() - - # Report the IDLE status to MLOps - self.mlops_metrics.report_client_training_status( - self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE) - MLOpsStatus.get_instance().set_client_agent_status(self.edge_id, ClientConstants.MSG_MLOPS_CLIENT_STATUS_IDLE) - - # MLOpsRuntimeLogDaemon.get_instance(self.args).stop_all_log_processor() - self.recover_start_train_msg_after_upgrading() - - infer_host = os.getenv("FEDML_INFER_HOST", None) - infer_redis_addr = os.getenv("FEDML_INFER_REDIS_ADDR", None) - infer_redis_port = os.getenv("FEDML_INFER_REDIS_PORT", None) - infer_redis_password = os.getenv("FEDML_INFER_REDIS_PASSWORD", None) - model_client_num = os.getenv("FEDML_MODEL_WORKER_NUM", None) - os.environ["FEDML_CURRENT_EDGE_ID"] = str(self.edge_id) - - if not ComputeCacheManager.get_instance().set_redis_params(): - os.environ["FEDML_DISABLE_REDIS_CONNECTION"] = "1" - - if self.model_device_client_edge_id_list is None: - self.model_device_client_edge_id_list = list() - if self.model_device_client_list is None: - model_client_num = 1 if model_client_num is None else int(model_client_num) - self.model_device_client_list = list() - for client_index in range(model_client_num): - model_device_client = FedMLModelDeviceClientRunner( - self.args, f"{self.args.current_device_id}_{client_index + 1}", self.args.os_name, - self.args.is_from_docker, self.agent_config) - if infer_host is not None: - model_device_client.infer_host = infer_host - if infer_redis_addr is not None: - model_device_client.redis_addr = infer_redis_addr - if infer_redis_port is not None: - model_device_client.redis_port = infer_redis_port - if infer_redis_password is not None: - model_device_client.redis_password = infer_redis_password - model_device_client.start() - self.model_device_client_list.append(model_device_client) - self.model_device_client_edge_id_list.append(model_device_client.get_edge_id()) - - if self.model_device_server is None: - self.model_device_server = FedMLModelDeviceServerRunner(self.args, self.args.current_device_id, - self.args.os_name, self.args.is_from_docker, - self.agent_config) - if infer_host is not None: - self.model_device_server.infer_host = infer_host - if infer_redis_addr is not None: - self.model_device_server.redis_addr = infer_redis_addr - if infer_redis_port is not None: - self.model_device_server.redis_port = infer_redis_port - if infer_redis_password is not None: - 
self.model_device_server.redis_password = infer_redis_password - - self.model_device_server.start() - self.model_device_server_id = self.model_device_server.get_edge_id() - - JobCleanup.get_instance().sync_data_on_startup(self.edge_id) - - os.environ["FEDML_DEPLOY_MASTER_ID"] = str(self.model_device_server.get_edge_id()) - os.environ["FEDML_DEPLOY_WORKER_IDS"] = str([client.get_edge_id() for client in self.model_device_client_list]) - self.mlops_metrics.stop_device_realtime_perf() - self.mlops_metrics.report_device_realtime_perf(self.args, service_config["mqtt_config"]) - - def start_agent_mqtt_loop(self): - # Start MQTT message loop - try: - self.mqtt_mgr.loop_forever() - except Exception as e: - logging.error(f"Errors in the MQTT loop: Exception {e}, Traceback: {traceback.format_exc()}") - if str(e) == "Restarting after upgraded...": - logging.info("Restarting after upgraded...") - else: - logging.info("Client tracing: {}".format(traceback.format_exc())) - finally: - print("finally") - login_exit_file = os.path.join(ClientConstants.get_log_file_dir(), "exited.log") - with open(login_exit_file, "w") as f: - f.writelines(f"{os.getpid()}.") - - self.stop_agent() - - time.sleep(5) - sys_utils.cleanup_all_fedml_client_login_processes( - ClientConstants.CLIENT_LOGIN_PROGRAM, clean_process_group=False) - sys.exit(1) - - def stop_agent(self): - if self.run_process_event is not None: - self.run_process_event.set() - - if self.model_device_server is not None: - self.model_device_server.stop() - self.model_device_server = None - - if self.model_device_client_list is not None: - for model_client in self.model_device_client_list: - model_client.stop() - self.model_device_client_list.clear() - self.model_device_client_list = None - - if self.mqtt_mgr is not None: - try: - for topic in self.subscribed_topics: - self.mqtt_mgr.unsubscribe_msg(topic) - except Exception as e: - logging.error(f"Unsubscribe topics error: {e}, Traceback: {traceback.format_exc()}") - pass - - self.mqtt_mgr.loop_stop() - self.mqtt_mgr.disconnect() - - self.release_message_center() - - def get_runner(self): - runner = FedMLClientRunner( - self.args, edge_id=self.edge_id, request_json=self.request_json, - agent_config=self.agent_config, run_id=self.run_id, - cuda_visible_gpu_ids_str=self.cuda_visible_gpu_ids_str - ) - runner.edge_user_name = self.user_name - runner.edge_extra_url = self.edge_extra_url - runner.unique_device_id = self.unique_device_id - runner.user_name = self.user_name - runner.general_edge_id = self.general_edge_id - runner.model_device_client_edge_id_list = self.model_device_client_edge_id_list - runner.model_device_server_id = self.model_device_server_id - return runner From 493463e3b4002edb929df966cd1dd409b3a60522 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Thu, 6 Jun 2024 19:14:28 +0000 Subject: [PATCH 123/251] [Deploy] Recursively find the model serving package folder --- .../scheduler/comm_utils/file_utils.py | 13 ++++++ .../model_scheduler/worker_job_runner.py | 40 ++++++------------- 2 files changed, 25 insertions(+), 28 deletions(-) create mode 100644 python/fedml/computing/scheduler/comm_utils/file_utils.py diff --git a/python/fedml/computing/scheduler/comm_utils/file_utils.py b/python/fedml/computing/scheduler/comm_utils/file_utils.py new file mode 100644 index 0000000000..1d8fc6ca83 --- /dev/null +++ b/python/fedml/computing/scheduler/comm_utils/file_utils.py @@ -0,0 +1,13 @@ +import os + + +def find_file_inside_folder(folder_path, file_name): + """ + Recursively search for a file inside a 
folder and its sub-folders. + return the full path of the file if found, otherwise return None. + """ + for root, dirs, files in os.walk(folder_path): + if file_name in files: + return os.path.join(root, file_name) + + return None diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py index 348b760153..3c357e9dab 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -10,6 +10,7 @@ import yaml from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils from fedml.core.mlops import MLOpsRuntimeLog +from fedml.computing.scheduler.comm_utils import file_utils from .device_client_constants import ClientConstants from .device_model_cache import FedMLModelCache from ..scheduler_core.general_constants import GeneralConstants @@ -205,7 +206,7 @@ def run_impl(self, run_extend_queue_list, sender_message_center, # Check if the package is already downloaded unzip_package_path = "" if os.path.exists(os.path.join(models_root_dir, parent_fd)): - unzip_package_path = self.find_previous_downloaded_pkg(os.path.join(models_root_dir, parent_fd), model_name) + unzip_package_path = self.find_previous_downloaded_pkg(os.path.join(models_root_dir, parent_fd)) # Download the package if not found if unzip_package_path == "": @@ -510,30 +511,13 @@ def build_dynamic_constrain_variables(self, run_id, run_config): pass @staticmethod - def find_previous_downloaded_pkg(parent_dir: str, model_name: str) -> str: - unzip_fd = "" - res = "" - - for folder in os.listdir(parent_dir): - if folder.startswith("unzip_fedml_run"): - unzip_fd = os.path.join(parent_dir, folder) - break - - exact_matched = False - - if unzip_fd == "": - return res - - for folder in os.listdir(unzip_fd): - if folder == model_name: - res = os.path.join(unzip_fd, folder) - exact_matched = True - break - - if not exact_matched: - # Use the first folder found - for folder in os.listdir(unzip_fd): - res = os.path.join(unzip_fd, folder) - break - - return res + def find_previous_downloaded_pkg(parent_dir: str) -> str: + """ + Find a folder inside parent_dir that contains the fedml_model_config.yaml file. + """ + res = file_utils.find_file_inside_folder(parent_dir, ClientConstants.MODEL_REQUIRED_MODEL_CONFIG_FILE) + if res is not None: + # return the parent folder of res + return os.path.dirname(res) + else: + return "" From b4cb7c56a68f4a589c9c09e5092914be7dd2b423 Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Thu, 6 Jun 2024 16:01:10 -0400 Subject: [PATCH 124/251] Making sure the unzipped file is a directory during initial deployment. 
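Background for the new guard: by zip convention, directory entries in an archive's name list carry a trailing "/" (this is also exactly what zipfile.ZipInfo.is_dir() checks), so testing namelist()[0] for that slash distinguishes a packaged folder from a bare file. A minimal, self-contained sketch of the invariant this patch relies on — the archive layout below is a hypothetical stand-in, not a real FedML package:

import io
import zipfile

# Build an in-memory archive whose first entry is a directory,
# mirroring how a packaged model folder is zipped up.
buf = io.BytesIO()
with zipfile.ZipFile(buf, "w") as zf:
    zf.writestr("model_pkg/", "")  # directory entries end with "/"
    zf.writestr("model_pkg/fedml_model_config.yaml", "model_name: demo")

with zipfile.ZipFile(buf, "r") as zf:
    first_entry = zf.namelist()[0]
    # The guard added below only reports an unzipped folder name when
    # the first entry is actually a directory entry.
    print(first_entry, first_entry.endswith("/"))  # model_pkg/ True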
--- .../scheduler_core/scheduler_base_job_runner.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py index 648ab18cf1..5e7a71f25a 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -138,10 +138,13 @@ def get_client_id_list(self, server_edge_id_list): @staticmethod def unzip_file(zip_file, unzip_file_path) -> str: + unzipped_file_name = "" if zipfile.is_zipfile(zip_file): - with zipfile.ZipFile(zip_file, "r") as zipf: + with (zipfile.ZipFile(zip_file, "r") as zipf): zipf.extractall(unzip_file_path) - unzipped_file_name = zipf.namelist()[0] + # Make sure the unzipped file is a directory. + if zipf.namelist()[0].endswith("/"): + unzipped_file_name = zipf.namelist()[0] else: raise Exception("Invalid zip file {}".format(zip_file)) @@ -156,7 +159,7 @@ def package_download_progress(self, count, blksize, filesize): progress_int = int(progress) downloaded_kb = format(downloaded / 1024, '.2f') - # since this hook funtion is stateless, we need a state to avoid print progress repeatly + # Since this hook function is stateless, we need a state to avoid print progress repeatedly. if count == 0: self.prev_download_progress = 0 if progress_int != self.prev_download_progress and progress_int % 5 == 0: From f76d88ee5eb6e2c4d2e9a05be83c48de1a020659 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Thu, 6 Jun 2024 22:53:56 +0000 Subject: [PATCH 125/251] [Deploy] Hot fix grammar. --- .../scheduler/scheduler_core/scheduler_base_job_runner.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py index 5e7a71f25a..6e0010f556 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_job_runner.py @@ -140,7 +140,7 @@ def get_client_id_list(self, server_edge_id_list): def unzip_file(zip_file, unzip_file_path) -> str: unzipped_file_name = "" if zipfile.is_zipfile(zip_file): - with (zipfile.ZipFile(zip_file, "r") as zipf): + with zipfile.ZipFile(zip_file, "r") as zipf: zipf.extractall(unzip_file_path) # Make sure the unzipped file is a directory. 
if zipf.namelist()[0].endswith("/"): From 4b1127052dff23ce58956918571d680ea9c7ba9f Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Thu, 6 Jun 2024 16:45:31 -0700 Subject: [PATCH 126/251] Hot fix to support local debugging --- python/fedml/api/modules/device.py | 22 ++++++------------- .../scheduler/comm_utils/constants.py | 6 +++++ .../model_scheduler/device_model_inference.py | 9 ++++---- 3 files changed, 18 insertions(+), 19 deletions(-) diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py index 497fde9005..77f7eb2424 100644 --- a/python/fedml/api/modules/device.py +++ b/python/fedml/api/modules/device.py @@ -7,6 +7,7 @@ import fedml from fedml.api.modules.constants import ModuleConstants from fedml.computing.scheduler.comm_utils import sys_utils +from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants from fedml.computing.scheduler.comm_utils.run_process_utils import RunProcessUtils from fedml.computing.scheduler.master.server_constants import ServerConstants from fedml.computing.scheduler.master.server_login import logout as server_logout @@ -23,11 +24,6 @@ def bind( device_id = "0" os_name = "" docker = None - docker_rank = 1 - infer_host = "127.0.0.1" - redis_addr = "local" - redis_port = "6379" - redis_password = "fedml_default" role = "" is_client = computing is_server = server @@ -47,26 +43,22 @@ def bind( _bind( userid, computing, server, api_key, role, runner_cmd, device_id, os_name, - docker, docker_rank, infer_host, - redis_addr, redis_port, redis_password - ) + docker) def _bind( userid, computing, server, api_key, role, runner_cmd, device_id, os_name, - docker, docker_rank, infer_host, - redis_addr, redis_port, redis_password -): + docker): fedml.load_env() if os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) is None: - fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, infer_host) + fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_HOST, SchedulerConstants.REDIS_INFER_HOST) if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR) is None: - fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR, redis_addr) + fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR, SchedulerConstants.REDIS_ADDR) if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT) is None: - fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, redis_port) + fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, SchedulerConstants.REDIS_PORT) if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD) is None: - fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, redis_password) + fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, SchedulerConstants.REDIS_ADDR) url = fedml._get_backend_service() platform_name = platform.system() diff --git a/python/fedml/computing/scheduler/comm_utils/constants.py b/python/fedml/computing/scheduler/comm_utils/constants.py index 22cb31de45..67b9d8b14b 100644 --- a/python/fedml/computing/scheduler/comm_utils/constants.py +++ b/python/fedml/computing/scheduler/comm_utils/constants.py @@ -109,6 +109,12 @@ class SchedulerConstants: IMAGE_PULL_POLICY_IF_NOT_PRESENT = "IfNotPresent" IMAGE_PULL_POLICY_NEVER = "Never" + REDIS_INFER_HOST = "127.0.0.1" + REDIS_ADDR = "local" + REDIS_PORT = "6379" + REDIS_PASSWORD = "fedml_default" + + @staticmethod def get_log_source(run_json): run_config = run_json.get("run_config", {}) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py 
b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index 7b3ac1d0bf..7bc7d6f097 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -13,6 +13,7 @@ import fedml from fedml.api.modules.constants import ModuleConstants +from fedml.computing.scheduler.comm_utils.constants import SchedulerConstants from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants from fedml.computing.scheduler.model_scheduler.device_http_inference_protocol import FedMLHttpInference from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants @@ -27,10 +28,10 @@ class Settings: server_name = "DEVICE_INFERENCE_GATEWAY" fedml.load_env() - redis_addr = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR) - redis_port = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT) - redis_password = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD) - model_infer_host = os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST) + redis_addr = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_ADDR, SchedulerConstants.REDIS_ADDR) + redis_port = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, SchedulerConstants.REDIS_PORT) + redis_password = os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, SchedulerConstants.REDIS_PASSWORD) + model_infer_host = os.getenv(ModuleConstants.ENV_FEDML_INFER_HOST, SchedulerConstants.REDIS_INFER_HOST) version = fedml.get_env_version() mqtt_config = MLOpsConfigs.fetch_mqtt_config() From 2de8c370bb35684121d8f512bb2f2aecf5624eec Mon Sep 17 00:00:00 2001 From: Alay Shah Date: Thu, 6 Jun 2024 17:12:37 -0700 Subject: [PATCH 127/251] Bug fix --- python/fedml/api/modules/device.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/python/fedml/api/modules/device.py b/python/fedml/api/modules/device.py index 77f7eb2424..a853d538d0 100644 --- a/python/fedml/api/modules/device.py +++ b/python/fedml/api/modules/device.py @@ -58,7 +58,7 @@ def _bind( if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT) is None: fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PORT, SchedulerConstants.REDIS_PORT) if os.getenv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD) is None: - fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, SchedulerConstants.REDIS_ADDR) + fedml.set_env_kv(ModuleConstants.ENV_FEDML_INFER_REDIS_PASSWORD, SchedulerConstants.REDIS_PASSWORD) url = fedml._get_backend_service() platform_name = platform.system() From 38bc898388a9cb458c8e2b8d90f3e653f12bdd9a Mon Sep 17 00:00:00 2001 From: bhargav191098 Date: Thu, 6 Jun 2024 23:30:08 -0700 Subject: [PATCH 128/251] Adding sequential uploads & download using presigned URL --- python/fedml/api/modules/storage.py | 248 ++++++++++++++++-- .../scheduler/master/server_constants.py | 14 + 2 files changed, 242 insertions(+), 20 deletions(-) diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py index 51f58539bf..cc93fd6f87 100644 --- a/python/fedml/api/modules/storage.py +++ b/python/fedml/api/modules/storage.py @@ -3,6 +3,13 @@ import shutil import requests +import math + +import requests.exceptions +import tqdm +import sys +from concurrent.futures import ThreadPoolExecutor +import concurrent.futures from fedml.api.modules.utils import authenticate from fedml.core.distributed.communication.s3.remote_storage import S3Storage from fedml.core.mlops.mlops_configs import Configs, MLOpsConfigs 
@@ -19,6 +26,7 @@ def __init__(self, data: dict): self.updatedAt = data.get("updateTime", None) self.size = _get_size(data.get("fileSize",None)) self.tag_list = data.get("tags", None) + self.download_url = data.get("fileUrl", None) # Todo (alaydshah): Store service name in metadata @@ -40,16 +48,16 @@ def upload(data_path, api_key, name, description, tag_list, service, show_progre if not archive_path: return FedMLResponse(code=ResponseCode.FAILURE, message=message) - store = _get_storage_service(service) name = os.path.splitext(os.path.basename(archive_path))[0] if name is None else name file_name = name + ".zip" dest_path = os.path.join(user_id, file_name) file_size = os.path.getsize(archive_path) - file_uploaded_url = store.upload_file_with_progress(src_local_path=archive_path, dest_s3_path=dest_path, - show_progress=show_progress, - out_progress_to_err=out_progress_to_err, - progress_desc=progress_desc, metadata=metadata) + file_uploaded_url, message = _upload_multipart(api_key, file_name, archive_path, show_progress, + out_progress_to_err, + progress_desc, metadata) + + os.remove(archive_path) if not file_uploaded_url: return FedMLResponse(code=ResponseCode.FAILURE, message=f"Failed to upload file: {archive_path}") @@ -81,24 +89,36 @@ def download(data_name, api_key, service, dest_path, show_progress=True) -> FedM if user_id is None: return FedMLResponse(code=ResponseCode.FAILURE, message=message) - store = _get_storage_service(service) - zip_file_name = data_name + ".zip" - key = os.path.join(user_id, zip_file_name) - path_local = os.path.abspath(zip_file_name) - dest_path = os.path.abspath(dest_path) if dest_path else data_name - if store.download_file_with_progress(path_s3=key, path_local=path_local, show_progress=show_progress): - try: - shutil.unpack_archive(path_local, dest_path) - os.remove(path_local) - abs_dest_path = os.path.abspath(dest_path) - return FedMLResponse(code=ResponseCode.SUCCESS, message=f"Successfully downloaded and unzipped data at " - f"{abs_dest_path}", data=abs_dest_path) - except Exception as e: - error_message = f"Failed to unpack archive: {e}" + metadata_response = get_metadata(data_name, api_key) + if metadata_response.code == ResponseCode.SUCCESS: + metadata = metadata_response.data + if not metadata or not isinstance(metadata, StorageMetadata): + error_message = f"Unable to get the download URL" + logging.error(error_message) + return FedMLResponse(code=ResponseCode.FAILURE, message=error_message) + download_url = metadata.download_url + zip_file_name = data_name + ".zip" + path_local = os.path.abspath(zip_file_name) + dest_path = os.path.abspath(dest_path) if dest_path else data_name + if _download_using_presigned_url(download_url, zip_file_name, show_progress=show_progress): + try: + shutil.unpack_archive(path_local, dest_path) + os.remove(path_local) + abs_dest_path = os.path.abspath(dest_path) + return FedMLResponse(code=ResponseCode.SUCCESS, message=f"Successfully downloaded and unzipped data at " + f"{abs_dest_path}", data=abs_dest_path) + except Exception as e: + error_message = f"Failed to unpack archive: {e}" + logging.error(error_message) + return FedMLResponse(code=ResponseCode.FAILURE, message=error_message) + + else: + error_message = "Failed to download data from source" logging.error(error_message) return FedMLResponse(code=ResponseCode.FAILURE, message=error_message) + else: - error_message = f"Failed to download data: {data_name}" + error_message = "Unable to get the download URL" logging.error(error_message) return 
FedMLResponse(code=ResponseCode.FAILURE, message=error_message) @@ -196,6 +216,194 @@ def delete(data_name, service, api_key=None) -> FedMLResponse: logging.error(message, data_name, service) return FedMLResponse(code=ResponseCode.FAILURE, message=message, data=False) +def _get_num_chunks(file_size, max_chunk_size): + num_chunks = math.ceil(file_size / max_chunk_size) + return num_chunks + + +def get_chunks(file_path, chunk_size): + with open(file_path, 'rb') as file: + while True: + chunk = file.read(chunk_size) + if not chunk: + break + yield chunk + + +def _get_presigned_url(api_key, request_url, file_name, part_number=None): + cert_path = MLOpsConfigs.get_cert_path_with_version() + headers = ServerConstants.API_HEADERS + headers["Authorization"] = f"Bearer {api_key}" + params_dict = {'fileKey': file_name} + if part_number is not None: + params_dict['partNumber'] = part_number + if cert_path is None: + try: + requests.session().verify = cert_path + response = requests.get(request_url, verify=True, headers=headers, params=params_dict) + except requests.exceptions.SSLError as err: + MLOpsConfigs.install_root_ca_file() + response = requests.get(request_url, verify=True, headers=headers, params=params_dict) + else: + response = requests.get(request_url, verify=True, headers=headers, params=params_dict) + return response + + +def _upload_part(url,part_data,session): + response = session.put(url,data=part_data,verify=True) + return response + + +def _upload_chunk(presigned_url, chunk, part, pbar=None, max_retries=20,session=None): + for retry_attempt in range(max_retries): + try: + response = _upload_part(presigned_url,chunk,session) + except requests.exceptions.RequestException as e: + if retry_attempt < max_retries: + continue + else: + raise requests.exceptions.RequestException + + if(pbar is not None): + pbar.update(chunk.__sizeof__()) + return {'etag': response.headers['ETag'], 'partNumber': part} + raise requests.exceptions.RequestException + +def _process_post_response(response): + if response.status_code != 200: + message = (f"Failed to complete multipart upload with status code = {response.status_code}, " + f"response.content: {response.content}") + logging.error(message) + return None, message + else: + resp_data = response.json() + code = resp_data.get("code", None) + data_url = resp_data.get("data", None) + + if code is None or data_url is None or code == "FAILURE": + message = resp_data.get("message", None) + message = (f"Failed to complete multipart upload with following message: {message}, " + f"response.content: {response.content}") + return None, message + + return data_url, "Successfully uploaded the data! 
" + +def _complete_multipart_upload(api_key, file_key, part_info, upload_id): + complete_multipart_url = ServerConstants.get_complete_multipart_upload_url() + body_dict = {"fileKey": file_key, 'partETags': part_info, 'uploadId': upload_id} + + cert_path = MLOpsConfigs.get_cert_path_with_version() + headers = ServerConstants.API_HEADERS + headers["Authorization"] = f"Bearer {api_key}" + if cert_path is None: + try: + requests.session().verify = cert_path + complete_multipart_response = requests.post(complete_multipart_url, json=body_dict, verify=True, + headers=headers) + except requests.exceptions.SSLError as err: + MLOpsConfigs.install_root_ca_file() + complete_multipart_response = requests.post(complete_multipart_url, json=body_dict, verify=True, + headers=headers) + else: + complete_multipart_response = requests.post(complete_multipart_url, json=body_dict, verify=True, + headers=headers) + + return _process_post_response(complete_multipart_response) + +def _upload_multipart(api_key: str, file_key, archive_path, show_progress, out_progress_to_err, + progress_desc_text, metadata): + request_url = ServerConstants.get_presigned_multi_part_url() + + file_size = os.path.getsize(archive_path) + + max_chunk_size = 20 * 1024 * 1024 + + num_chunks = _get_num_chunks(file_size, max_chunk_size) + + upload_id = "" + presigned_urls = [] + + presigned_url_response = _get_presigned_url(api_key, request_url, file_key, num_chunks) + + if presigned_url_response.status_code != 200: + message = (f"Failed to get presigned URL with status code = {presigned_url_response.status_code}, " + f"response.content: {presigned_url_response.content}") + logging.error(message) + return None, message + else: + resp_data = presigned_url_response.json() + code = resp_data.get("code", None) + data = resp_data.get("data", None) + + if code is None or data is None or code == "FAILURE": + message = resp_data.get("message", None) + message = (f"Failed getting presigned URL with following message: {message}, " + f"response.content: {presigned_url_response.content}") + return None, message + + upload_id = data['uploadId'] + presigned_urls = data['urls'] + + parts = [] + chunks = get_chunks(archive_path, max_chunk_size) + part_info = [] + chunk_count = 0 + successful_chunks = 0 + + atomic_session = requests.session() + atomic_session.verify = MLOpsConfigs.get_cert_path_with_version() + with tqdm.tqdm(total=file_size, unit="B", unit_scale=True, + file=sys.stderr if out_progress_to_err else sys.stdout, + desc=progress_desc_text, leave=False) as pbar: + for part, chunk in enumerate(chunks, start=1): + presigned_url = presigned_urls[part - 1] + chunk_count += 1 + # Upload chunk to presigned_url in a separate thread from the thread pool of 10 workers. + if show_progress: + try: + part_data = _upload_chunk(presigned_url=presigned_url, chunk=chunk, part=part, + pbar=pbar,session=atomic_session) + part_info.append(part_data) + successful_chunks += 1 + except Exception as e: + return None, "unsuccessful" + + else: + try: + part_data = _upload_chunk(presigned_url=presigned_url, chunk=chunk, part=part, + pbar=pbar,session=atomic_session) + part_info.append(part_data) + successful_chunks += 1 + except Exception as e: + return None, "unsuccessful" + + if successful_chunks == chunk_count: + return _complete_multipart_upload(api_key, file_key, part_info, upload_id) + else: + return None, "Unsuccessful!" 
+ + +def _download_using_presigned_url(url, fname, chunk_size=1024 * 1024, show_progress=True): + download_response = requests.get(url, verify=True, stream=True) + if download_response.status_code == 200: + total = int(download_response.headers.get('content-length', 0)) + if show_progress: + with open(fname, 'wb') as file, tqdm.tqdm( + desc=fname, + total=total, + unit='B', + unit_scale=True, + unit_divisor=1024, + ) as bar: + for data in download_response.iter_content(chunk_size=chunk_size): + size = file.write(data) + bar.update(size) + else: + with open(fname, "wb") as file: + for data in download_response.iter_content(chunk_size=chunk_size): + size = file.write(data) + return True + return False def _get_user_id_from_api_key(api_key: str) -> (str, str): user_url = ServerConstants.get_user_url() diff --git a/python/fedml/computing/scheduler/master/server_constants.py b/python/fedml/computing/scheduler/master/server_constants.py index b835ba1bde..ebd8b2aef6 100644 --- a/python/fedml/computing/scheduler/master/server_constants.py +++ b/python/fedml/computing/scheduler/master/server_constants.py @@ -255,6 +255,20 @@ def get_dataset_url(): ServerConstants.get_mlops_url()) return create_dataset_url + @staticmethod + def get_presigned_multi_part_url(): + get_presigned_multi_part_url = "{}/system/api/v1/cli/oss/multipart/presigned-url".format( + ServerConstants.get_mlops_url() + ) + return get_presigned_multi_part_url + + @staticmethod + def get_complete_multipart_upload_url(): + complete_multipart_upload_url = "{}/system/api/v1/cli/oss/multipart/upload/complete".format( + ServerConstants.get_mlops_url() + ) + return complete_multipart_upload_url + @staticmethod def list_dataset_url(): list_dataset_url = "{}/fedmlOpsServer/api/v1/cli/dataset/list".format( From aa62a94da7dc68100ed80a75714b8c6b3b60e57d Mon Sep 17 00:00:00 2001 From: bhargav191098 Date: Thu, 6 Jun 2024 23:57:08 -0700 Subject: [PATCH 129/251] minor comments and some error handling --- python/fedml/api/modules/storage.py | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/python/fedml/api/modules/storage.py b/python/fedml/api/modules/storage.py index cc93fd6f87..e7d492c999 100644 --- a/python/fedml/api/modules/storage.py +++ b/python/fedml/api/modules/storage.py @@ -8,8 +8,6 @@ import requests.exceptions import tqdm import sys -from concurrent.futures import ThreadPoolExecutor -import concurrent.futures from fedml.api.modules.utils import authenticate from fedml.core.distributed.communication.s3.remote_storage import S3Storage from fedml.core.mlops.mlops_configs import Configs, MLOpsConfigs @@ -31,7 +29,7 @@ def __init__(self, data: dict): # Todo (alaydshah): Store service name in metadata # Todo (alaydshah): If data already exists, don't upload again. Instead suggest to use update command - +# Todo (bhargav) : Discuss and remove the service variable. Maybe needed sometime later. 
def upload(data_path, api_key, name, description, tag_list, service, show_progress, out_progress_to_err, progress_desc, metadata) -> FedMLResponse: api_key = authenticate(api_key) @@ -118,8 +116,7 @@ def download(data_name, api_key, service, dest_path, show_progress=True) -> FedM return FedMLResponse(code=ResponseCode.FAILURE, message=error_message) else: - error_message = "Unable to get the download URL" - logging.error(error_message) + error_message = metadata_response.message return FedMLResponse(code=ResponseCode.FAILURE, message=error_message) @@ -288,6 +285,7 @@ def _process_post_response(response): return data_url, "Successfully uploaded the data! " + def _complete_multipart_upload(api_key, file_key, part_info, upload_id): complete_multipart_url = ServerConstants.get_complete_multipart_upload_url() body_dict = {"fileKey": file_key, 'partETags': part_info, 'uploadId': upload_id} @@ -310,6 +308,7 @@ def _complete_multipart_upload(api_key, file_key, part_info, upload_id): return _process_post_response(complete_multipart_response) + def _upload_multipart(api_key: str, file_key, archive_path, show_progress, out_progress_to_err, progress_desc_text, metadata): request_url = ServerConstants.get_presigned_multi_part_url() @@ -349,7 +348,7 @@ def _upload_multipart(api_key: str, file_key, archive_path, show_progress, out_p part_info = [] chunk_count = 0 successful_chunks = 0 - + #TODO: (bhargav191098) Using Thread pool and confirming openssl issue atomic_session = requests.session() atomic_session.verify = MLOpsConfigs.get_cert_path_with_version() with tqdm.tqdm(total=file_size, unit="B", unit_scale=True, @@ -358,7 +357,6 @@ def _upload_multipart(api_key: str, file_key, archive_path, show_progress, out_p for part, chunk in enumerate(chunks, start=1): presigned_url = presigned_urls[part - 1] chunk_count += 1 - # Upload chunk to presigned_url in a separate thread from the thread pool of 10 workers. 
if show_progress: try: part_data = _upload_chunk(presigned_url=presigned_url, chunk=chunk, part=part, From 14bae990b9a610f7cdfb25f82355025b3d58b3e0 Mon Sep 17 00:00:00 2001 From: Alex Date: Fri, 7 Jun 2024 16:20:34 +0800 Subject: [PATCH 130/251] =?UTF-8?q?[CoreEngine]=201.=20fixed=20the=20issue?= =?UTF-8?q?=20that=20the=20fork=20method=20is=20not=20support=20in=20Windo?= =?UTF-8?q?ws=20OS.=E2=80=A82.=20fixed=20the=20issue=20the=20sqlite=20path?= =?UTF-8?q?=20is=20illegal=20=20in=20Windows=20OS.?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- python/fedml/__init__.py | 24 +++++++++++++------ .../scheduler/comm_utils/hardware_utils.py | 2 +- .../model_scheduler/device_model_db.py | 6 ++++- .../scheduler/scheduler_core/base_db.py | 6 ++++- .../scheduler_base_protocol_manager.py | 4 +--- 5 files changed, 29 insertions(+), 13 deletions(-) diff --git a/python/fedml/__init__.py b/python/fedml/__init__.py index 21da84c9ab..c96d65adc5 100644 --- a/python/fedml/__init__.py +++ b/python/fedml/__init__.py @@ -1,4 +1,5 @@ import logging +import platform import multiprocess as multiprocessing import os @@ -92,13 +93,7 @@ def init(args=None, check_env=True, should_init_logs=True): # Windows/Linux/MacOS compatability issues on multi-processing # https://github.com/pytorch/pytorch/issues/3492 """ - if multiprocessing.get_start_method() != "fork": - # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing - multiprocessing.set_start_method("fork", force=True) - - # if multiprocessing.get_start_method() != "spawn": - # # force all platforms (Windows/Linux/MacOS) to use the same way (spawn) for multiprocessing - # multiprocessing.set_start_method("spawn", force=True) + _init_multiprocessing() """ # https://stackoverflow.com/questions/53014306/error-15-initializing-libiomp5-dylib-but-found-libiomp5-dylib-already-initial @@ -450,6 +445,21 @@ def _run_distributed(): pass +def _init_multiprocessing(): + """ + # Windows/Linux/MacOS compatability issues on multi-processing + # https://github.com/pytorch/pytorch/issues/3492 + """ + if platform.system() == "Windows": + if multiprocessing.get_start_method() != "spawn": + # force all platforms (Windows/Linux/macOS) to use the same way (spawn) for multiprocessing + multiprocessing.set_start_method("spawn", force=True) + else: + if multiprocessing.get_start_method() != "fork": + # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing + multiprocessing.set_start_method("fork", force=True) + + def set_env_version(version): set_env_kv("FEDML_ENV_VERSION", version) load_env() diff --git a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py index 0062418631..e73809955e 100644 --- a/python/fedml/computing/scheduler/comm_utils/hardware_utils.py +++ b/python/fedml/computing/scheduler/comm_utils/hardware_utils.py @@ -27,7 +27,7 @@ def __get_util(cls) -> Optional[GPUCardUtil]: except Exception as e: pass - logging.error("No GPU card detected") + # logging.error("No GPU card detected") return None @staticmethod diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py index 1f43f719f3..09573a1d1b 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_db.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_db.py @@ -1,6 +1,7 @@ import json import logging import os 
+import platform import time from fedml.computing.scheduler.model_scheduler.device_server_constants import ServerConstants @@ -261,7 +262,10 @@ def open_job_db(self): self.db_base_dir = ServerConstants.get_database_dir() job_db_path = os.path.join(self.db_base_dir, FedMLModelDatabase.MODEL_DEPLOYMENT_DB) - self.db_engine = create_engine('sqlite:////{}'.format(job_db_path), echo=False) + if platform.system() == "Windows": + self.db_engine = create_engine('sqlite:///{}'.format(job_db_path), echo=False) + else: + self.db_engine = create_engine('sqlite:////{}'.format(job_db_path), echo=False) db_session_class = sessionmaker(bind=self.db_engine) self.db_connection = db_session_class() diff --git a/python/fedml/computing/scheduler/scheduler_core/base_db.py b/python/fedml/computing/scheduler/scheduler_core/base_db.py index b827efacf7..dbb322cfae 100755 --- a/python/fedml/computing/scheduler/scheduler_core/base_db.py +++ b/python/fedml/computing/scheduler/scheduler_core/base_db.py @@ -1,5 +1,6 @@ import json import os +import platform import time from sqlalchemy import Column, String, TEXT, Integer, Float, create_engine, and_ @@ -25,7 +26,10 @@ def open_job_db(self): if self.db_connection is not None: return - self.db_engine = create_engine('sqlite:////{}'.format(self.db_path), echo=False) + if platform.system() == "Windows": + self.db_engine = create_engine('sqlite:///{}'.format(self.db_path), echo=False) + else: + self.db_engine = create_engine('sqlite:////{}'.format(self.db_path), echo=False) db_session_class = sessionmaker(bind=self.db_engine) self.db_connection = db_session_class() diff --git a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py index 9bb8b7a7ec..19bb7e9882 100755 --- a/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py +++ b/python/fedml/computing/scheduler/scheduler_core/scheduler_base_protocol_manager.py @@ -46,9 +46,7 @@ def __init__(self, args, agent_config=None, is_master=False): self.status_reporter = None self.user_name = args.user_name - if multiprocessing.get_start_method() != "fork": - # force all platforms (Windows/Linux/macOS) to use the same way (fork) for multiprocessing - multiprocessing.set_start_method("fork", force=True) + fedml._init_multiprocessing() def generate_topics(self): # generate the subscribed topics. From 28ff0f3c1f1621f9994da08836a5920749b22fc6 Mon Sep 17 00:00:00 2001 From: Alex Date: Fri, 7 Jun 2024 16:25:07 +0800 Subject: [PATCH 131/251] [CoreEngine] add the missed import. --- .../fedml/computing/scheduler/scheduler_core/compute_gpu_db.py | 1 + 1 file changed, 1 insertion(+) diff --git a/python/fedml/computing/scheduler/scheduler_core/compute_gpu_db.py b/python/fedml/computing/scheduler/scheduler_core/compute_gpu_db.py index d50555d3c9..eb80c1424e 100755 --- a/python/fedml/computing/scheduler/scheduler_core/compute_gpu_db.py +++ b/python/fedml/computing/scheduler/scheduler_core/compute_gpu_db.py @@ -8,6 +8,7 @@ from fedml.core.common.singleton import Singleton from .base_db import FedMLBaseDb from .compute_utils import ComputeUtils +from ..master.server_constants import ServerConstants Base = declarative_base() From c151831deb46e297e182f74cc0315491d97d535d Mon Sep 17 00:00:00 2001 From: fedml-dimitris Date: Thu, 6 Jun 2024 17:03:46 -0400 Subject: [PATCH 132/251] Adding hash set for counting the number of pending requests per endpoint. 
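The switch from one global counter to a Redis hash keyed by endpoint id follows the pattern sketched below (a minimal example assuming a local Redis server and the redis-py client; only the hash name matches the real code, the rest is illustrative):

    import redis

    COUNTER_HASH = "FEDML_PENDING_REQUESTS_COUNTER"
    r = redis.Redis(host="localhost", port=6379, decode_responses=True)

    def update_pending_requests_counter(end_point_id, increase=False, decrease=False) -> int:
        # HINCRBY creates the field with value 0 before applying the delta,
        # so no explicit initialization is needed.
        if increase:
            r.hincrby(COUNTER_HASH, end_point_id, 1)
        if decrease:
            # Redis has no native hash decrement, so HINCRBY with -1 is used
            # and the counter is clamped to never go negative.
            r.hincrby(COUNTER_HASH, end_point_id, -1)
            if int(r.hget(COUNTER_HASH, end_point_id) or 0) < 0:
                r.hset(COUNTER_HASH, end_point_id, 0)
        return int(r.hget(COUNTER_HASH, end_point_id) or 0)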
--- .../model_scheduler/device_model_cache.py | 29 ++++++++++--------- .../model_scheduler/device_model_inference.py | 18 ++++++------ 2 files changed, 24 insertions(+), 23 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py index 75cf4dbc2a..242501f2fa 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py @@ -974,20 +974,21 @@ def delete_endpoint_scaling_down_decision_time(self, end_point_id) -> bool: self.FEDML_MODEL_ENDPOINT_SCALING_DOWN_DECISION_TIME_TAG, end_point_id)) - def get_pending_requests_counter(self) -> int: - if not self.redis_connection.exists(self.FEDML_PENDING_REQUESTS_COUNTER): - self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0) - return int(self.redis_connection.get(self.FEDML_PENDING_REQUESTS_COUNTER)) - - def update_pending_requests_counter(self, increase=False, decrease=False) -> int: - if not self.redis_connection.exists(self.FEDML_PENDING_REQUESTS_COUNTER): - self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0) + def get_pending_requests_counter(self, end_point_id) -> int: + # If the endpoint does not exist inside the Hash collection, set its counter to 0. + if self.redis_connection.hexists(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id): + return int(self.redis_connection.hget(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id)) + return 0 + + def update_pending_requests_counter(self, end_point_id, increase=False, decrease=False) -> int: + if not self.redis_connection.hexists(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id): + self.redis_connection.hset(self.FEDML_PENDING_REQUESTS_COUNTER, mapping={end_point_id: 0}) if increase: - self.redis_connection.incr(self.FEDML_PENDING_REQUESTS_COUNTER) + self.redis_connection.hincrby(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id, 1) if decrease: + # Careful on the negative, there is no native function for hash decreases. + self.redis_connection.hincrby(self.FEDML_PENDING_REQUESTS_COUNTER, end_point_id, -1) # Making sure the counter never becomes negative! - if self.get_pending_requests_counter() < 0: - self.redis_connection.set(self.FEDML_PENDING_REQUESTS_COUNTER, 0) - else: - self.redis_connection.decr(self.FEDML_PENDING_REQUESTS_COUNTER) - return self.get_pending_requests_counter() + if self.get_pending_requests_counter(end_point_id) < 0: + self.redis_connection.hset(self.FEDML_PENDING_REQUESTS_COUNTER, mapping={end_point_id: 0}) + return self.get_pending_requests_counter(end_point_id) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index 7bc7d6f097..c6e26ba53c 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -55,10 +55,10 @@ async def auth_middleware(request: Request, call_next): {"error": True, "message": "Invalid JSON."}, status_code=status.HTTP_400_BAD_REQUEST) - # Get total pending requests. - pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter() + # Get endpoint's total pending requests. 
+ end_point_id = request_json.get("end_point_id", None) + pending_requests_num = FEDML_MODEL_CACHE.get_pending_requests_counter(end_point_id) if pending_requests_num: - end_point_id = request_json.get("end_point_id", None) # Fetch metrics of the past k=3 requests. pask_k_metrics = FEDML_MODEL_CACHE.get_endpoint_metrics( end_point_id=end_point_id, @@ -173,7 +173,7 @@ async def _predict( header=None ) -> Union[MutableMapping[str, Any], Response, StreamingResponse]: # Always increase the pending requests counter on a new incoming request. - FEDML_MODEL_CACHE.update_pending_requests_counter(increase=True) + FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, increase=True) inference_response = {} try: @@ -205,14 +205,14 @@ async def _predict( if not is_endpoint_activated(in_end_point_id): inference_response = {"error": True, "message": "endpoint is not activated."} logging_inference_request(input_json, inference_response) - FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) + FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True) return inference_response # Found idle inference device idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \ found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version) if idle_device is None or idle_device == "": - FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) + FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True) return {"error": True, "error_code": status.HTTP_404_NOT_FOUND, "message": "can not found active inference worker for this endpoint."} @@ -252,18 +252,18 @@ async def _predict( pass logging_inference_request(input_json, inference_response) - FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) + FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True) return inference_response else: inference_response = {"error": True, "message": "token is not valid."} logging_inference_request(input_json, inference_response) - FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) + FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True) return inference_response except Exception as e: logging.error("Inference Exception: {}".format(traceback.format_exc())) # Need to reduce the pending requests counter in whatever exception that may be raised. - FEDML_MODEL_CACHE.update_pending_requests_counter(decrease=True) + FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True) def retrieve_info_by_endpoint_id(end_point_id, in_end_point_name=None, in_model_name=None, From c29cf1d6e6be0c231e6f2c3bd5e13e67d7431956 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Mon, 10 Jun 2024 19:58:00 +0000 Subject: [PATCH 133/251] [Deploy] Unified timeout key. 
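The unified request_timeout_sec key feeds the gateway's admission check. A back-of-the-envelope sketch of that check (simplified; the real middleware reads the recent latencies and endpoint settings from the model cache):

    INFERENCE_REQUEST_TIMEOUT_DEFAULT = 30  # seconds, mirrors the new constant

    def should_block(past_k_latencies_sec, pending_requests_num,
                     request_timeout_sec=INFERENCE_REQUEST_TIMEOUT_DEFAULT) -> bool:
        # Estimate queue drain time as mean recent latency times queue depth
        # and reject early instead of letting the request miss its deadline.
        if not past_k_latencies_sec or not pending_requests_num:
            return False
        mean_latency = sum(past_k_latencies_sec) / len(past_k_latencies_sec)
        return (mean_latency * pending_requests_num) > request_timeout_sec

    # e.g. should_block([0.8, 1.1, 0.9], 40) -> True with the 30 s default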
--- .../scheduler/model_scheduler/device_model_cache.py | 2 +- .../scheduler/model_scheduler/device_model_inference.py | 5 +++-- .../scheduler/model_scheduler/device_server_constants.py | 3 +++ 3 files changed, 7 insertions(+), 3 deletions(-) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py index 242501f2fa..30e4f460e6 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_cache.py @@ -139,7 +139,7 @@ def set_user_setting_replica_num(self, end_point_id, "target_queries_per_replica": target_queries_per_replica, "aggregation_window_size_seconds": aggregation_window_size_seconds, "scale_down_delay_seconds": scale_down_delay_seconds, - "request_timeout_sec": timeout_s + ServerConstants.INFERENCE_REQUEST_TIMEOUT_KEY: timeout_s } try: self.redis_connection.set(self.get_user_setting_replica_num_key(end_point_id), json.dumps(replica_num_dict)) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index c6e26ba53c..d073533b72 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -66,7 +66,7 @@ async def auth_middleware(request: Request, call_next): # Get the request timeout from the endpoint settings. request_timeout_s = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \ - .get("request_timeout_s", ClientConstants.INFERENCE_REQUEST_TIMEOUT) + .get(ServerConstants.INFERENCE_REQUEST_TIMEOUT_KEY, ServerConstants.INFERENCE_REQUEST_TIMEOUT_DEFAULT) # Only proceed if the past k metrics collection is not empty. if pask_k_metrics: @@ -76,7 +76,8 @@ async def auth_middleware(request: Request, call_next): mean_latency = sum(past_k_latencies_sec) / len(past_k_latencies_sec) # If timeout threshold is exceeded then cancel and return time out error. - if (mean_latency * pending_requests_num) > request_timeout_s: + should_block = (mean_latency * pending_requests_num) > request_timeout_s + if should_block: return JSONResponse( {"error": True, "message": "Request timed out."}, status_code=status.HTTP_504_GATEWAY_TIMEOUT) diff --git a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py index eb01fbb599..243c197b2f 100644 --- a/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_server_constants.py @@ -104,6 +104,9 @@ class ServerConstants(object): AUTO_DETECT_PUBLIC_IP = "auto_detect_public_ip" MODEL_INFERENCE_DEFAULT_PORT = 2203 MODEL_CACHE_KEY_EXPIRE_TIME = 1 * 10 + + INFERENCE_REQUEST_TIMEOUT_KEY = "request_timeout_sec" + INFERENCE_REQUEST_TIMEOUT_DEFAULT = 30 # -----End----- MODEL_DEPLOYMENT_STAGE1 = {"index": 1, "text": "ReceivedRequest"} From c4a87149e3af296310f0a7ca04cd467e0bc9b06f Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Tue, 11 Jun 2024 00:06:14 +0000 Subject: [PATCH 134/251] [Deploy] Report worker's connectivity when it finished. 
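Each worker now stamps its deployment result with how it can be reached, and the gateway dispatches on that stamp instead of probing HTTP, then HTTP proxy, then MQTT in turn. The new helper boils down to the sketch below (a condensed equivalent with the constants inlined for readability, not the verbatim module):

    import os

    ENV_CONNECTION_TYPE_KEY = "FEDML_CONNECTION_TYPE"
    WORKER_CONNECTIVITY_TYPE_HTTP = "http"
    WORKER_CONNECTIVITY_TYPE_HTTP_PROXY = "http_proxy"
    WORKER_CONNECTIVITY_TYPE_MQTT = "mqtt"

    def return_this_device_connectivity_type() -> str:
        # Fall back to plain HTTP when the env var is unset or unrecognized.
        value = os.environ.get(ENV_CONNECTION_TYPE_KEY, "")
        if value in (WORKER_CONNECTIVITY_TYPE_HTTP,
                     WORKER_CONNECTIVITY_TYPE_HTTP_PROXY,
                     WORKER_CONNECTIVITY_TYPE_MQTT):
            return value
        return WORKER_CONNECTIVITY_TYPE_HTTP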
--- .../scheduler/comm_utils/network_util.py | 16 +++++ .../device_client_constants.py | 5 ++ .../model_scheduler/device_model_inference.py | 60 +++++++++++-------- .../model_scheduler/master_job_runner.py | 8 --- .../model_scheduler/worker_job_runner.py | 33 +++++++--- .../scheduler_core/general_constants.py | 16 ++--- 6 files changed, 87 insertions(+), 51 deletions(-) create mode 100644 python/fedml/computing/scheduler/comm_utils/network_util.py diff --git a/python/fedml/computing/scheduler/comm_utils/network_util.py b/python/fedml/computing/scheduler/comm_utils/network_util.py new file mode 100644 index 0000000000..13674840c5 --- /dev/null +++ b/python/fedml/computing/scheduler/comm_utils/network_util.py @@ -0,0 +1,16 @@ +import os +from fedml.computing.scheduler.model_scheduler.device_client_constants import ClientConstants + + +def return_this_device_connectivity_type() -> str: + """ + Return -> "http" | "http_proxy" |"mqtt" + """ + if os.environ.get(ClientConstants.ENV_CONNECTION_TYPE_KEY) == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP: + return ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP + elif os.environ.get(ClientConstants.ENV_CONNECTION_TYPE_KEY) == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY: + return ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY + elif os.environ.get(ClientConstants.ENV_CONNECTION_TYPE_KEY) == ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT: + return ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT + else: + return ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP diff --git a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py index 7894f2c73e..d66c2f966a 100644 --- a/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_client_constants.py @@ -97,6 +97,11 @@ class ClientConstants(object): INFERENCE_INFERENCE_SERVER_VERSION = "v2" INFERENCE_REQUEST_TIMEOUT = 30 + ENV_CONNECTION_TYPE_KEY = "FEDML_CONNECTION_TYPE" + WORKER_CONNECTIVITY_TYPE_HTTP = "http" + WORKER_CONNECTIVITY_TYPE_HTTP_PROXY = "http_proxy" + WORKER_CONNECTIVITY_TYPE_MQTT = "mqtt" + MSG_MODELOPS_DEPLOYMENT_STATUS_INITIALIZING = "INITIALIZING" MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYING = "DEPLOYING" MSG_MODELOPS_DEPLOYMENT_STATUS_INFERRING = "INFERRING" diff --git a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py index d073533b72..a9205ceb9a 100755 --- a/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py +++ b/python/fedml/computing/scheduler/model_scheduler/device_model_inference.py @@ -210,7 +210,8 @@ async def _predict( return inference_response # Found idle inference device - idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url = \ + idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url,\ + connectivity_type = \ found_idle_inference_device(in_end_point_id, in_end_point_name, in_model_name, in_model_version) if idle_device is None or idle_device == "": FEDML_MODEL_CACHE.update_pending_requests_counter(end_point_id, decrease=True) @@ -235,13 +236,16 @@ async def _predict( stream_flag = input_json.get("stream", False) input_list["stream"] = input_list.get("stream", stream_flag) output_list = input_json.get("outputs", []) + + # main execution of redirecting the inference request to the 
idle device inference_response = await send_inference_request( idle_device, end_point_id, inference_output_url, input_list, output_list, - inference_type=in_return_type) + inference_type=in_return_type, + connectivity_type=connectivity_type) # Calculate model metrics try: @@ -304,11 +308,12 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_ inference_host = "" inference_output_url = "" model_version = "" + connectivity_type = "" + # Found idle device (TODO: optimize the algorithm to search best device for inference) payload, idle_device = FEDML_MODEL_CACHE. \ get_idle_device(end_point_id, end_point_name, in_model_name, in_model_version) if payload is not None: - logging.info("found idle deployment result {}".format(payload)) deployment_result = payload model_name = deployment_result["model_name"] model_version = deployment_result["model_version"] @@ -317,24 +322,25 @@ def found_idle_inference_device(end_point_id, end_point_name, in_model_name, in_ inference_output_url = deployment_result["model_url"] url_parsed = urlparse(inference_output_url) inference_host = url_parsed.hostname + connectivity_type = deployment_result.get("connectivity_type", ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP) else: logging.info("not found idle deployment result") - return idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url + res = (idle_device, end_point_id, model_id, model_name, model_version, inference_host, inference_output_url, + connectivity_type) + logging.info(f"found idle device with metrics: {res}") + + return res async def send_inference_request(idle_device, end_point_id, inference_url, input_list, output_list, - inference_type="default", has_public_ip=True): + inference_type="default", + connectivity_type=ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP): request_timeout_sec = FEDML_MODEL_CACHE.get_endpoint_settings(end_point_id) \ .get("request_timeout_sec", ClientConstants.INFERENCE_REQUEST_TIMEOUT) try: - http_infer_available = os.getenv("FEDML_INFERENCE_HTTP_AVAILABLE", True) - if not http_infer_available: - if http_infer_available == "False" or http_infer_available == "false": - http_infer_available = False - - if http_infer_available: + if connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP: response_ok = await FedMLHttpInference.is_inference_ready( inference_url, timeout=request_timeout_sec) @@ -347,22 +353,23 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input timeout=request_timeout_sec) logging.info(f"Use http inference. return {response_ok}") return inference_response - - response_ok = await FedMLHttpProxyInference.is_inference_ready( - inference_url, - timeout=request_timeout_sec) - if response_ok: - response_ok, inference_response = await FedMLHttpProxyInference.run_http_proxy_inference_with_request( - end_point_id, + elif connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP_PROXY: + logging.warning("Use http proxy inference.") + response_ok = await FedMLHttpProxyInference.is_inference_ready( inference_url, - input_list, - output_list, - inference_type=inference_type, timeout=request_timeout_sec) - logging.info(f"Use http proxy inference. 
return {response_ok}") - return inference_response - - if not has_public_ip: + if response_ok: + response_ok, inference_response = await FedMLHttpProxyInference.run_http_proxy_inference_with_request( + end_point_id, + inference_url, + input_list, + output_list, + inference_type=inference_type, + timeout=request_timeout_sec) + logging.info(f"Use http proxy inference. return {response_ok}") + return inference_response + elif connectivity_type == ClientConstants.WORKER_CONNECTIVITY_TYPE_MQTT: + logging.warning("Use mqtt inference.") agent_config = {"mqtt_config": Settings.mqtt_config} mqtt_inference = FedMLMqttInference( agent_config=agent_config, @@ -385,7 +392,8 @@ async def send_inference_request(idle_device, end_point_id, inference_url, input logging.info(f"Use mqtt inference. return {response_ok}.") return inference_response - return {"error": True, "message": "Failed to use http, http-proxy for inference, no response from replica."} + else: + return {"error": True, "message": "Failed to use http, http-proxy for inference, no response from replica."} except Exception as e: inference_response = {"error": True, "message": f"Exception when using http, http-proxy and mqtt " diff --git a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py index a10bd2c559..b9b9b4c356 100755 --- a/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/master_job_runner.py @@ -250,14 +250,6 @@ def process_deployment_result_message(self, topic=None, payload=None): logging.info(f"Endpoint {end_point_id}; Device {device_id}; replica {replica_no}; " f"run_operation {run_operation} model status {model_status}.") - # OPTIONAL DEBUG PARAMS - # this_run_controller = self.model_runner_mapping[run_id_str].replica_controller - # logging.info(f"The current replica controller state is " - # f"Total version diff num {this_run_controller.total_replica_version_diff_num}") - # logging.info(f"self.request_json now {self.request_json}") # request_json will be deprecated - # this_run_request_json = self.request_json - # logging.info(f"self.request_json now {this_run_request_json}") - # Set redis + sqlite deployment result FedMLModelCache.get_instance().set_redis_params(self.redis_addr, self.redis_port, self.redis_password) diff --git a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py index 3c357e9dab..9e178228b2 100755 --- a/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py +++ b/python/fedml/computing/scheduler/model_scheduler/worker_job_runner.py @@ -9,6 +9,8 @@ from abc import ABC import yaml from fedml.computing.scheduler.comm_utils.job_utils import JobRunnerUtils +from fedml.computing.scheduler.comm_utils.network_util import return_this_device_connectivity_type + from fedml.core.mlops import MLOpsRuntimeLog from fedml.computing.scheduler.comm_utils import file_utils from .device_client_constants import ClientConstants @@ -234,8 +236,11 @@ def run_impl(self, run_extend_queue_list, sender_message_center, running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ "", "", model_version, {}, {} + # ip and connectivity + worker_ip = GeneralConstants.get_ip_address(self.request_json) + connectivity = return_this_device_connectivity_type() + if op == "add": - worker_ip = GeneralConstants.get_ip_address(self.request_json) for 
rank in range(prev_rank + 1, prev_rank + 1 + op_num): try: running_model_name, inference_output_url, inference_model_version, model_metadata, model_config = \ @@ -269,7 +274,9 @@ def run_impl(self, run_extend_queue_list, sender_message_center, result_payload = self.send_deployment_results( end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, model_id, model_name, inference_output_url, model_version, inference_port_external, - inference_engine, model_metadata, model_config, replica_no=rank + 1) + inference_engine, model_metadata, model_config, replica_no=rank + 1, + connectivity=connectivity + ) if inference_port_external != inference_port: # Save internal port to local db @@ -278,7 +285,9 @@ def run_impl(self, run_extend_queue_list, sender_message_center, result_payload = self.construct_deployment_results( end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, model_id, model_name, inference_output_url, model_version, inference_port, - inference_engine, model_metadata, model_config, replica_no=rank + 1) + inference_engine, model_metadata, model_config, replica_no=rank + 1, + connectivity=connectivity + ) FedMLModelDatabase.get_instance().set_deployment_result( run_id, end_point_name, model_name, model_version, self.edge_id, @@ -326,7 +335,6 @@ def run_impl(self, run_extend_queue_list, sender_message_center, return True elif op == "update" or op == "rollback": # Update is combine of delete and add - worker_ip = GeneralConstants.get_ip_address(self.request_json) for rank in replica_rank_to_update: # Delete a replica (container) if exists self.replica_handler.remove_replica(rank) @@ -402,7 +410,9 @@ def run_impl(self, run_extend_queue_list, sender_message_center, result_payload = self.send_deployment_results( end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, model_id, model_name, inference_output_url, model_version, inference_port_external, - inference_engine, model_metadata, model_config, replica_no=rank + 1) + inference_engine, model_metadata, model_config, replica_no=rank + 1, + connectivity=connectivity + ) if inference_port_external != inference_port: # Save internal port to local db logging.info("inference_port_external {} != inference_port {}".format( @@ -410,7 +420,9 @@ def run_impl(self, run_extend_queue_list, sender_message_center, result_payload = self.construct_deployment_results( end_point_name, self.edge_id, ClientConstants.MSG_MODELOPS_DEPLOYMENT_STATUS_DEPLOYED, model_id, model_name, inference_output_url, model_version, inference_port, - inference_engine, model_metadata, model_config, replica_no=rank + 1) + inference_engine, model_metadata, model_config, replica_no=rank + 1, + connectivity=connectivity + ) FedMLModelDatabase.get_instance().set_deployment_result( run_id, end_point_name, model_name, model_version, self.edge_id, @@ -433,7 +445,8 @@ def run_impl(self, run_extend_queue_list, sender_message_center, def construct_deployment_results(self, end_point_name, device_id, model_status, model_id, model_name, model_inference_url, model_version, inference_port, inference_engine, - model_metadata, model_config, replica_no=1): + model_metadata, model_config, replica_no=1, + connectivity=ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP): deployment_results_payload = {"end_point_id": self.run_id, "end_point_name": end_point_name, "model_id": model_id, "model_name": model_name, "model_url": model_inference_url, "model_version": model_version, @@ -444,6 +457,7 @@ def 
construct_deployment_results(self, end_point_name, device_id, model_status, "model_status": model_status, "inference_port": inference_port, "replica_no": replica_no, + "connectivity_type": connectivity, } return deployment_results_payload @@ -466,7 +480,8 @@ def construct_deployment_status(self, end_point_name, device_id, def send_deployment_results(self, end_point_name, device_id, model_status, model_id, model_name, model_inference_url, model_version, inference_port, inference_engine, - model_metadata, model_config, replica_no=1): + model_metadata, model_config, replica_no=1, + connectivity=ClientConstants.WORKER_CONNECTIVITY_TYPE_HTTP): deployment_results_topic = "model_device/model_device/return_deployment_result/{}/{}".format( self.run_id, device_id) @@ -474,7 +489,7 @@ def send_deployment_results(self, end_point_name, device_id, model_status, end_point_name, device_id, model_status, model_id, model_name, model_inference_url, model_version, inference_port, inference_engine, - model_metadata, model_config, replica_no=replica_no) + model_metadata, model_config, replica_no=replica_no, connectivity=connectivity) logging.info("[client] send_deployment_results: topic {}, payload {}.".format(deployment_results_topic, deployment_results_payload)) diff --git a/python/fedml/computing/scheduler/scheduler_core/general_constants.py b/python/fedml/computing/scheduler/scheduler_core/general_constants.py index 68c1a8e09d..8c60b17bdf 100755 --- a/python/fedml/computing/scheduler/scheduler_core/general_constants.py +++ b/python/fedml/computing/scheduler/scheduler_core/general_constants.py @@ -192,14 +192,14 @@ def get_public_ip(): @staticmethod def get_ip_address(request_json, infer_host=None): # OPTION 1: Use local ip - ip = GeneralConstants.get_local_ip() - - # OPTION 2: Auto detect public ip - if "parameters" in request_json and \ - GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \ - request_json["parameters"][GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP]: - ip = GeneralConstants.get_public_ip() - logging.info("Auto detect public ip for master: " + ip) + # ip = GeneralConstants.get_local_ip() + # + # # OPTION 2: Auto detect public ip + # if "parameters" in request_json and \ + # GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP in request_json["parameters"] and \ + # request_json["parameters"][GeneralConstants.CONFIG_KEY_AUTO_DETECT_PUBLIC_IP]: + ip = GeneralConstants.get_public_ip() + logging.info("Auto detect public ip for master: " + ip) # OPTION 3: Use user indicated ip if infer_host is not None and infer_host != "127.0.0.1" and infer_host != "localhost": From ea03b600c9709ba951fdd47e757f07c522891b16 Mon Sep 17 00:00:00 2001 From: Raphael Jin Date: Mon, 10 Jun 2024 17:21:10 -0700 Subject: [PATCH 135/251] [Deploy] Refactor the quick start example, use public ip as default. 
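With the master now auto-detecting its public IP by default (see the get_ip_address() change in the previous patch), a minimal sketch of that resolution order, using an external echo service as an illustrative stand-in for GeneralConstants.get_public_ip():

    import requests

    def get_public_ip() -> str:
        # Illustrative only: ask an external echo service for the egress IP.
        return requests.get("https://api.ipify.org", timeout=10).text.strip()

    def get_ip_address(infer_host=None) -> str:
        # Default to the auto-detected public IP so remote clients can reach
        # the inference gateway without extra configuration.
        ip = get_public_ip()
        # A user-specified, non-loopback host still takes precedence.
        if infer_host is not None and infer_host not in ("127.0.0.1", "localhost"):
            ip = infer_host
        return ip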
--- .../examples/deploy/quick_start/config.yaml | 21 +- .../examples/deploy/quick_start/main_entry.py | 27 ++ .../deploy/quick_start/src/__init__.py | 0 .../deploy/quick_start/src/app/__init__.py | 0 .../quick_start/src/app/pipe/__init__.py | 0 .../quick_start/src/app/pipe/constants.py | 68 ----- .../src/app/pipe/instruct_pipeline.py | 261 ------------------ .../quick_start/src/config/bootstrap.sh | 14 - .../deploy/quick_start/src/main_entry.py | 67 ----- python/fedml/api/modules/model.py | 3 + .../device_client_runner_deprecated.py} | 0 .../model_scheduler/sample_model/README.md | 57 ---- .../sample_model/fedml_model.bin | Bin 1476451 -> 0 bytes .../sample_model/fedml_model_config.yaml | 20 -- .../scheduler_core/general_constants.py | 16 +- 15 files changed, 42 insertions(+), 512 deletions(-) create mode 100644 python/examples/deploy/quick_start/main_entry.py delete mode 100644 python/examples/deploy/quick_start/src/__init__.py delete mode 100644 python/examples/deploy/quick_start/src/app/__init__.py delete mode 100644 python/examples/deploy/quick_start/src/app/pipe/__init__.py delete mode 100644 python/examples/deploy/quick_start/src/app/pipe/constants.py delete mode 100644 python/examples/deploy/quick_start/src/app/pipe/instruct_pipeline.py delete mode 100644 python/examples/deploy/quick_start/src/config/bootstrap.sh delete mode 100644 python/examples/deploy/quick_start/src/main_entry.py rename python/{examples/deploy/quick_start/__init__.py => fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py} (100%) mode change 100644 => 100755 delete mode 100644 python/fedml/computing/scheduler/model_scheduler/sample_model/README.md delete mode 100644 python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model.bin delete mode 100644 python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model_config.yaml diff --git a/python/examples/deploy/quick_start/config.yaml b/python/examples/deploy/quick_start/config.yaml index 83479068e6..880ea92d2d 100644 --- a/python/examples/deploy/quick_start/config.yaml +++ b/python/examples/deploy/quick_start/config.yaml @@ -1,21 +1,8 @@ -workspace: "./src" +workspace: "." entry_point: "main_entry.py" + # If you want to install some packages # Please write the command in the bootstrap.sh bootstrap: | - echo "Bootstrap start..." - sh ./config/bootstrap.sh - echo "Bootstrap finished" - -# If you do not have any GPU resource but want to serve the model -# Try FedML® Nexus AI Platform, and Uncomment the following lines. -# ------------------------------------------------------------ -computing: - minimum_num_gpus: 1 # minimum # of GPUs to provision - maximum_cost_per_hour: $3000 # max cost per hour for your job per gpu card - #allow_cross_cloud_resources: true # true, false - #device_type: CPU # options: GPU, CPU, hybrid - resource_type: A100-80G # e.g., A100-80G, - # please check the resource type list by "fedml show-resource-type" - # or visiting URL: https://open.fedml.ai/accelerator_resource_type -# ------------------------------------------------------------ + echo "Install some packages..." + echo "Install finished!" 
diff --git a/python/examples/deploy/quick_start/main_entry.py b/python/examples/deploy/quick_start/main_entry.py new file mode 100644 index 0000000000..7c4fb910b0 --- /dev/null +++ b/python/examples/deploy/quick_start/main_entry.py @@ -0,0 +1,27 @@ +from fedml.serving import FedMLPredictor +from fedml.serving import FedMLInferenceRunner + + +class Bot(FedMLPredictor):  # Inherit FedMLPredictor + def __init__(self): + super().__init__() + + # --- Your model initialization code here --- + + # ------------------------------------------- + + def predict(self, request: dict): + input_dict = request + question: str = input_dict.get("text", "").strip() + + # --- Your model inference code here --- + response = "I do not know the answer to your question." + # --------------------------------------- + + return {"generated_text": f"The answer to your question {question} is: {response}"} + + +if __name__ == "__main__": + chatbot = Bot() + fedml_inference_runner = FedMLInferenceRunner(chatbot) + fedml_inference_runner.run() diff --git a/python/examples/deploy/quick_start/src/__init__.py b/python/examples/deploy/quick_start/src/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/python/examples/deploy/quick_start/src/app/__init__.py b/python/examples/deploy/quick_start/src/app/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/python/examples/deploy/quick_start/src/app/pipe/__init__.py b/python/examples/deploy/quick_start/src/app/pipe/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/python/examples/deploy/quick_start/src/app/pipe/constants.py b/python/examples/deploy/quick_start/src/app/pipe/constants.py deleted file mode 100644 index 811418bf5e..0000000000 --- a/python/examples/deploy/quick_start/src/app/pipe/constants.py +++ /dev/null @@ -1,68 +0,0 @@ -""" -Adapted from https://github.com/databrickslabs/dolly/blob/master/training/consts.py -""" - -# ----------------------------------------------------------------- -DEFAULT_MAX_SEQ_LENGTH = 1024 -IGNORE_INDEX = -100 - -# ----------------------------------------------------------------- -MODEL_NAMES = [ - "EleutherAI/pythia-70m", - "EleutherAI/pythia-160m", - "EleutherAI/pythia-2.8b", - "EleutherAI/pythia-6.9b", - "EleutherAI/pythia-12b", - "EleutherAI/gpt-j-6B", - "databricks/dolly-v2-3b", - "databricks/dolly-v2-7b", - "databricks/dolly-v2-12b", -] - -# ----------------------------------------------------------------- -INTRO_BLURB = ( - "Below is an instruction that describes a task. Write a response that appropriately completes the request." -) - -INSTRUCTION_KEY = "### Instruction:" -INPUT_KEY = "Input:" -RESPONSE_KEY = "### Response:" -END_KEY = "### End" -RESPONSE_KEY_NL = f"{RESPONSE_KEY}\n" - -# This is a training prompt that does not contain an input string. The instruction by itself has enough information -# to respond.For example, the instruction might ask for the year a historic figure was born. -PROMPT_NO_INPUT_FORMAT = f"""{INTRO_BLURB} - -{INSTRUCTION_KEY} -{{instruction}} - -{RESPONSE_KEY} -{{response}} - -{END_KEY}""" - -# This is a training prompt that contains an input string that serves as context for the instruction. For example, -# the input might be a passage from Wikipedia and the instruction is to extract some information from it.
-PROMPT_WITH_INPUT_FORMAT = f"""{INTRO_BLURB} - -{INSTRUCTION_KEY} -{{instruction}} - -{INPUT_KEY} -{{input}} - -{RESPONSE_KEY} -{{response}} - -{END_KEY}""" - -# This is the prompt that is used for generating responses using an already trained model. It ends with the response -# key, where the job of the model is to provide the completion that follows it (i.e. the response itself). -PROMPT_FOR_GENERATION_FORMAT = f"""{INTRO_BLURB} - -{INSTRUCTION_KEY} -{{instruction}} - -{RESPONSE_KEY} -""" \ No newline at end of file diff --git a/python/examples/deploy/quick_start/src/app/pipe/instruct_pipeline.py b/python/examples/deploy/quick_start/src/app/pipe/instruct_pipeline.py deleted file mode 100644 index edcc1a643b..0000000000 --- a/python/examples/deploy/quick_start/src/app/pipe/instruct_pipeline.py +++ /dev/null @@ -1,261 +0,0 @@ -""" -Adapted from https://github.com/databrickslabs/dolly/blob/master/training/generate.py -""" -from typing import List, Optional, Tuple - -import logging -import re - -import torch -from transformers import ( - AutoModelForCausalLM, - AutoTokenizer, - Pipeline, - PreTrainedModel, - PreTrainedTokenizer, -) -from transformers.utils import is_tf_available - -if is_tf_available(): - import tensorflow as tf - -from .constants import END_KEY, PROMPT_FOR_GENERATION_FORMAT, RESPONSE_KEY - -logger = logging.getLogger(__name__) - - -def load_model_tokenizer_for_generate( - pretrained_model_name_or_path: str, -) -> Tuple[PreTrainedModel, PreTrainedTokenizer]: - """Loads the model and tokenizer so that it can be used for generating responses. - - Args: - pretrained_model_name_or_path (str): name or path for model - - Returns: - Tuple[PreTrainedModel, PreTrainedTokenizer]: model and tokenizer - """ - tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path, padding_side="left") - model = AutoModelForCausalLM.from_pretrained( - pretrained_model_name_or_path, device_map="auto", torch_dtype=torch.bfloat16, trust_remote_code=True - ) - return model, tokenizer - - -def get_special_token_id(tokenizer: PreTrainedTokenizer, key: str) -> int: - """Gets the token ID for a given string that has been added to the tokenizer as a special token. - - When training, we configure the tokenizer so that the sequences like "### Instruction:" and "### End" are - treated specially and converted to a single, new token. This retrieves the token ID each of these keys map to. - - Args: - tokenizer (PreTrainedTokenizer): the tokenizer - key (str): the key to convert to a single token - - Raises: - ValueError: if more than one ID was generated - - Returns: - int: the token ID for the given key - """ - token_ids = tokenizer.encode(key) - if len(token_ids) > 1: - raise ValueError(f"Expected only a single token for '{key}' but found {token_ids}") - return token_ids[0] - - -class InstructionTextGenerationPipeline(Pipeline): - def __init__( - self, - *args, - do_sample: bool = True, - max_new_tokens: int = 256, - top_p: float = 0.92, - top_k: int = 0, - **kwargs - ): - """Initialize the pipeline - - Args: - do_sample (bool, optional): Whether to use sampling. Defaults to True. - max_new_tokens (int, optional): Max new tokens after the prompt to generate. Defaults to 128. - top_p (float, optional): If set to float < 1, only the smallest set of most probable tokens with - probabilities that add up to top_p or higher are kept for generation. Defaults to 0.92. - top_k (int, optional): The number of highest probability vocabulary tokens to keep for top-k-filtering. - Defaults to 0. 
- """ - super().__init__( - *args, - do_sample=do_sample, - max_new_tokens=max_new_tokens, - top_p=top_p, - top_k=top_k, - **kwargs - ) - - def _sanitize_parameters( - self, - return_full_text: bool = None, - **generate_kwargs - ): - preprocess_params = {} - - # newer versions of the tokenizer configure the response key as a special token. newer versions still may - # append a newline to yield a single token. find whatever token is configured for the response key. - tokenizer_response_key = next( - (token for token in self.tokenizer.additional_special_tokens if token.startswith(RESPONSE_KEY)), None - ) - - response_key_token_id = None - end_key_token_id = None - if tokenizer_response_key: - try: - response_key_token_id = get_special_token_id(self.tokenizer, tokenizer_response_key) - end_key_token_id = get_special_token_id(self.tokenizer, END_KEY) - - # Ensure generation stops once it generates "### End" - generate_kwargs["eos_token_id"] = end_key_token_id - except ValueError: - pass - - forward_params = generate_kwargs - postprocess_params = { - "response_key_token_id": response_key_token_id, - "end_key_token_id": end_key_token_id - } - - if return_full_text is not None: - postprocess_params["return_full_text"] = return_full_text - - return preprocess_params, forward_params, postprocess_params - - def preprocess(self, instruction_text, **generate_kwargs): - prompt_text = PROMPT_FOR_GENERATION_FORMAT.format(instruction=instruction_text) - inputs = self.tokenizer( - prompt_text, - return_tensors="pt", - ) - inputs["prompt_text"] = prompt_text - inputs["instruction_text"] = instruction_text - return inputs - - def _forward(self, model_inputs, **generate_kwargs): - input_ids = model_inputs["input_ids"] - attention_mask = model_inputs.get("attention_mask", None) - - if input_ids.shape[1] == 0: - input_ids = None - attention_mask = None - in_b = 1 - else: - in_b = input_ids.shape[0] - - generated_sequence = self.model.generate( - input_ids=input_ids, - attention_mask=attention_mask, - pad_token_id=self.tokenizer.pad_token_id, - **generate_kwargs, - ) - - out_b = generated_sequence.shape[0] - if self.framework == "pt": - generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:]) - elif self.framework == "tf": - generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:])) - - instruction_text = model_inputs.pop("instruction_text") - return {"generated_sequence": generated_sequence, "input_ids": input_ids, "instruction_text": instruction_text} - - def postprocess( - self, - model_outputs, - response_key_token_id: Optional[int] = None, - end_key_token_id: Optional[int] = None, - return_full_text: bool = False - ): - generated_sequence: torch.Tensor = model_outputs["generated_sequence"][0] - instruction_text = model_outputs["instruction_text"] - - generated_sequence: List[List[int]] = generated_sequence.tolist() - records = [] - for sequence in generated_sequence: - - # The response will be set to this variable if we can identify it. - decoded = None - - # If we have token IDs for the response and end, then we can find the tokens and only decode between them. - if response_key_token_id and end_key_token_id: - # Find where "### Response:" is first found in the generated tokens. Considering this is part of the - # prompt, we should definitely find it. We will return the tokens found after this token. 
- try: - response_pos = sequence.index(response_key_token_id) - except ValueError: - logger.warning(f"Could not find response key {response_key_token_id} in: {sequence}") - response_pos = None - - if response_pos: - # Next find where "### End" is located. The model has been trained to end its responses with this - # sequence (or actually, the token ID it maps to, since it is a special token). We may not find - # this token, as the response could be truncated. If we don't find it then just return everything - # to the end. Note that even though we set eos_token_id, we still see this token at the end. - try: - end_pos = sequence.index(end_key_token_id) - except ValueError: - end_pos = None - - decoded = self.tokenizer.decode(sequence[response_pos + 1: end_pos]).strip() - - if not decoded: - # Otherwise we'll decode everything and use a regex to find the response and end. - - fully_decoded = self.tokenizer.decode(sequence) - - # The response appears after "### Response:". The model has been trained to append "### End" at the - # end. - m = re.search(r"#+\s*Response:\s*(.+?)#+\s*End", fully_decoded, flags=re.DOTALL) - - if m: - decoded = m.group(1).strip() - else: - # The model might not generate the "### End" sequence before reaching the max tokens. In this case, - # return everything after "### Response:". - m = re.search(r"#+\s*Response:\s*(.+)", fully_decoded, flags=re.DOTALL) - if m: - decoded = m.group(1).strip() - else: - logger.warning(f"Failed to find response in:\n{fully_decoded}") - - # If the full text is requested, then append the decoded text to the original instruction. - # This technically isn't the full text, as we format the instruction in the prompt the model has been - # trained on, but to the client it will appear to be the full text. - if return_full_text: - decoded = f"{instruction_text}\n{decoded}" - - rec = {"generated_text": decoded} - - records.append(rec) - - return records - - -def generate_response( - instruction: str, - *, - model: PreTrainedModel, - tokenizer: PreTrainedTokenizer, - **kwargs, -) -> str: - """Given an instruction, uses the model and tokenizer to generate a response. This formats the instruction in - the instruction format that the model was fine-tuned on. 
- - Args: - instruction (str): _description_ - model (PreTrainedModel): the model to use - tokenizer (PreTrainedTokenizer): the tokenizer to use - - Returns: - str: response - """ - - generation_pipeline = InstructionTextGenerationPipeline(model=model, tokenizer=tokenizer, **kwargs) - return generation_pipeline(instruction)[0]["generated_text"] \ No newline at end of file diff --git a/python/examples/deploy/quick_start/src/config/bootstrap.sh b/python/examples/deploy/quick_start/src/config/bootstrap.sh deleted file mode 100644 index 950b749792..0000000000 --- a/python/examples/deploy/quick_start/src/config/bootstrap.sh +++ /dev/null @@ -1,14 +0,0 @@ -### don't modify this part ### -set -x -############################## - - -### please customize your script in this region #### -pip install langchain -pip install transformers -pip install accelerate -pip install "pydantic>=1.8.0,<2.0.0" - -### don't modify this part ### -exit 0 -############################## \ No newline at end of file diff --git a/python/examples/deploy/quick_start/src/main_entry.py b/python/examples/deploy/quick_start/src/main_entry.py deleted file mode 100644 index 82ff90155e..0000000000 --- a/python/examples/deploy/quick_start/src/main_entry.py +++ /dev/null @@ -1,67 +0,0 @@ -import os -from fedml.serving import FedMLPredictor -from fedml.serving import FedMLInferenceRunner -from langchain import PromptTemplate, LLMChain -from langchain.llms import HuggingFacePipeline -import torch -from transformers import ( - AutoConfig, - AutoModelForCausalLM, - AutoTokenizer, - TextGenerationPipeline, -) - -class Chatbot(FedMLPredictor): # Inherit FedMLClientPredictor - def __init__(self): - super().__init__() - PROMPT_FOR_GENERATION_FORMAT = f""""Below is an instruction that describes a task. Write a response that appropriately completes the request." 
- - ### Instruction: - {{instruction}} - - ### Response: - """ - - prompt = PromptTemplate( - input_variables=["instruction"], - template=PROMPT_FOR_GENERATION_FORMAT - ) - - config = AutoConfig.from_pretrained("EleutherAI/pythia-70m") - model = AutoModelForCausalLM.from_pretrained( - "EleutherAI/pythia-70m", - torch_dtype=torch.float32, # float 16 not supported on CPU - trust_remote_code=True, - device_map="auto" - ) - tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-70m", device_map="auto") - - hf_pipeline = HuggingFacePipeline( - pipeline=TextGenerationPipeline( - model=model, - tokenizer=tokenizer, - return_full_text=True, - task="text-generation", - do_sample=True, - max_new_tokens=256, - top_p=0.92, - top_k=0 - ) - ) - self.chatbot = LLMChain(llm=hf_pipeline, prompt=prompt, verbose=True) - - def predict(self, request:dict): - input_dict = request - question: str = input_dict.get("text", "").strip() - - if len(question) == 0: - response_text = "" - else: - response_text = self.chatbot.predict(instruction=question) - - return {"generated_text": str(response_text)} - -if __name__ == "__main__": - chatbot = Chatbot() - fedml_inference_runner = FedMLInferenceRunner(chatbot) - fedml_inference_runner.run() \ No newline at end of file diff --git a/python/fedml/api/modules/model.py b/python/fedml/api/modules/model.py index ca5d0b95c1..a02e674f47 100644 --- a/python/fedml/api/modules/model.py +++ b/python/fedml/api/modules/model.py @@ -21,6 +21,9 @@ def create(name: str, model: str = None, model_config: str = None) -> bool: return True else: return False + elif model.startswith("tutorial:quick_start"): + # ../../../python/examples/deploy/quick_start + return False else: # TODO: Support arbitrary model creation from GitHub / Nexus AI Job Store click.echo("Model {} is not supported yet.".format(model)) diff --git a/python/examples/deploy/quick_start/__init__.py b/python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py old mode 100644 new mode 100755 similarity index 100% rename from python/examples/deploy/quick_start/__init__.py rename to python/fedml/computing/scheduler/model_scheduler/device_client_runner_deprecated.py diff --git a/python/fedml/computing/scheduler/model_scheduler/sample_model/README.md b/python/fedml/computing/scheduler/model_scheduler/sample_model/README.md deleted file mode 100644 index fcb51bd792..0000000000 --- a/python/fedml/computing/scheduler/model_scheduler/sample_model/README.md +++ /dev/null @@ -1,57 +0,0 @@ -## 1 Device Login: -Login as fedml cloud device: -```fedml model device login $user_id_or_api_key -c``` - -Login as on premise device: -```fedml model device login $user_id_or_api_key -p``` - - -## 2. Model Card: -Create local model repository: -```fedml model create -n $model_name``` - -Delete local model repository: -```fedml model delete -n $model_name -f $model_file_name``` - -Add file to local model repository: -```fedml model add -n $model_name -p $model_file_path``` - -Remove file from local model repository: -```fedml model remove -n $model_name -f $model_file_name``` - -List model in the local model repository: -```fedml model list -n $model_name``` - -Build local model repository as zip model package: -```fedml model package -n $model_name``` - -Push local model repository to ModelOps(open.fedml.ai): -```fedml model push -n $model_name -u $user_id_or_api_key``` - -Pull remote model(ModelOps) to local model repository: -```fedml model pull -n $model_name -u $user_id_or_api_key``` - - -## 3. 
Model Package:
-Create local model repository:
-```fedml model create -n $model_name```
-
-Delete local model repository:
-```fedml model delete -n $model_name -f $model_file_name```
-
-Add file to local model repository:
-```fedml model add -n $model_name -p $model_file_path```
-
-Remove file from local model repository:
-```fedml model remove -n $model_name -f $model_file_name```
-
-List model in the local model repository:
-```fedml model list -n $model_name```
-
-Build local model repository as zip model package:
-```fedml model package -n $model_name```
-
-## 4. Model Deploy:
-```
-fedml model deploy -n $model_name -dt $device_type(md.on_premise_device/md.fedml_cloud_device) -d $master_device_id -u $user_id_or_api_key -p $deployment_extra_params
-```
diff --git a/python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model.bin b/python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model.bin
deleted file mode 100644
index d98296eb617c99fdf257707704fa9c61c28bb308..0000000000000000000000000000000000000000
Binary files a/python/fedml/computing/scheduler/model_scheduler/sample_model/fedml_model.bin and /dev/null differ
zJu@2i|Ik3|ga4M5E|9U#{8*+o;Sd|GPB~Xqa?Vtq1^0nc=2EefyjoeT)0=uJ2_u9X zAB=FfpCPV2a0)g+7i6vi?%8z_4f>p9d!J@tj%+Zvrf1`Z%w?#Pmka$jT*TJFce&E$ zC`!keLermX;@0!^EQazM$JUh4b2WekyPX8R`H}e3Dh2=1aL4CIw_sfQIb2t;2R$cB zz@TuR;P@sQTC>*Cj*UK}NiX_cosAD)WYYe+qf}w8&1F;KS%=|JWZQ;d+axX6nVm_q zhZX2#I1#RgtU}Y3a(H@T6H2z^vV%Irw(sx<*&!43479MDCLqbW?cR|DVUOkR_nefZg}apW3M2an}6 zbCI3qHtCcvvW&*g={o^t`k?5+rP8`2a#-(eV(8}&mf{s`wzxk|+m(QtPb#3>&YoaB zYag5IH~^b3XhM*n4?&(G5Ye*|EGKEhT5WothW=nPa~9!e8#9d3J`Q$XlOW_qJXUro zWFpmUc)l?fM!s=Gr-@F`?U#&o%qACAxIVyG9ydR41vXU{ zuy=R1V$Iu6+;Vg%rr75}{X`=;YcUghpIm^7*V;TPO&u)?$q9cOp_8)D8aM9oL3@Uw zucCt6J0*ecZWU~_*$y#Rwv*p+3OG4RP#0{OXkMG8WK=yfHZujegDo@2Q*za1DkwKw z#LSGN#GiMBc#?+0MKP6|&rl#6!9E<@As^Fnfd3XJG7 znb_(d1(W?>_|ppNzoqHI!!vZ>y|fXuf2CpYoGY+*i#Fch83Rpr)azDR!@d4ofSN~J z__m{$q2>cCHmFfB6Zbrm_nW153D9mXv#?M6#U9KV11woZxX^U>$x{&~Ee( zXk4&{^541zil9QC~RX_6Q-dub~aC`4nW`WTe*j| z8Z=EU76(wzddnMYlzss#n+O+>A5~{fR{k*mz%)Gak@!;R54ug~v2W}NRC!YbmZ~w# zYxg3^kzHmRu9f2Tl@~!0p(?cKoPb4*`S|08F}A-C2cJ=e;Gum8%o8q>d%l?28=Zix z1!j0q3jg-fsDgQjkBg0u8 zkHm)kKG1oxod=OG-r?K-&WvLzi&F^2rzp#mzF08)sz%N|N7lORBwT-AiE_2s;`z;# zCx{Pc?$_snx8D;!L){qnY*OI&Y5TDAQxWX648Zzz9X!&Jy?!Pr>oTfO^GkY%8%;6!k5JHo9G=Are2IHue)UZ}3@j@Ap! zar*KQSg>Xk)|x!$&l4#hpuUmG6z|!Id27&a*Gu+$Ul9uC`LJ-)cJ!&|_-pi71@>0+J2S#QE{xzwoW+oeiH@{IZt_}wNSgp z5qk7lhRWiXWvaW#HD5{di2!dfdAZ${+d&o%Mbd)0Yp zU_aTw4Oj5io^xm)5(>F$-l)+z9dusj;_+W6&|u+4VYC+AVa`=E#TokZxf$RUw}@G= z6neHr@??KvHzak6<>$tW=NY@>hV6(@5(J8aJ^0Q9;`T|$bLmtQrsFsTJ$jvF4To35 zgpf#dI85ArdJfyHWHA1%J!%F;V8V?{7*sV3jTW5)Yh7a&Fq4>h-6>xlvJ6yDp9Njz z4c_u^3`T8CM6>c*ajh$zZ9gm}_wa7;iAsRvu8l0&ES)(&qC4z^0@e_?6jm(Agc64( zDDCNm5rN*Am7fC|^J8K3SU1esKMLA^<#B~%I8*%o!V4FsLZxLhH(2E^*6mV&r=!PX z0697>{SPqd%2mQYxE7VoL!m7&8`YlvZ(kpR>l+VY>koYt5ABbu2aH9Jcn^@O)d;te zwxLLRn3w*kXR@~8yw~oj*!2D^KCX#Grva(_z||mV^EZc<=r`PW<3y&WlE~ME>fqhN z^SI&3W$b_N2>SQv1&aTqu>M0|w7#qd(nI6L$0z5Z^v@l^D?AK>#wUSvv=mCmyRjMh zsVJdm>siliXdX>mF;yirZX@pfv~$dL3(dv8Dg|XOd46`%yrM>v>w6x97+Ey44<85> z!713(Zv&4g83#`eu0n;E4|vT#gl1DVFxAh!(YRC-N4!gg*JFq!l46A0e;&ju%5iA+ zE<*4jZqJ`P#7m3-G- zp=m}QYyJ}sqPUrCw|yb?D>PV;c{=z?wm|7;JJ1|?1mcx1xCK1q4Ye~cM{h27b(;(# z?~!1Au7h=aY-HXF7Z7!?6gRDQCx@U+-0`L_7qy+3y6H^}if--}`wv_J3o{GQtnD;! ztr$Ri`xKT8U7@W~461)4py}csu|VIuAj23gagW0dSIGVGPP#SK!l!mkD9CAfP2Wt&f!Fb3#^cbJYq|NWG zzKxB7=Y2z=^y)&c_KiHRTf@M6_kDh-&x7L4Dm)cnKHRq=+IOJjWO;h{ZuSe&CNk&w?b?%b%V{x`B1ZS z1lJFhqDA$C-=kE%fit6zvXbIUmV&VONCphTXFoSa+LPUEDJK` z(9pn`tRK1CV%qo;lWLT0psaPyzpTf~Y`pz)80Pq_gV4*_+|VWx?H8Y={Y6(ksCNOn zc3%Qnvx;HojUsGo=mV+`nt9AG7q~;8ZB~6Y1}&ZgzrQcW*j(bVZ#)6BR-6FeM~2v; zQo(8t?t=Kka#l?Ig}N(6{E5z4)T=o`T}yJ$FI9l)S3s{rKX_=`KGxE*ki|Ich7m*a zz^CRRA~jgiZ80^PTtOt3Dy1D=J}?_uQau zgTlKtPS?)m;EhQdE4nALGPrTqQQYrP6@=xV#7;{U z6cwE+YbkJI?u&M#Mn47K@5l!quQE_(mHz+ks!r=ap>D}uZgy-ssQINb@@+KC zdy;|!lYLQ>nD*{E_3T?b$GVYA;hc5`D*o*YT1qAOXk^m8GKuBHt3!tFW{5YQ2Ti@7 zQX_dBgzD%rd2kirxIkV>jsm-{P25Hp0*)sRqd}Y|D^3i6yS`@FZB+{wsg{bBzl*Gt zuT{YARx?xHR}xcVfjA-ODCTY*g+?xgpdH|VzgG^x?+HfOt!oIpDhr2gu@%r}nSdH= z0$Jy5IscfbK)Zc6`E{?$5Vm_9{&}B5U7(l5VsnLGy{mh0L-w~L<8+} z;`R&lynDO6tlN}4{#d=|Qj=o%ae9fU1-k>hfb>+{4l7Ku;cpjWC%+mY9BNsPA(Lp2?)_CLPKsjE-u;=u z{Cp-aUoVJuJ+zXp*+-6+p7^;T4Q6U-;kNxjAiw^TW$r$Qu1NXilhN#VMJ9IVC74Iu z8B=}=WL})RT&DNu*Gy2^MZ4nur$M>(EWimrP^g!)!G$4ctec2u%hurDQ{+XM(g*Ha zZKj>S57%9{0E|{&#-M|Jz~oaJ*0`-gV&t|`N0Axo*f16Pp!naQA#+tU?Miy$*7~F$(wbz zfnLR0R2AoeS7a#!RnV;C@iOpj%VAfJj6{#$xgb)1SY}=HkJxOtE)!kVVKJNka*6jq z);OgHT2KzpOFd2etyd`ZRT7A+L*0V+A9;gG47_g~j_yaRAhtib-zAGt`e2mR&`nh! 
z8k;L9d*+Fq^J38Spdk(t9Y*PzjIyV>*`WE?6&o&ObKUMH3ja-*$}$)dgVN-N)h$dhrlywu!d`%}<7f!tG_$zu2ip9^o6@X75p^NUmafFEb&T z&S9|jAg=3Ph05$pi9T9@uf-&pcd?EORqRC`(QmCOr8`$N!ja4$AOmWwG1 zo=?7%!_8tb+%h4s!(0FPzGtWNu>_;_>#x#vw_SKn+bO{rs&io?V? z8w_=0&ax5r$D#DaL?Or3h_?;t$6MFsqi%#bJJ4DJ8n2hJ-7%&p(W=6xF-Ji8l-OKF zxgg&^iCs68;iOs#Hibrt>$<*Sk3ug)A4tGk#3E}+8jBqv@m$xjH#AMo$5Hy(=p+?^ zg^e+||H%N2!y@ol?+88@h_|z>3pS5^!4B<6LF1&?+)he;%yNa`_5GZ1>&Yd^okD%} z&~B(&GnbG2eh8y??#Jon#8G~c%p_YUh+9kAd4qTyxE4$R#g%sExG@569iWW7G6;?C zB!c?)<7irc3fi8{2kXbLte&s5z?Lv`FzH9ES?YZ|PmiTL6Q{0WAmuzR!<^SsF_gy< zXJa(apX-cjpYJmF^E&w0H3OgCD?>?N8;EI5L5bA~y!?18N;_S}wbOfp%Xw!sFx^1x zh4YZ{gFHtqF{nCwEjP^D46cr4@TQY~uH8dnR$p>v{j&>gR|}K_$-`@}FQ6mwXJze` z0RL%%xvedF-&_OM6Qn}vD9R1>^yjZGO~d)DFaG{E7kkXuh}cLz8z&QP_R^D^f6fD4 zuQTBO;Rny@*9Q%a#)C;~Bpj{WgyUkUvTCZe~*TK%8O`ty$DsM8<}XmkCp00@=QHE zLI3V3Y}vORI}OYEl^_k`pphcgpg@j%)VR4vp6QT-+`c(RONOCq+!)05ya zu?M$YR)8Mw&#;zv5q$H2V$jGb5qou^dG3D;c%PxS@$V z*MXX!!7O8C6?9cpqH@oDt~^O@Q0nodlxe`Te3kaFV9 zo7F+S@SC9OD*;^SRz&15Ewzf+Z{Fp~~m1YAtmT!dD&yiQ|eG>9cde0uN;jz@cQhRij z`&@{E&~fXarFjPzekY)E+FssyaXdE7>&Gmdd%&rTe1vP4!N$50^*e+4r{q#>|Cr8` zPYr~4VjhefnTc7;%AsRme^}?1fd&spGLO=w%tB`!)Vy*A!`r%GWN;Rz)13Kb5&0Wx z`hx6Y7~Wn{f^FkV1W)4kxy}*rqiH5eC(ahnx|)u%^KRgubO1(ubwp{XvH08*D^y(; z0A}+SFpa>MEao`P+DZmN@&I*E`t=0G-OJpdAetN9BxdfrTU<9=l_`hp5gb;fUSCvZ6FWR-zE?BK}B4~8`#p1u{p^F!XUPHssed%;icsWDyXE)HFQ@{*V z-mn}MXKwg5owL)2JVyoc!2Ia-JB7IscU|mnTqRdIB;oWGOYo-sakPsY2miKS zz_vbFLU~Lm*l+&EebrOJX>0^|zOClQ#=GE~x(q(2XM&DEd!gRVf=RiazAR5g6dkm1Zox*vAg zCN7_q3Z~nV&;`~oOHExgez}Kc=Q{9AIT^cuNW_4s3=ETx#FoHlkfoj#`*kq=Y#V0Xz{5pOVMEQD)DOh7L?Xrwz9aBf;FlPhIZx9_U}(V|A!3H zC-lcxdU;s4bu`-=M|+#BD5jxBoawx|{6=ggO8)NPw_-EE?76@_%^JAw96UlyK6vcG5MS{%Fr>s7`zU==c&@osIhPW z>+t-`Bv*3ShcU#W9(GW?q{mup2$@8@he9r&_M0`XTS=YJ7h>ZTV?p_sc38OzLALoF z%OeLy$DU;vS^X z&r88vk_56n+W2T#1lkRp3G(exf}-dwwmta6tVL7!x~1-@co)sz8byJ~G?6=1{vSu@ z9v9R0{_#N)Ck#RnMnV!oQF}cy2uT=(5JE^oh?9irp01-((?zC>bk&r~wAWKcDj^9W zgb+d}p+o5KTi@T`o!9Gl#ol|Z=lOizZ*4XQ*kM^eeLV~gvSn<71f07m18eP;fz*!1!Sy;sO)qDP{zuR#tTeUysTxf#$hFAVHPC(+K_ zY+A^2pq)WaX_xFc>9q3(<+FC`zxM^noKZy`?o$xld1aZK`<#MYMh5r{M4RdfB>SB| zzArd|$!kI}^m`e09lS{E-8O(H>w9(ed`IfocgXOUXT;ThAF8 zj|XM#C(g+uh8OoJELDf6mu9|sOzmG?BzEC_*-Y&>pBYn11&KSz++V9vHcAa5t<{|K zP$aqsPqIBxB)9L4AGQ?5k*LQk_j06_Gc6lLg_X(p>&rZ>J$(*zJ0sxo?NUs?a{>D5 zsIXF;58qfuzIo(HUS0o|Xq{jT@G_l*>3xethI_|1sxs>%F zC^-o|gLK~o0xzChd_Rc-x(BKy-7LM5VVJn^xT=H zsPL-b3IuF!CSuOH9@?bj-Ix_fB)FCR;cS_dTF)%f_g6N*pjP z9yGSkM3K=1sI3VH)uxlAk!8b|Jhj5u8XngRMzcQo6*R{vT)4p>?;VJT?ujeGZCo*V zdSMeL{c{S2c~zmHqyNZ%4eEj zH17e4`6a`cK|P@MT?JLO9Vf;?39z#%1STm=(dp@AjMx^0hS57fQ8NYFydsEH|C42G zxFx>-&jq`uU7{ToV(5}|kS!bb?}LW6%&z- zCppDBL;g*Rp;CPgcZQaM$=+Jp&E6GSN0woXG7y@32Oybp1~bE>0snCYoxCaVOnDXc zlm5^fjfb!?*pPX;wlY*j7cbeCz;_31!Pe&^h`2BFB8DucY|j8TGapllaWbc~fxYXJ z5@5?*c7HBmUJ$2p=3Tj8+QmF;Gs1kJk76pColQsUX=$XrUktj0cN5baPEeV2o^-Ba z&(y>K%eD?*+VR$$-02gAl9obfU`z|ORZ8i$D4qDQtFMcrHMT+Y9DB?IfowO|S!G?#KjAdL-jS`QMWz|II)X>~ zTnJG6piuJyo9UfJLF!aEGE)f(uTj*gYY*oh+>10UWIgupy|8-=<9;$9j?`bqRqu%e z&h8?p2EHa9_BZHW^Al(hdV@?IlnAXSTd25YG4bSYQw>8ekhmX!83~Kg^ETthZ{AsY z`r}p%PWhX;GSBgi0$1wvRF~7)H30H|8K8ab8OFVK2M6(C8WV30&tIBj*B)ysW*!%( zWahQjj{v82PGG#zAKH%Uq4WGmFyGDky|P8{UV9;K`8plzC2y#>i}0;n05iQ0jD#0jW6g*qbV|E2EJKT=_e3)BBcHr36hu-Hb(`dXPkf`n z_)`D`ulPZA!zW^mO=3YSw*GV(8+ z|Bd3B;SMNRaha&!?I1>DXHn7Wsnp6Rlli4(_e@Z}IHz6@w=mdqyY{qamf`*Lo$A4M&eYrz0l=X~2 zk2d!G`pXZ+8{bpM84hr=ei52&Q<31Uwoq^93~O$)9k@^dn-+zkb4`CN`B#RHr&6iY z5d3nPB4QVdxm-j+w%jG-R0+WP2{4 z*XKy)UJ55&Tqsp+&LcB0|s&8g}*r zZ6qwCe$s`w)OmpI7j{-1?8bRph#;&ofd~$3Sf1#g0OO}*qbOhmuf;OXQT;DKtK&4t 
z>}5zce=b1bfna(sS;aCqpGfgUHTos*#+c!|@n+ds=xW(VtW*QAWQQ*9)JVk!uhXC# zok*RUchJU}S4k6|qt^3ti2F@7mCY@ry<+sx!n2T$4jhZZ`_HMrb}6K-j6=`k%wOKA z;uHe{Id$k@a33omCT*(#2jq~3hGyC!F$MAP94wg>i2W7>g8I-_PJT559^YpEwUOh= za!(KJe9o9jht5!~Prhi_uODefRfu%5P0?~6$)~}``ZN~)Jx;-`kf{J^p8ZBibKE$o1pD?*2&Vh z#@tbi8Qfk8&vTBW*0y|>qb&gUmV7KaT1pzDf03dS9w5v4NP3kl!5%~N@z91{Xrq@8 zO+}&f$Z0JUo{(d~H6xVr+br$gQ1m?Tj>ujg#lQUyy3<)5o zmX%@5B^$6gxCV3t-XOg`kT*qpdPl-Kdbx6fzvjW_zmxFFkg=$+SODF7L~NcN#=2HJ zskitP%(^9m7B4k+zhb+X8LMgR^$VzC#6p)UgKZ#N^18SZZF#qvBQg|g1 zI(t<@e#K=xGBpu{k8Gl5+ul>BPo}&>vygODn}M^2n#e9`g4Gu(xXsR@^{*cD-4n|h z-)}!JEM?vZX&|a?zmv_&(;;agW4moR$=NvDlI4;s(6Z+|h@*cH|79oPt$7mGt&afZ zxs4E3Jr|sH&r{8kJiLE!E2f%xpuvN1sCAzVbCyM+Q=1>wv-7*@t72%~ph0CWDMY9K z5ZFAZBhQ^!R=83EMu{nqdSVduyoMqmeK=wi2A;7lZ5MBKR^q7P^PV!6T#L7`${o3@)q0438XizlW%< zUIvgB8EcgiO_cMj8Z&_#--atN3#=1D(hdCqu*FirF4&}y#i@(OY0F~6NxSqbfR0PfC zyJ+TLu55p^66zO6(?yTcFz2#6O3fBoHYFVZ>9UoU4bzu^ncEO{zRCdQ=ZR>OGYrIA zZ&5STb663Y3vEY?L9p0`e!X)6zb=%Z^DHUuYuCh@os2=~GKuYbYRL|b)!2|5i@}e6 zkOeHuaw*9gmv3V4V%wRjCiC$i%GRO673*k?x)1Am=CgD5H!?b{KVz;0aWVXANM5=T zAFPi@qY>#WcY1}s`j6#~#u=0Dw+86sb(-(l?gJ84CR)tvL5FGNfuG$mw4Kd5CxgR? zl>KbR06(}mC>9$(>Y(n@YWmNyNUS!!3Uc?=Q2&BCjbG{zk6+QmZ9H2RJ1{QZmQ?b- z?4HFgFN|KaYS;YC@m_y+CpJq(pk!|5uPX>c&y zOs4p#adfg5dJ4B<_^`NEUzHBxLQv#IgK?Tp#PGE)P+h(dEcIH=#y_={y& z>ly_yE{ye1#^yCS56MH1i#V$=0xAn+tpA+I#g6KMqO#|NZI_|`OfWBx=tJwCusy=6 zi4YXaD#AiLFl{rz+*UQqhOZ*W2hC>Q^BfYKeUwN}w9$^Vdz`g02#>^TXh0&-1@pxw0W(K3Zn6tvkKK~hLOei0 zZ!1Ae&V%)>hoswKJN!JSgsy)tvE5w;sXx)lH>oB<%iCb`eCZCXzno4Fvfjkmfg{mo znv6N_7c+LB*(cE#Zym z_TU_9jQT@Wp5sW^d==4M^Mfe9YY-2MdJ?uMfMslaiE$$9O^#j$+QUOYqAe%QjDzE( zahf;w7>eP&qtSS35boK28S9hp(vEH$$Uaes!+n|arqr1hn_HsDWikBxa{?K; z6Fb-E(U@KHK(xb-i@|atiP^#SP+LlG=J?=@ie9K57De-HS$A`C8@FNKJeIw@N980F zCTxlV#pF1?u743U)oj2;I-Bv|p}M%wz#3z^B(QJuQ7mi@K<7WjIKHeHjg7-`X!lUe zF7<&->ki^AWqI~J#TL(W%CK?UP;lF$2~Ng8i58tgb#G0?R_hH^F~NX@C65BHwU$^{ z7RmS<$+S~hO++<+arIWC>Fd7duWr?z-6KJM6z}T2Y zcw+Do#x}M_UCmLTYQIFDKAwT!Peq`=&K$Hd)xxQ1Y&J1tFz5N|F|nV^dO`h90I+~HxJIANkwJ9({R&^WpNH@ zLci>55WMvWdiEU#?Hud*Ze{s0{q-P`A0&$yKh37$E19siCx-tQg%$}xWR*e-8!aUC z>8D+Iv!4`_Zw*E37mtb4`SVbFY6o_>B+$b2Oy*#JNN-Hi$BsWO#N>|=_>Z53HLg90 z^5i!vFt%3(dW18#hYat+Ds)rfr8&Bo=_E@j6l8 zc|dZbFQQ^}5I1qpQFQ3{h;(GKd#8OIJ)v0uD$@@%IfC^Ji5E_AORhVd+4p)+_g{+Pk?eV2Fet42$~W$Rb6ze*V!|7Z5V8=%feMt;uxd(Ug?Ga#uVW%gKUisrY6QiNy{aSsW!v%fn{L8b~CfO zot_PY0gHSA$0X{ziUG9r$)B6Y-)ffskGIl4X z>q%SKHInmqKZ>lT@OBSa56wCsKGdW$Um%;KxU7Z?fioCa?HqF>P9yZ4(Lyj=eihU|pIUmnp2PO8pNak#1&s_}hQV9>QGa3{RlbyS&tEc*abzj|GB^Yu z&n?0FixFgkXa^RooQ=^B@|lOhABzeLKz@OB|NSyiyyOQbtqVinK~jvCD$(t)bDkd@|8=FGP1T8EkfJr%t0{NNvw3 z+7@sL!&a_nw}j69oP*uPY2^)#il6N971&Z{|%kR zQKYT;EsfJZhDpKUFeo<~)e%O#v+fmW?OA}z?E+B$-h#hOlkoI#C5Ewc*_6#g83XAT zSu2l(MLiNgbSf9!#i6`#tvy;yzeu$V|AG$pX++CzGfleoo5q``;@tAXSc@!oz3K{) z4H*TK9%bUb*g!D49D^za!6zB4Gjv3X;-6Dcd+R)wJz~t%8|B!}_MyQ&|B#YYDayBu zraT%(FRr1>o%yVDcakGVFRXu4yrx4-;a?yLOUr?;7S+XX9G?G&mWYhBZo- zD=aET)2IU=a^KIFf69ZU*%hc5JcS4*TTq8V0Z{(a2fLR%qww1tS{`J9_Q1zP^=KSOU!t`#8wQlh5hwGw(lrA!m#}=`opNrs*>;pJW_P1QokVnEDQ_^G zv92)y6}1NV=H3>pEXbgP**n~56nnqQOu;SqJaKZ>;bklb&^@Y9=!c_cg;=5x`9{0 z!S@QwDijk=au72*a^PZ+8a#T;WR7OmF*ex)X~(YO?;1bYc~gMuCIkA8ref`-E5uW= z89QtJX=~gz+BC(7DpyX!89n{bA+!=Z2LDGEus)IA6D76`5@3085xOnS1Aj4Nn#}-g z6=g6FJL^NeDFPe4YSKFQH`R14fK>rJ80W;`_NC6)>N*;7gvYVdRSPnXO400S7=KG*5p^9Z72jI;Nf&;GaK?{u|@FG3|wQhG2soO_t zs(+Md-#-f8LDyiA(hbYo*thYz0oQT5jE42jLfs~||I15JRSsN+Ax&zi9;S~X$7Srf zrCf8U7w0|62c2?KI6DI`FbX+Arq@=ZwKyFY`T0Uj(?JZLOQ`wqNZiNv1=7hjD9Fz) z^%%-B4<@l(_e8dT$V{hZVf$Df(gqKHi~-?qwih`z0qjpmAwee@XBaZ)ZlNXCNA&|m zsx`qSpR35zGmL 
z`}3Be$sGlBHJLFcBAd|~E+gu^|19fOJ)mn!A27Z9le5@VPnl~EGhdbgc^ivO@6$-p zjsx)V!)6rKCUcud8ls!-B+&ITh14fD7^0R#$gv98pAd#`F&v{uqEdu zk>5=MBPT0zWPLeSPs)Hy(HD{w!tN(I>0p;@1ufHhK_6m`+L~-O?>?NaU7U`=%>&2+ zwrkwrngSM2wCRX5X>enB8ghpepqO6BcVQHq-98`9B6Xnt+d$$lvJh1kSLv#(Y_J8pdB@Ili;t9ID=BLerPZ|M8>o!zK#u?;jAin0y!!R04`IjAt08 zi}+|22>w*4>hzU3|HmoVSV^$xd=E68y#yOn1e|UxCnCL_T;E=7j|gl>DeQ~EZ(6BV z#B6Hr=70-^SYh4ADx7b??uedqsmJtXAb*s~^2OCe-I+}78QaNd*hZ-SM-54L4poiOs4bJBKsbd{D~G~? zy-paMwS<1z!gf(l32){w9K{E`q3g|E(s4Z!8oh+%)e8%(cU}(G+s|WlK`@$Kl@W~+ z8Ju=!e!r89IdU@_(ic0!Fr5Q9QG5==zH9^Ecd6i5Pz-IF(fIdRD>m!dk5$FysO+If z(@$i;a_ue5*^ms?V-WiEV>>&|J4C{IB%)qs;O^?j>F*H0?swG~Ts?u!q@Pg{>-x86 zrLnvIMr!zvIqVy`3V**3M8V!(uq1yhI$9qAP0dTdT{#Ui{`1G>D_5e*X&VHOo<;0# zhCp;)Ip#jr1QvpWgjzrN!1|X`>noNmlJ#(p%~;l`M_`ID0b9NE=^|wnh+d86r3=Pc zf~E>?Pw~Y}wnyj`?E`y1%X?x@nzcxkV`auPf@QS3s;X z^WnLzHzq|HgZAwJI^jk>h+bB4`T{nAm?H*@Yb=|zZ!<649Ea@{G1#@t4@K=d#QDWR z5KvzT`F#=GHd+HTZi0r{6;u-6pSevI!k@&IEPJBF^qBeVT;xkWERTl(^B^of8IPl5 z0#W5$feUT6;L23ML$8Km*VOeS*|HKcmOG-$&uTKc&la4o6AP;610bn+G(63e9w-8%dRN1SXxC1n#*U?Oi8Df$1@@ij{-J z>Jk+fwi2V6x^Ta@Bj*0M7}W3ga8*-BVea@WRM_S+*W)$RtF*?A!u7a%ChN9o_XgYg zRLBUZhA|ZiG-a+Qk3~^n*M9}bH^spe!$7pMG)1pk%Dgh!@H4AI!qK$)(`Gs~B&NiiauEYj|{$0@G7iM`9;OlnYAu z+7-;Vs~iBLzc)eavVN%E!tq8+E@O-06BXF}B!O%{xoAudUVI!26AI7c#Zi?QSTYE6 zGj~D?`|osDdf}-LDje{{A9Hs!k?#9?FuXn*jp84XitX0;*1{VV28*fZJVNScpXW{2 z2Se)?AJF+x1X^xTnf&eCP!m0Dg0wH7`Imyp(#Iv z`p1@1|I|v_P+|^dGrv%m@0Z}O?HACwfPHV>tx4u;U1GCE7yO+nf z;ruM?XsrKojf(ZkOFOcPQDvD%73(ZW$Ee5Lr#@CFICYR8EMT+uA(2G1VGbrJ{~KlF}HSWHr0K537>1R_q#_9C-SnPiu{*U z==gzHe|LfnZxyV&oQk5|R-8cl9VdKR$J>v*LhAeaK*xiYrnIkp=H)OT{2) z=raNwd+ULQRx~+jxfabg#^99rLKIm~hEV|r(Q(KPGNJD&=Kh|J+MUNyBR>Ss{l#{j zi}Q)GR}UOKI|+_t=c4k$KYWL{kr#WrfJUYtN_GW;ckEdzh?>CL-(CqV$1gz1zvWnV z!wZGC`=I;UfB9Ol1DnZfVac)CsCtwJ1Ky{ja1ZNo+|Go?P&d#sIKi@P->K@-1n6it zC2IL7jM;aU4s**zeT!5oqut!x&2p^$5f0Kt)%=#fO0mJ#4R*Q&v)*wuHqTGP*7|4? 
zeC!udk|#v>pHk3Yb&XbR^8=$fa~WrG2=<*g9S5ZjL4S`F%)IxRD!Q+sc!dn6F6e`1 zy{d>&P5>#I&GybME=29WmKP|}V1!j4G|s&WMt^NUb3aVy_oe2*6HNkW?kW?=Ut8UNmX6$dl^Mg5i&R3OdZKYUU{kHons|Ljkcf0*a% z)J|9>vBs9^j3@f|YiUz%AyK@(g8cLpP<7VR!ry9K>{X3XRR(zLG~(a2+tGKG8eVEG zMw<;HSoKGYO+`^?e=VO3PVhzTZE~u(euLjOggL3QpHa74j2Ce;4-?bbS$^|U5QhHd zD<|o&^V|qxSO1D`iAzWEr)9j@Sw_4Erh$#2n9A-3aQUB69rhPvauvmQ zHdE2XCLFZXt|Sb1;NRa_)@O%4H1ua3the#h;U1fR*e#=qr6Xb1jYK>?{|t_FE<=Z} z#%x}%3!MoQnJe1_fPI5nK;?u@~k@$*pS6GDZa z>0r?|n9Su;Sr%NA)*t)MiL?4sN4*%bT{9469?7)l{VK4yzJZ9|bn*Qrp1?S>!}zoR zC^VtEsMx^zEek!MgZYnb=lFt+$cacF48Y|>4`Tas_B|Nk2oYnlp}y%1pJNz|d4E=- z&7@r{yUBw@&7Q|mPpNzCcQW*`8cU>^;IW+Tj_baKe&K){rCPeJS?xA)fet1JUKB@49 zdwtSaUU4Li=^01Y9$>yh+ZRM$*g;iO)Fdn>h4ms>Ms`^nnKeHV`m>#?-F7)zm#c_i zMY2UqKrmVHNDqgFN!WK<1^+sSp(OD!?|J10x!LzJv^`HHX0BK1wxKMyTRs&`Hflrg zjs+lV{Esy76;SB(7wSG)4$@JjrHy|kGB#N!=ikE;T$BIDo0|;v&OJ%6O(@Iw{1~(w)ekh_ zm0ub2B-GNbqFwM$^;P`vOO8e*0vr)F9}U;4$X{=ZapiH=$%;<|RS9!eFFOsdY>S}# z+FDSW?jg3zvzhC&5>;O9q-J+M`sT4*VV_Ku-Q*}DiYN!u zIqm{5+&_4}Yost^2N;UKP;KoR+VYvUZ?roD%hB zl;iJ|M3he(LE@U`pssm1m{bOWWZfjP1zCO~C)L7j?mlQ?3{U4LSHU^$7Zt9|rJ~lp zTs?dtVe1IUJdB_vMM(q`BZyaT1FZNN1tpRDP^@0YZ5++Gz%Ii$7uLP1`7r`&y`?0x z<2p^U&7)RxD`BvTb?UxrlHzAs%=tM9cDQDuz0(S)u~#vt&T%k&T1jd?t!Ljsc9z{$ zNCmGhk=obHv3esCzJ#8}&bWLOFC7k@*`Y)~B9z^09f&OP1Cebz0e{B>N)7ES)!$9{ zY<52ESag{2n%jBhTtlK69tTk`V_;`LwiglJBC6Ng~Lc_%JE!j_K;<$wY8qe zmk$NS>9L$-b`meELN@<+PVT40;qp7$C{_gWhAWKFeGp@Q@f^uKwHz&~WH2y;y<XXi%c{UxAd7XgwHO3tNx8YG?Uiw;lIv1`;&A}0BKxZ%hf56@UQ^d z-cgdY0wB}EggDvOlLNbBFeyG6sxHjLZCUJ$J*{YXQqUHI>$^8%C3?99ceJ z!mWN1jM~qRgTA8>Fvk@7Egymd4n~4<@-2ps`$Lj??qM9iCQf*5Fw5CYBbwhRyjKUI zuB8hVy}XFq%JZSa@*O8FbfEGVB9J`?*)Tgd%o4PvKH)TZ5x$bJoDT$Lt%cJvACTA2W9ImZXr z2%sS^oU|@#C*S@YLF;^b$W3Ltt#y@P`u-ES+K0J(V<&*9tA!K$-k{QXr?_Db=TQ7~ zDy}k8qR)sNl;`!r)k|1*{8BK;e+^=DP{uiO970Z2yQ0ZuH5Q(&z@+7D_NxkFOc4vN zdHr3|IhoCjOY^uNTnKI(BLl%_#<3rkPE;O0sbpLw%DbNut!Z8Kt#vA{;WKc_<{fDC zI0u}bdeBDE2)gx2C~Dnx$CkGm;JITwDw@xLtKB{vTN#b-9CPr|c`qzqMWLDHD2+~h zB)ZQAk#y%=5GI?DywEbl#@XPy1>s*GJ8W>j0_pD=Td4CaZSMVsDoQ{UJOj%9N^XVT611KkN*x~DAT|?ysm69UP`x-pE#~f~?yXANSvdj(EJr8V%fXWU zhA8X*i_{mfY{AS5zO9z^$m%@7$Z$Uqjrhg6kMTlf=1_DR!8j2H2l%Gv_4LUX#$E6{ zgRz-;P?W=bxvV4YusRpROwyoc+&XGA=nRJayn^lH8F#KhusK7x4>Z#Rj)w9f*ew4GZvff z=Ym3$(5TC&(cdHIIhlR#^tyye7)EyI2M)nq>zr5W{8qpwm+a zg+VTK>mSzTS2B0X=X5mM9s^H4PeWB@5s?mFMFa`KkR@D&!s9K}h)Rg}s1zDgybzvG z^9SiJwiDJ{OFvH;j$I4buB>zcTt1hJlZuX`>3J3U|1t;Bk@Zx68e{dwuHaJd%b@XB zB8nc&CitcpI)e3~@ns+Qs=kD9J_52?*6X2RId0AJL7TcYWK4A^jvcoI9m=}MKt36R z$v-5%Dgm{r1az$m@Loe1imKa4;R{E+XBdxxiVDoPoQF?JE3mtq?P;?^SVq>GwAsiZ z)BY0ad8HJ9SA)*T6`)0TVdk<-gqKFpteXVWi^kyxngUvn`Zk&!po zqEYO?|K~uPvVTWUwyD9w{v&a934|gV%ig~S@I`ONa`-zEd_PDr)g}mZjK)IW$11d4 z%eY$_iRigy2c*xR2Y>W8;=QIknC}x0A1bb)1@n@IkGqD#f+5s!n-1FOtC-7511oRD zQstvWK1u&3@f;pVNgr=iPFn)1)m9|9-$znE@;&Etp_o?c3vlR}J*YNlEtS{Ir_!o0 zPHOO-CP=5_A4|4lWjhFCg$-KZ6p+Xkk`=db&#CGcF&2gP@6Y5i^=5Si(4DpxNM%w=7@dINHBS~2<-CZp+AFJgV{0WnM83lmun zC-%lAczYlV-T$#9))!LI#NamZ4GaO*lIvvZ-wKcqTZfL9%dz9q6W%rAJRbaY9{u0W zM>V;^TaIaEkn)-s>WsW8}Sa}nOzM4`jhc;*r(Xq4aph7!-=nTtB7$BB`U zRhETXLGiKv@}>SK)U*K!qVuf>Bq|B&-c=ml&3gfot_fd2HiMC;+l;Q045 z=qBukG4~tj@8f;3`Ctp}xb&KP?!6uRjd8<%Rp)WQm(eIl>`)1`cUveIn?d^HQ0iE8 z8PuJ5V7Kx#II^5|zWoBscySgQ86QzvEP>*wn=$Ir8K|mTibbB)C>}Wz!{+!gz9P%9 zn^NAeWg9g12!+gH3(4J(C{&nPfcj={s7nin?m@lLNd7nI9|~Z5q25sNc@0{QUxN>g zuY!^rOP_2z3mqk^pwc@6>xRZcX1fOSvY+8tCrVo*BrASVvk&;8TFe0in!nNxZ< zWZdb4deuc3_Ya^`pCHyZ_ht^xK$2uq0{m)UuwQF}V^hRf5X8Z zqS5JGHg(#`n2!tm!TZ(#>>Hv$_x|r_nAIcFwbd6y15R=pZ+0_AZ5MCzr3%ueN1y{- z;mfWq*gf2r6fg%?>U{PInpnYoKT(3ev*)0vc`U8C7K|1wJM*2*GDI8mN#==a)`2WT 
zfuM9Yff#gaaYWV0C)D^|J_whVaO-~9qHN(8Vx6mj?R|$}o9qgDD#k+QBX_zMS%<)H zIPGV`_5|sZG8yuFlYEz|(J^;^MZd@;4X!n}43XULK_0ZcG^3jOOhBw)4o!(AO(btHe{UgRG4)%2D+!LgHujI{B~H5-)B^z zMbJXT3%e7eA*D5)~55_stPUcT=Kx0&K$DRY%uE1ctrHN8K){K zlP+KGibaRny!hTTV#DYr%1p*e4zQ={35j%a{TWONsluT@C_cdrx( zqwAnHy^a{ko{@D0LCCS*)r!WMtSgcOdsBJT%ZWqlhY_f|Z6G$@_Jxj)Q+(#{U@8wR zAP(0YS$@aBQrPy6SHF>SCdI)p-Omjb|IXv@-3vweyN@*0Ist#I*o-y>6YpwQNO*M#@a`N z;v(U!HZflNqzc;X-9di8oQU3P1zMcD05&-Xp*|^+GgJttiy?QZa83{U z;k-AFyX}mbF#-&^Z~+4~E75L976J7qdUKs0E_KO3S79J7G%CYgEsOE_?kGHVaxN+r zv7le*1@y`s6m$n&R_!Q7;Wlr`JRSy)H)lfOg9VHa+#hmZSV4!x28Qo*!)G(6;$G(j zbSo=F&5X;y4=I68btNQz4Mx%Devn(zMP>7^!GDV`qPrd2o$-^gabEzDKAlQhkMzg4 zgM9FFFZSLw?jZi3PLa$vX;9be3@TF2l61d)(CSkRegBlB&L6gqZzuv^D_SFEXD432cb4;Jy;nBF`iy5X{b@dy|@rGJ~1Cv!yV}X z+XGlRqMkT}J41?%l5vGT&E*^}Ic@B^-AB3Kp`vdzWfY2$0bS(@AyEr}Qvv3@mc`KoOX+Dg0nurp{ zrxR6OW_$T8OJPn1RcoXYqdlzu<~bKt3%yBm#CMX!y{A&h#@#A$mdf*+)OITlfkNIzI@$So;IYC8lRr8Pp8Xm#85+$!_ zQrsEFkb25DTWeF%k9J}|rV6~{wt&G(1*(7dr$=I9p!#|hXq{{&{b~z9&(;o2zOyXR zEl&vaV`seVQ?RzR92y4tk!Kz3>a_C{{cnH@MbZdv-k?DAehr{IdjVzwctc`*_s&5ChtY=P~+s3D)T%C=5c#mx3_17mOw%nIi}1 z|InnBQ$esIy;RjVk*b_E(6b-&ge{y;W?xKZIj&MDG+2pF`{tr!`goS#2nEe6zK}h9 z4|XJfBW?exNX$&;NBgP`)e;JaoM&U^iksBgYa`=M%?5bL7``*h_|A1Hq%B&BI=TD7 z@WC@Guy=>tlxs+@UqXxS7qpZ0IUF7a5O>Ek;`C@Q@8)tAv{$^QMQ6?u9d8A?^h_rm zFIO^8;AMoZEp%Sfddy_bdixCuXsvonB&)_i^JO-Amseu5=Qu7gu@teJ?L#;2g;kl% z+c1Nm&}cj9aD2)KTZF@{JG0O(I0xiGg;bW_%_}~hhTT8e9M+O?DQX$}Wo;}i8yAU6 z*NxOYcoG#GO#%z+W_oi{IT+O^VZ<$4=Hy(;g?Y|hz2Gkf*{$%+T0?aK;qG+^i3ZR;#&9Xf;0-5IyprOUGZdN-%pyC3cr zv0U!4N95A65vY{>CPvBxs`OOS86Om2w$C1%j@yHETM7xhYlNRN{PE#2f;ENOAaL`t z=xF;uBAT@Eo{<3Uea3;@ri+BKpNj@l~k zEDbsYeW~M$MEJ0$45j)_rGoE0XhJ|w)a*Qhg1S`8n4?Kh^j1M@#>Wz+-a+P5bBClA zEWdg;62yIX@DD#S_Q#^J@a?H5cCBb6TaW5u=c^o;TW5v#+uzXcH-}MmdN(vlhheK} z6;X3fEY0k;Luv=>=x@9PE**!V1yg9Sw>wqm?c^_xiGV95qtW0$9vaU7q7$}cqIQTg z7^T|b^C7`F>B*x1SlOjw> zrUw~4>AC6bRgF|aNJ0ql3n7FM!oC0Zi(bp8bN1P5UDx%!P(C=5*0bl%VUZ^4rO$$t zp6)RBbqUT^SEAbvHgme>p{;lr7zz9t_qmu@UC0I-=nCioU$#-U!!ua-)ha0z#xu$+2GFYq|YyaGk6yW+shV#~r9s9rXbbksc~Z9f8V zj(8H9?AnRq&wkWn(N?PCumBt47lVf! z)hN|sb6Vh1cr%!J9eZ3wOV?{O|4=ktHS|25Uz~#Sj}fHV;sFuZqx%7Ik$vzWAvN#MnCSJ3^&d61R2^7gqZP;~i0Y8=wR z*y0+m6Fr93t?!AGZdjqTEfwFdyu$q17vadA{V3Ew1{NeAV9Y8I4bP@07Rgv4 zqmk2jG!o1|Gd($R26=E&3H3#JkihPB7E_a;;r2t4{Za@q#_?1?f}PDaE>N#7rvjRS z4OL5_fwe6uH>*jlYXD4IC&J~PO6+(!lr~)uLH3X?L`U_3T=yM?gZ`ZXWL;U_L7}8| zkPh13JcSk`N3)+G(mXyGzStLoM|&Yr9q)r`r+lWFk43?>54`AXG__l42>nXqP-bDs z3w|D^tM*iau>WVWRka>XCap&9gAxN*6+_0@63FPCO@qfV&2!lfu(p0l&7UxRetbFD zRlFqXx94s2rbMB^grijA_lHZJmU_1Lp@Ilk5__@ z9*}y4DU7#>1i>f;HR`-cJXR*s@B=l}C_NUeiu*&;B2U({n#cNTeo_5(bC@@MG2Nlh z_DKIOaLU!6NXo=U>NorV1U?I(=6XJ$_|>0Wd%GUJx*sE^S1YmQsXJLWN{X_u3E&NV z;R_!JQ|`NA^)EIfTYAEHNjwPhcY@9>6Oa@vVI4vxAav9s3dXs5Y;K0$D=D~aD~I!p z(OdC3lMd55g+~3hftg7%+AKCe^(Z0r8XQd>hn-}5nl{oVU`zwuVD9fh4{Xd`2ZA0x z#X{{hAow(iw$0j&-M>=Sl^Ox^N(qtbBUTkM9oJ<8@d`V_PvaM3N6+q19W#Lf(@uTg zJD}4+eR$a@z!e*!F@NWAS|(nL7{3GJr4_Vs>UOrf5@Rxr$2kp!I3qF~D=O=#-61`U z?Nf~MZ(~5Z`8;&a+yDy~TcP9fc!>TGgMuRgykAKb9od6<-qJ>K)r&lFM#FBjo)8QQ zokJuetqY{+J7Mjc1?V_R14sFnz~lPUXyq^%YDe{8?;Pt(h6)$$Tb~8tq@;Oc}K9$BNb7XYZFIH=KVXL3n|=D#yOBukG-!*p7ILbs1Cu} zPvuZ)FUQBe*@y z`nje8!`n}=oP##)tUU!y`<8JjVJtf@4~EX?DO7U%CfC8325t9WllHYawAF7NI4E2| zXYN~)af@{~r(P!}p)n9B*})+dpwP4?zB1?E$*w&JaGN99^}O3IAUXdhS^b@@0C^k+v67 z-!rzI$qah`sT1~_Gy|_3U_N(&80)hNNcPk&5I&<4_4?^zW8Ws4wk;ctRRcNwSuAV! 
zMaVVI+XN9zTiVBVxk9BWF?{EVkC#?~@u>o+36DaH^lvov+Bq2gwKv*b+X7y3XZV9> zSE1c76KXws5>acn^IJ(N2zM}6{oqzI@n;k^RUf2LZKmMIA0g&uzv!0T?EAQ=xme~i zj0}{p90BH1KNnNT=tx0D)IX}Q-Gwo}3t-LQCD?XkEUkA7B-OP!__CFGS1-q5Q`!_@ zx)upH41nK<1F-h}I+CxtON~DpP^-2fP&#uKW``)TcEd@m&mInaa~EQe#YG&jbOi4G zWsgN9&|<{i*eBW^BIwS`c2l=i*d*2qj37eC2;(=9qqIS zlAe4R4r+K5|hheA+Tt=-l8|l<;Los2>FxFx4o@B4w3cYKXxBAjt=9gvP zPSb?a?RMFmtT zcc-nr%W2BWlMt>Ck>w{OKjN-f(-)lbHdn6B`$?leAV^v6ec7{C8kU&3d|;qw2ui;P>KHl7AM9NG8G ze?R(lJS28M3ZZ6@3G+~Fgx}c_Xu3EO-8TkPziC47yo#Cf?ssD%YX~fkIhBT@(3RZpTS?fa3?wbXH9$1A53LVD+EA}U)q|gMMSF5l z@0ls+&74ASJW9rjiODqBD+@Bdq!TB31(~qM2aPS;`RX;pQ0^N_1(_>(mH7~;Uc3h4k3O%O7Id#W90M|Xb~Pvl$jS%?mmHZV_YuE zIM4^46}$942h&u<9NVd=!GpFhDkp;XMXKYY($Hx6 zXqcUo3%WPM;P8QA>@IzeF85M__8~JKzhgQMdti~w72o-P#%Zc^~oSO_Lc8-y)Qbp%kc39 z#^=!+3l<0Sh&>Ttl)Q$FWggN2-jmVv)Nl}KDEQ85PZ;{(64Mji7d6@hHt zGOHs~_pr{{ZGn*RUoym<41tDTnqc#51b+8R!l=yqeEwcFo6E-7c&@obb$;p6z*SDr z9(4|Tq+deS#$MoOxC5RJT!&5NHbhpF!zl_fFu!{|W~YgYb&8+STSvRxRlskGK6Pi0wMg} zRpRwT&dD|!6KU&8+V-15!u_pSEiNUt0&SEyD@jxOG#It_B+Kl!a7ORvL7jgP3N8i1 zwNgJ6R}Ui_(zjzdrlh;N4EYGn8dhcaTQLTsOKWqjs~H ze>XLf*6CJ2)5XtZtRxHrCjHOb>PBkUF2^Lxy=c9lKb87eQ;S`}V7id8Pwp6V;+7TI zqH3n%juMp2pU&$H8xE39%j{c%<1o8y@~A}5bKhO*H|tsa$!#-V@IRGasj+uu?`(> z$FjVc9Sht)AhPFAY@RJO#?A}Ca^DM~*2kB4*F2=Mscqzq{b|&cPG+8hXrlcl4pRE= zgocM%#AVB0s@o|9*S)c%CSxhYJoN!(NpCbL8v!-C6G3xrDt1`eQ`s{!>NLKc6WjkG z)BXhG-Bd3$)ye?ldlQKGN+ReH_Rgzwp^ZzyO1C)t^@sT=CU%ngErqCj%zTl$*Qvm7 zy2`luGMv1v#*~!_WYE6F=-xvUsy}_FQROdab(sTI^jZpmURS8n@hW+{a2jrOJc9)> zRcQHpBI-vQq5E4o+z4Q7{@N@!Unylh%Zor=-4`6@Z2=mz1P?W)L&nEo5FGNQBcdHy zpF<^78!|6Jm*t>yt|xIB8w1wPEN}2_4z>CnNKf2X;*3pa7_0mOcx(~VVC@JHZTrdB zwuW(%zr%P>jVYkM5&_JlMZLd0p_X^gK$Q475qovP;tUIv4Shyja`!`R&L0xqYd&nM ztOB=%Wh_%NowR?wL%sK3rB5d9Kta`DmDRHz%nKVuozm>keQ_$n7gOly@s8+EW<3V; zN{Rmb46vMd7oRBvXl87(Tr%JW`;H&=r5VCAVBx`7!*>&?9n-8{?>q;?qYP2P zPvG z@_U|zPnz*)9Oy#?hOdgN0s^oHIIwOyCeq3bA-;mK3o@6(%YV!(G%AJ4JeTrqtC?5E zpZO+ywCLJsrmJh-;zM4afR=8os$SqNiUMKcPonnxWm8?Y4Wi1Je(oIt&V^-gBk>3-2DALi zt_{VidrxTidPiV)ROqc`g(+UQN%nX{`sWk#=S&<+o+?YRX8HmM@Nq!Rau>9Cz64Ce znYVb*7Et>uImMDy<7~E+8{f3gBNhEBK5tkUO>vqVoGtY1R?aI4BL|#+E3*@}B%!5D5bh`k;3I z6A*mD54{#`CXENJL9hP_wjU$JD)<%Mp^*&sR|>%N=qPAOy$B}CW6U?h*g_}gW9rWt zINtCGq>PJ&sf?c|84}J5H}?S%C!y9|jL4X1=D9l=596Zr(DqviMx`=eH8+4Cv_*#9 zyACd-P{RB^gBC$xw^EkfairO{yCEuh1kJCkB}22bFhn6EwrLL7GSrWn zO&Nz**G@qLbuV;X5=He^$MbDkJIFnIV=V6$0nHy3BxAS(800L6QCUUMW{R& zPY*91M~ojgb5UFSgL(w>0i7BHl5yV95Nlw_>G927GgTQv_Wm?_clFYJZ{n`=582nI(f?_2n?wdqo z?gnE02Pv59M?s`*CRWsL1k1&-^zURgTbJAr@7@>~{dnm$-sf;uP?BwuolDi$?>qeo($P44&Qb zMp5u|u0k{2! 
zf{Lj=MBJ!FWU-@2mR%&soUfouXgGT8{YPFq9|Pk}@m$;|mi4x6rcoVgzGnYxI=X)@ zTCP8e<;UbW>TCj(V=?Y_2*kGLY?}XHE$KdF5B~Ql9z#-(Va%i>bk%|)=8ykI)H;an zp`IXn|Bhx4n?SY&i?Adu0qZ>~sj+k}xo*v&th}9s1-W2IU;+6K>{$y~1d1i!iSXBV z>T#i7~%MpO6iwfR&@F!9m!ZO4jUD+<(9Xfkw!~4g> z@U?FRng=F<^=}OV#q9k2nFy2{fjtu_c=Ia7}0IC#H%1%0sQsk10;kMV0+m(!jWh`95x- zc1?plM+TyNQCC#iy&&lw05tdB`YW9c@o$#NN(!d?{CXOg@G4qf}K1zGGk*bu|~ z2PJp;dp#mhlscUz_;o?k?!6)E71T4#sWn$Y+Il_Zr0+Cn*5fkN zN*M+-Mi!v(OgXgCzofx=GTE~480s-kjxg7j%(@bR_6fNlPU}b3`b+P9Eq^{!W}d_W#Tw|By$((|U&4@G{Q(EvAzKYgv0;`A z8Q3xbSCo08xWbCs&z7(qj~(QO%fMb=gyusvDId+w$7xF9YOO}M@Dm`_-%A5`tH}@B zW9W3Rj`uh&qb83Lbhd9}OzZvBC;T)>x`c98wV5QXhVjFTH{ck*aMbLgi+{**^c-#o zZQk1G@QdmG@dK!8cr=Fm-au;utRch08Ed0ubU>{Hzc@xiy-*W|Z;Qh98?zv^YXMpu z>j;0;iF)4nM1*$ozN1vg?Q$efJ53A=qPF=%@dNSR?#h9Y=D+6 zE!6$pT*w&j17{q)Q8UFB^UDctogK%z0xp92E`4nF)Q8%C+epVv9op^gc=R{PLG#5t zapH;~rZj?7pI<@s))vyYCNgXsbA?*3vH^oHOFK7WirX1q) z6R=uvh{oG&W!i`akn@H3%di**Pf=s|idYbw&9#2ET?JCfJT%(m1X8b)Fz4h7WMFEP z9h^+76S@+|)(f~i{5S+|1*l!K4C2=xhbCe}ol?&+_LrRcEv^J{Mn71^@<8nq*FgN^ z4l0!fFfT&&{^-|vH*g?@|&-$uNO?@&jLL3o)jMvm<%1=k8; zv7ZeJPppMM!)!3wegqB>BtS>SEVw+j9P8h_qo!xC5ckeJ5Y{GvL*;ZByl*=yJ}e_I z3yd+wsG7R&Vk`kVlv<5j3QN=HV!-MnsFlvV)9tTG?QSJB$eXCCBpW;r%ZOdQGg0^U z=k=$nK+oBlo+Z!%8C% zd{e5rnj!9HI~?On16gihFgd<=B;z7nK!uYS!WUen9mmsg^U8SaU_FHos>+$JIs~NK zS!Va(s8JFKO*2S^ z#W<9+EY9*?$KXcSgXle?lRntPL$Ui*eAgARsVfjx56}4f6=?UfoX$u*iMEPFxVOv= zfBn!z?~U;|={n1zIVw~kT4wOz z^#NdO*#qu%xS+wEXnKBv5-*R^N0-Jbx-r}g#lJ(q=#vi1tSgCZWhsaby)Twk-Y0F( zU-QYe} z#KXanERZtqyR|?0HJa&n-L8^adq*nM8*Ag&F##GaRPdB#?UT=|v1u4%(fD*n-R%Li zcURUaaH9wW*O!4)_5)5haVGiRgRu9Tl8H};qjvK)G|yW_JRgRE zqGB7b&t`Jpg-7tPo-bOi{Y^c?d9XU#7u1uTp>+z9jwLI=p}Qrn6epmu_7zfJTLxZe zgqB62C|=YBL{}udGlywrwoe zcKi*s{5XQj+zXig8;?mg%+nx+?o2EvmBnj#U z1(G($RoK4Mk9nJz_au8wQTCZ@oGRfg)C4~wb|?1G$LAK|&kyG?O1lSoPHrR?m!(kQ z8v_Eay(p zOK~N7_8164Wg_#MZXk^z-)OAoNNgE%f!LZB5s!`5q_MXggH$XBZ1SEw-WLGZ2Pfjn zH;EYXOav_}S%x7>=PoWjRf(~J5Cxa8g4y{4Th}WDr{kK+natQ>RBq--D@S1_h0yior|HZ&u1E~;eWpLP~WpS=x!RceD4E|n+u_b z?G>zPazU@HH;BvG9NIKw9OcGM#hNEUP}PrhpkE0D(`mzD%`0{$j4=lBdsnEiFM+>1 zNxWikr>HK^LwR-xZUcGv0`gz>in zaI()BG`h+7YV-YJUVA8vn8$qAmo%Vuc`4s}#VOF)_>9Di3}n4Iqlky=c8IAi!d9!Z zAY@&9gX5-S3lO%yJqY6Lm1N<{bNFZOAvAW;VtIh$P;_uM)@7W+nzFHgL26oWd6B&n z{h-2O57hRZK=ihhK%n?ANMBWwjz^0a`_~anEn^{2J`L>F&jO$F0Q|UW0E$LfkOt;k zk6*nSt==Gwn3sWp^R|Kd{5%l!jpJpfcH0EQdB}dDLA!O^h#^D!!vjtRQjs3b5Ud7i zmOb%Xb)MQk_QRNeV`1+MCF_aQrH>g$NX8k`iswQ2&La)=I478Lm_rNW-Kg`alQV4(b%dWAnD_^lX1m zY>rFD#-N4x=#K+Fmo8+xc_HU>h-qH=hiUKU1t3^vP97gEW4$gv_{T+Ophgyh;o4a! 
z?ao6}{wwmR>jkuU;s$|>CV}45Fk+FT4L^OHaP9j62%hDz(1hieE6?)ns|G;(AA7K# z7zMU{qKJHiEe;)&f+swM_%Y=SRDL~+f!$wF+k#8@GF2PvS3AOM*Er~Gn2F{$$|)WU z0L!5)Ta>Vre41W?(rcfHl?L;Wefwu~pq9-Szk9Mi-fb{;YBnmp455L|%Hn6ksPDWk zXpy}aKEGvNfpSmA{~JR!cNyZIXBl{uW$xocyMid?7x`rq1yh$5vcBh7_H9z;7vHBsXq z%)_%qhZ-kbbQi&FxXTR1UnN@`yx&6W0&j7r;`9d+tLdB9H^wEG-tOqTb^UAR!N|SiV zSThiekE=k~mPEUzRbuw&IrR6`NECEiU2OZVKlppOqJhCNj0!(XE52SP@GzB%i+5ng z8+(u)YbSVzb=FT$f}h#$D6?J_M6_LtVxlt5Ix0s16ng43*v#6kmMQ0g*V zmcp2AxyPtt`!D*%l(Cd{P2w!?rqktbm`?URjLIJNEykOhKoGo;j4pFUZ^t#z&NK|8 z?+ZYtd$HJ2D-fFI|7Lo73YsJrfXrhz&>i$_KPis|yMs(AslBD%6_wBGVVlMP*z+D*qjhva+sF zzk3MlLOl;h7GK8N))wk4kHyc!W?^CpV|u@h0}o|3R%aIBwnr7LPlx5MI{d+T=?Rqe z+{x?AJ4kn`GFXN>6+EVgG0)@)aNUv*_2)0qxLj?N-jx!^$~>66@+yX8*3-zUZ1C1@ zrZ0`7AwiM_w&$Fne=B2r2w(6qO}*eU+x4^=vl*;A>xXYmLqR>ug`Q}$zEQ{W&TCHa z)+zDSwLkM1eDLS>R1&z@Js2f@KXbJ%1y~!9kEUNjU{&{MocQM?zFe3Lk}EffwdV{X z(JbL*F*%sP{0quyA!r+PpG0tx80EGT^2rVQpBPak>_r6nD^(iKLomBxHS}N|&^LDG zLi>tJfRT5pcz_|a&P~K2MRT#FE1Q8w>;Ugq{Xp$h#WhTv1ND|E#CRm@DPlPxzg~f; zW#kQ!+fIR{^C_lh9I;7QJ|FMsrK2TLaT;G%6kbKzoan|_)%y>$$B7^rY? zpc{H^WWLbCr65$#1?7>mV6jmJuHBdYeO5y%Y-L_rT*+$da) z0-Z!EZgnC;@fCE?Oar5jhIsUMIXbvEp$ zju)bM?`a~kQQOEG&QZzkFGT8f6}Rt;OReXCtHzR+?dl1gdg?DDBVmjniEN#EGN^zx-AgOxPFq~L+G2~`FK$j%$RJR zjBoJKMt!^8rZ<0)=_XNFqIU@6KCnBP1M9d@`h%|DQZP5EBjeIHqrl<_H+O6y7T1*F z&^7UpHIe02F2q}-r4Bgdr&Fhy-#F_LmW=ta8ziosT+5UZ#LjOC2{#Xd_&b@bx+8fC^hrxJ5$@t z4UG90jp1TBQS5Vv%rFlu_+y40Mk7J*&mHpjQ5s{x+$tKl_6nXHeG1R5Ov3v@HfR1l z2k_++t)EqhUgP`Fe)Yq#%`XLl-o&D9*hUOYVt1GpmVFt|IvaMK!hy=AXkC02rI7;a z5v+wSqnBdCmvuy6Lk2B>_tV;tE1aZz0JeJ`2E3;rbIYf)eT@*3=Q9sPyBT%Q6oFfC zF&G2ON+{h}x55%4*sP`}#pYP?zz=R*x{4#iRTyRdo5)rS<4x;WM}&z6z0Lez!eei# zeaumC%~BETms_A;4;kKQNI<{gVc5_RNoSA%)}h2_eGvS+a}JwU>+`kZU}Al800cH1h7NQG+w1dy6?|dE zhXQ=g7+kjUUQqFoF|+bb!6LyI5*p6J9L6!XecBx#y=1>bvNMS~?Fns7>0n;rPNptc zhDzV#P_wfNO}!66{E(xx`E@bL{<{|PPfbM6fvGgRxDfIKyg^G(7oD8?Fb;G&)W&8I zZ%uD>Nl3w?BTG`qYxzo-*jnc}nyG-jm*%S~#b>D;jkxKqq4g9+&50 zu%R3*UWl;oK7Ul+42O5@9{8OpIgEr2R$Zb=lieY%;oT${p;EJaLImLPJW`)!i3w&$ z*$ylTT9b2$=jg7m);kkJLhjIz6<>_MqzVA=>}hblh?#07Ksv0&L2s z19lw;&+~o3IHQ8g*P9Oy%mSdwzY4sMUnV7o_9OO@px+!7cqVRu&ipB0y{ewJjC@Vo zM-@ZEVAeSvd>+>xGh%b)1Tg>|!%P(KnM0I=IN zw&8MrmjX5nftbmRz4-AEZQA0^1r{2j{_f!@F*?ho>mNeV)pb<&j1+E{$3o{$IXHzB z@g5Vpf?Z}hak~_NLe2;rvSg%s$sWkJIRs{RCZhQ!vP^@e9G+&yV)eSkxaD~$Mzz<`-lw}^?b1p(?HG-9{X#*wum|WqJWT~S zy4diD0$sg=XuHvMx*$Y?UhV|MMr*<1=mLBaYr*d5_0+v+5p3splpN)h`zirWcfD4rv58%BeRoEPImuTFsKrG1xr?h(VABN!V zq1#zD<}259UzgV(R0(?jZ3kOjmLD`rfc>K=_6|FP7{GWM#^(GguNZV)D`3x$iq^XA zrpp5^!_-YG%%5|Hs(h=7=#w6_zPw8VBeiiwZ9aBfw1k261dSfDe)um%u~{HH}U!L1S^o{hnfoioV; z(=yz%dpR}^%w^v8n>74h7C2qpNgBFJKxb?{4SzTgCPke@*`RL43Kt8m$@MoU``6t@ zF@78frVi$Mht9(47q6+e=_w)#4yWc-8Z_P42JJr75$VB+wByGWlsf5AX52)9;|^=V zj|<_Y$mo-1l=a3Jr0okAClA@H5u#SLaukY}G3p*fp-(}ono zFT4IIifJdtB?5LIUrw9M)_~-g7w2N9rdI6i%3KhFv5O6{o-wsFUDQB2Y_Q3-9R9Ky zx#gQ7IJPj}{H!=)oVN})4NL^@lYKy4IE&jcI~%HB&nKEnmi3t$2E{MR(c;$`RDO+v zeS>FV=Q$azp&7U*!kKlX1%h(w3^*`xBzl$fBf{lX(3!glZ2yJBQJ&>eXLo~7mJ3jr zn@ZdIo}s;>Y_X}YB?xC#fy3a{xJwA=Q_Om~XacCmXV8@HD^Z#^icSj{ho4Tdyv3tC zv}MIg$cSaUh`aMJ zESZmbvw&LKnq$F}a(p~04Q9W+1eP8;#IbBKqE7~%j!K7wtix(kA_MBGh?Sj@!rn9y@u}NYq8x1$6`?VM` z?%rx{>bHYfyE=>7{z{L&yPgoY-I>k@6d&K3m%*LWnsoE z6%nZ(QN?~OF0Yy5HoYmh)hQpEzikKbVEgg&Sv0^U0_t?upjX3a`gLI}TCdIpYezkz zV7@`w?r*f~yeu@x&mvu0X5fJ1lhM6s479nN0NWp?Si|R_rQHWi?mUX3b# zCRjWZ0QW`(;c;3_FKHqTBd5^U+aj^`S~it$XH2`Jv$6EODONSdU~cJorWZF;Yu7%o zb!I633sGU*pK#ROYlg<}B_!oUGK3l)2HpPxG3M40+IiL%nw2rsnE5^1+Xtab3Cksq zJp+2b3%Ixk>@3>k$9peY1A6*b(Wj*ZdM!`F*0M~Dx!}e6s2w3SE(oO~tf`Z@g1n?h)(^^f01K+XdWsLIWrZ}nc_y6R5CBCt*t0sH~}-J 
zYi}j>2*1k0S7%9+^ci_^Hy4N6vF~O48`APN5ck_3#F6GMXr9{>LcSHjlJ}!=l`0kw z1*F6J?nkiA{3gG9X*!PVxfECCF@3agG`~rm27zT)K$rDFTWi^p>OoeFn>ZJ|HV=jo zniKG==@iu2XG;Wa+l!5VA3&qC$<$Uc1mcZukrRIz+jLb3{Jwb%U4QHW@w>i6v?`)F z(0?zCcdo>d4z=|KOHUsv-MxlJm@L8U z>NDJ+3B_PH>mxb8M};0g-x1k%5pN`ZL-pT>VAK0J&~j!u%Oe`#H7y#NHfMnATu*rP zX)4M~rJztMA%B?~G8V1HcN*uw>tPiy-OPG20 zkQNHP50}8Hm($QsGmiPrZxNmS%-=DYM+jitnePfTj(x{}GE2pl$9us$U^Vke&*2BM z{czL1_uTuhG5DZoHbgVeuG4w83+ueeYkxn7;{Le5|)>)lOQo6{Zbo@cT-c`rDb^PF6p zWiOn4x$w6q=tA`vym5-bXMXCj=?gSLmAd`KU5 z?X+;gRsoyyZ+xeNPWz+5vtlBgm;^PxiKON7G${Ws4K8doLND(U0?Aph`dC2JL03*hfW3#uCsskd3>iQ-yZpy$;OXigr!@i>vvS3$31zN{^rOh+lphEbX z`nd;#fcb~46`Md3XHK(aTi_C(hVIJ~fE+T$Ui+o2E2NRO?Yhpz^!z~W{`RL5p&5t! zCxTBW^J(w&g{Y)J7;hDUh3agyb8jK)?1#l7^F1^qX&h*eOoA4zxsYPe4Xn4E#^pm+ zqnBO+*YV$3x`y%EtM*=k+KV5k-WMrJxix_43+J%&V+Jwy9ReNs{?vTXdd4ID$4~C% zfN|p(|Il$d>&Du_=Lh!#$<@hGnqJDBtXa9VMc+c8syQD*TjS8WqJ;L`&e(MR1)!A|0HW;|_)~wUqqbHuguk56 zc1QgowC@opwY~r+zj8Qq#Q>bQbr&|vW$0eVvbZrlX|{O-H#=1q%^gOAtkN1)3N6%T zd%UC@ZfJDyKf28`7gfwZ(Bj#T3^lvJcC%w4p}hd5R)zo@nNRUeA;=Q1Q;ALrk#F1y zk)>?DI_gDbasD<&#~DBGSPgB7M5@3gQ-=^Zt zx+F9Rh@`fC*u5ww57H9LFg~8;nTC~OSRK=C=Nm))wd=%qodtEtsG!YX=7Yhd4BFD{ zPqGR_F}g9I^^)JB9a*{{ldUD=&5}?*lKDflcax+(%qKK;5Z$g8y`7Dg2ikb+E(58|>b_0&%t>IaV62+Nv!LE_k+Ul$<23Jj5UN(LqzOM0LHOwwSt2}-$}b9VY&?TzZ?n*$>o5pu*g+-j zC1`xIkT*D;1KAWGUxcRb7o4?i!KC68lSZnm$u!)E8FjJw~LN9E2k0v1cbComH1 z*RkE`f{iGcgAk=(#M_?t1&;APS?Uf13qF&ysuk!mIE3g0?V$m05}60)zO|Fsk!HR+ zhJovq+?eJ&4!1oexqP-F;6pTZ>Hx25Kg^-@Tm!o9wY~r-x1j(*m0lmUZu;qag zTSL#GX5~m!1m|Jk(6!ic&Wc?1j>Xm<%vw&usCgP8CWJRBMmRh!L2KM?w=z6WO)F-f43V24YPUs_bFKVasvvAwqSB4 z>(^dW$m>`%Q{$sdqtlsbV|?Z>IWT=X-Vp6XP1#Aj`oR&M_6~roItnvq@^~RK3~yaK ziydY5pyOaev$xw(mBBsI=EAy%dv*nCT)beCmQwY(pX zmu5o$X$P=>%I4hw zC9HaT9(0espd$CKoJZ402r07z_vmDzbD$@9jLsvLiQZs!G7)7zF2eDBk?5p1n*M!w z1(Z(XSyyZdw5>9NevVQ2SkCrCt=)_BJAyzmB^uB6WxCoUroDc*qp}@`Y`*Qz#1oo1 zkmR6)S__z8s=5cZ`~9Yp0T(%eU=AO6Y9aLNor~7HO`-NtADEFe4n>E2z@X3wf+s8C zn=YY|xIc6qw*_7Pt%Rl%=DeHXQ5dj(5{4gUn(z7JRF*gal78lZ)`Mfj z>B3vyekQv+%6uXF`4tqMt+f#p3wVgFgy;ULtQWzAnhs)j|3WEkIBNpp!qK2BiYM}$ zVVEF~1@k>BR>8)!0?(iHdk%%&Sv-2hXb|-!H|p3W96p^tgXI-b=;afLx<)J)QG1DI z_%n}5Z(E{S6M((fo?scy55%z-(?J~16440{PG;oGH+OnM!d*2q33@`GnEu#g{+w?! z>kp#$97wv49}S?eaRPW9wc{&|7qbP(}KJH+-XuZi87uf*wQ6gUMv<6@4U!3@jm z2<<{&lP4wgY=dJfcGw?~=HWsr_xnWqZ{s4VIr!xu7t*AYALx7~;iLvled z#?rd}dpFQG4}qSO_FsvLb|DUkEgQc2 zEUXSdj9qI=?uAHHZQToF9nxX?IwM@ZtPH+xBX;5l`k8bZ$Ilg)p+ngK(2iEZ^7ta^ zX=KyP=9xGnDV3Fs8%*80so><2#6+C(PX%@2bK-SqtO!QiaR!*!rG!cIP6!6;yFgk> z4yFYMLzuc8j~iBF(uhdd-uVP5LPBvLqwkT*8;Ta41J58OjGeBBI#V+6gY`hHnVbzC zGF|Ss>@Y}DFBdnR{>{`wI)pc8^Kkl+Jkago#&=|yVB)?aW_UK5c#6xX?k=uE)gK!O zH|~MX4pz+O%{f+A`9rKfy99jKrm;=bwQQ=oz(wj$myWFZ2d!q0g6sE>fvEC5|9nP8 z9@RMEvZWnHh7hOy#7?jZ9LNr~NKyU$iMX;p4=sZzOPHk=KaEhp#`MAHl@rRwJBC2p z>!qMPTfq;!lA+WuTYOF<8*95&W1YN&&uYxZrVbIRCrpQm-84HK+Xc0+reQ|TVNiT? 
zXWo5>vBEP2Ttk`8T4QXu=FfasGiolTj0ggm-5P%DkR08K^INw$ip%E;OqHF*ZJv~Z z!^I4cjt_?#OIy&Wa>v9PXEssC2BnQ*tZBXzYr4?Jz3M}tPTVS1xc20g+s2@2D*2}= zqw-;QA=WL}$l51tfu@~Sf=m2*Ys7x zh5^3|(8*^2F(C$VGlvyeMC7RUZ_CpMZB zn137!NKE>=gIt(UbOm2`ucjSz5)|Dyf@V=AIH5|3fg)%LXU$Avi0WM}DI5Li5pd&`Ryc{nD$U>=|P4f^goqycm^#fT5c@D|NUY@!oogi8M9`fV`~ z3@T`UMcT4Y+8B&0?!?>l}3IzQ}SzM_R}g6UE$T=Itdd!=IS z*JEHgYaQ5UnLvuiAuNlOkbl(+3#;jKJL{KVam^V2N>5?MX$k5xV$M0|f(e@hv%cq} z)-vK$&%DN)-iC;Ew~yo>(!)_bZZc~>^plm3SHP_9^epyzDX#0(#GX4}Mpc9p*C>nU zMnmcU<8}oG+$_X~;a6EsB=Pso&&7-x74&tS#ya+yId!_k=LS$NcF=q_%1!}|VcyU_ zurIW=NkH*mir~HYAkR4&!D{wL@p)G^QBG`@*ndkh#20i0W1k4@d20sR)>z@Y_HF34 zk2*?@jXdNw{T#N*vAwAP+y@V(tXK`NKAVhwR;%FtK{`jjYh^XH@!&k;1It->gU=mV zf$Lsx#KFh=QJ-fzG#1){>*8~u>ZJoI53)g2dlaR4r#bpvVWJ;FSg?bh$L`PBWu}9& zzC%D1GTb`1XEOTFn~LgQ?(lsQ@mR!5peB)6Jx^6UUL6J1byx7cxi^@c&18w&WoYnZ zISe~&OCI55A;d=yy`Lmgr#T!aS$ZJ2tOl1y1)xprNQ-Mqal^zZTy^Cx+jJ=l8?J8# zKb=bOxp(pYfjsUWV7|!{m*l9?Vpb&H`4x?mI*mb}s90>;Ia|=pDWRV5a`v?D9Jb~+ zGtqV}>y#V?d^q4tdH5Rf+;V#FQfAn7Xf&4{y2tF~=b`CQiP-%_2DeyK28ng2p#8Hw z`J`e%{_itkkGTz6Ji5#^+%mcG0TZtGECnE50@9XY;4ywX%L$8QCT>xzbbJNm52Bs= z{V;$57FgXv+?7tnD2*zoyAJIneaqoT-6&N2=Gb~>Hr-PjYy1$n&7}hHbLyg({sSW*~G2cdKK-?w!Pfl{Cl9OXV}q zZo!A$9LT$Ulv(8%VcQB5nC_m4pKH$1T@o=dXg|27@1*QP8Z7@!Gr8@3zzhAL?m;v< z70kyJze`Z(e@pD67zpyy2C!{<7>dlv<5Sr6 znUt3w{DwIn*bI{V3%pLVN$7nz78L%QFvFfY1(AI)$$bW99pA{<^=a62s1t9hhyzc7 z_NuR?yyZtRI)o2~>Wtl}m{0ra{g-)*kv_!#OWBUNv0!Dd!78VXhSuyrp|;mBtZXh| zf2IXdS93Ach5Rcxbm`4Ao5Qd^WDVFXS`9B6SKtr5GPHVB2JhdNah#7_Z?`e*t;mPQ zBjwz=L&k@=dSC;N0^MQ6A@UUCCf6SLYHJzgscvxXD?4EDmQXZt%K`ZiXYMfJ3A=SC z8synBm}p3m71jejeKf?i)C(B3L5&(4yMQ7iRP42c7+kT3QL594wU`QEp56s44xME# zt31iKXTTN12ebDHE8u^sx!Cl|jXS^I$sOM0ibZRu6xZiu@*#`!@sGtN>gjib#M_C` zV)QS)8!Wj?U00}U3Wkh|aK7qE6*~8G;&KJuUl;!3A72DO$+1|(i^t$R?Oo*U4+W3F zsjz%5@uas8N3DO7A$TffN6WV`UB~In)Pk}RF6Vjq5lyV9l`D$iXk%4xUPiY%u1|mh2ARFfb_0MA16KMd}_L+%tUO`@< zkIcs|4BAZ2;<41h_$RI$s>e!kX7@OBPU}xDnJ>KV$}t@Nr86qJU4R^gHW=QHz#M5; z*tazWwQV%8Zk`D=DVDPNo^oti=m?_h0{~)8Y@WOU9Zt)*a!*fYVRKY4S#t_jz((9q zT0y?b^diFsay}I2f%C9*xP5dn`W{fR+DpU8)6o^Jj%tC%uq|M|>n`_ctK}JgFG6ep z&3t!^MtO5@cq=b~sMx(IIx!Fus{{DgSCrAY?$7@BY#H`Eqr!~!r@{Oc%?2XkaP&ww z9IYFUwNpZ1xJnzf&zV7u-8g76l)wwibbxa)tnlVAxIAw^=5@*iQ=%qq`85ykYt29h z>kw}91+l+(81A}L32SE;!|?5h%IYM(ZM}k60ppnDtCKjZ@jCC?vIaY5shQi`3B+5C z0FMxa^XY}?bh`{?{c>1_{xjb1VBgxi-P{ZB0FR(E0Kry3_k$@b62R)=7SQ&3$vuL)a+mmQ7CdDD`aeNP_d^Az?%z%gaQOEid|Vd=PS*XPeyKScbWVX2TFH3lg9$nmoZ(t& zRp?Q)m^JhxzGR9iI7J-gJ|i8$J6a#TzfhhvbO>cv^YDaw6*9MjXu6zuI-ciwj*!Xf zd&aR*FB73lbO|=)%!S^QN&}>*mX9Ix+VFj>{}t2|L6{FPTMf( zzv1Zm;xctjoY;gxRrv1vMBMl&6wmxELesb@e2RP-?jK-^eZ~`SE|4Lx5 z=SEbYcu{Qn>kJHAxsv+)X9WZ90iv$O-2AdRh$4!`3bR98^}U$cZYl#ohdw*y#2@^< z5D!lZLi0LL@(PwR#ip6eL6L`vrrq(f{UF@(CILGVhk?VuRg8-6m{r@4-0lWQTU4kY z*co)SY=Qq;O75#l@rb=^u+C&24_#7<7G`GLYVmxQ*A@nq;elA_n9V#)`e0?E73@4r zIlw&!SV7J(j0~TJT9%g}akwe>GrPqNXQ;8~v1pXUB;lsUE?85dL3ycg*1cCBY&&BL z&L2~_|D$T`WSEZmhf;9h%w3r8Gm<{9hEm^p26G>i!qZ%A@y10+9#g!-AWxEx4 z54j^4e0P9mr(RGP?hnSpq69n75vb}ugyt8om_%=oaD~o%l1-Lk`H9V36_rgMKpU*N zm&`gch}FF$1YM8R2#cdPpnP5`Sm=Hb&paE5x+Bl<(8wwbF6j-fjj`xaoB#zA4x+qi z6fc}J96vej!UhmMVJ41p?TwDE-|X6_d-|vGowEqSZhOFt{_zBKjZ2!SCGnez-z>C zNL(?88~Ll?TmDf{{W<~%{#*o)9_!%#_hZm}Es_P=27|_BJ@O2gvAqN0u{|sWEGW<< z`>n~ER(|DDnNP7}m<&uPA1GzJgi+p8&?Z|KtSp!Ent$d2_UR2vmxp5e6nzx6j8i!u z_u?-S6cEwdn+&0=Fn#_O%)6U}jlI;oV-K-SLO-$X+q&bj2ZiWP4sws$rQqtL35NbV zLHD8{c>Q+_Zkd;&>-TkH(fb|Nl{*Dg{+dU5u7lt=DTx1f#RI(;G&7~_iC}xT5X`>_ z5IH8BDZYn^9da$P=|E>l4(<7Z<+iYAU!`r9$xPaD20cx~#_JEHaKqlX+_1 zF6+XzdXz&>_;Sd&7=>%b+o1Yk2u}Z-i(ma|pWPY++C8IKuAUtZN~T`-BU23ML%U5o 
zx|dS#YtzoHcx&Yen57#JSsHd=7C}C0B#uP5FARw+M2Cu1JZI%;T$CFQwZF~5puPf} zU97>jfcE|AvDhN%&fX+nM*A!E-29anB${mIt{d~=;IQ>5-@S;do6LEUNibM_(Be7$ z3wfl)8FGc)X8(Ivh@elu$ES8PFbsyK6@F}+br9A$|I1(frv9qab*@dF`L#c@pw*@e z#BX%M!p2OtYvpMe5EYNgWy4tAm{{_QrtwOR+3<&l5I^jLIAcp3_c9se8ycWz}3B)}f4{g8ZVB1tpc==X_S@UF&K5`&Nw_4-N@A0VB*@Vq~!_i~} zu?B_9Fy)FIuA?TJ&pro@+9~X)0nJsL77`=yIRCh|KQW44u<#V(29z#`Becg${J57J zFaE~AmeG6s_6+cOl8Lht`s3ceW;n5cICYKPgtM6w=!jM)2k9_Ia`5?6qHr-%?JC#)6g|gU#tpsIWZ;9mczoe>8UyXqz@+*F_jvdpZ*3qJ%|H`WE_S2ywt#^ph_1Wrnfd{p zb32#et==)Pd_fSjo@k`ZGYSst`!iu^0F0bsfKQK0P~xb8?(?hQyX_f#G+`2sPVGa? zsU+)@r9;uQb^{+5MjU|Ymb6p7$#GaF6EWnj`yIfC^6Rp=7-&`W<@H->XAxLIKHAw} zk)H{M(|+>cia=20Pk@e@pZE&P6}WKLXcR?9iTC%fM1^D($~#qyHRP4>xr`f6wGZbg}aPhW~%R5J9{+{5&oMnR?X8D4*XKNw9Vj@Y9N z?mhny6FtdNCCp0%{~qKWrp#01yKCIp@eS>!$%lAeg%10O>-*&+vr9`MAN(=I?~Sy_ zcL#^an?gm8sTizr5b;1dH=VQx3^GUanrDx>ZCo6$v(^IN%K`YgB@KNN77?@KiTL3y z>NB2tC-xY1mhU~AN!g~QXs&w@8=3-`fv7Jt=@N|wcAU?xsD#(<)#&^Yh=Xt%hTqBs ziNg-!v75k!Tjr>qmMwH(7)3)plk)hn#TM zq0nS;iyuCoi#uIQG4VTslS0&Acn%@suN>2oZ?FLqNPzuv;lmGijGOL@A z4pPr>CR+1?X;_|MuEyJiwxTGguiOS|%?xpbegRtia{`Z#jY8>x6Zq(7cgkw#Fj1sL zrLOzTY6{nb{P9v~a@_}ytqQT}`Un1ZQ51UQ+F;$sQff1`B&S>rYg}@73g% ze(Ve44d+nz=pPpUDF7W3Tf}O&d)8T_#k}c|G57l63l6W;pqTWV6<&KzUG{9K*)$&# z$-k(3y&Gg3<)G{FgW~r_Ke+s&DR}N!fRV?u!S9S1jHeKTGNy!y{__^+W^X~w-n(%4 znL<$Qi~$^2jMKk`qv@`bm=$sb9i{Yd{C>l0q8gZD`fM(nJDJs|WN?oG*(~ePUB=55 zSo!!S>zGRS+)MSvBJJJ9trM!)9oK0YQk2qqFtlq+Xy;rXD(EgvX2}sbFiSVnWA zKF04Xt%gE4gl4{!jrr;Xhdi?ImsKUo%(Af|v6qRi9G=}7b5tIHJr+XP6f^Q14&jX( zPchf{JK`_?71-CP7lvaQ*i$$1VGtPa2?L)c)UBG`#`QD{VZz@q zREoR8ajnzXU}Xpy5Wwy4O3>|p`JnYh#j;96xlzM%jC>aaj)$*6E4j<;PdbBBJatRo zou+qtPZl4k2a%^+`F2SrM0H7rg-I%`)6+xc*m>|aT7s6T1JHb_0$PKAiydFe@t?o~ zG#n6)!!P$k=)vI6!CYr@$}~Z;z=!fvH<>c&xwyAp ze>}M}7nMC1!=}*z=(jkRuYXUov=SdUb9X+f)~`p6v@(|E9n2dI%V1rp8YkzT#f*2? znZfx$HdZ|nn~okKx93>!upUg#so`L3+F$5c9l%@ueZx9VrGswgH{!}wet_GOxh^?` zqzhfy9Y@N?CH#+jcf4hy_w==;>|r9C&zJv=PogtZHETar3NF*hqxioX_8@68T7AF9 zGt)<6;dpzd$$;TotH6E8P{=uc6zqzRK$jllQ9Y-!*l6bw zXlcC4Z!h17W*-7jwk{0no+NTR;>vW4lya?kYPcT}1_p;O;&@pGv`?PTM5d+8KRp5Q z^jxqgi4-jUv~yj`(>9%o60htWf$rO?*nvGtIN!$yg9CM-&Le?EEyx7ZM05DKG8_)l ztmkuRKgub*5M=5+5Y0R*1h-3|J$*Q_)&wpI+$lWRXos!~Y`97M1J<~BA9lVLiDsts zdwFFuw4@cV#cj@LQJ@kEYc9iBDY4dG_du)uUAb2QaSPfrnPg6Q@hFVN{CKB^^~*rK3FI6O5HYr4h3_`_)+A*Y&FS}ad!Rf6ulD)FA6kr-y3i)x{cZA;xk z8P7GaYl#f@#U*0@!^D&scM!Y&PC(U@6I{zGk+omy0!{yXW7jgG$kmVuV|3-{VU|qq zo5f&Qc7$*L5JUNkK=gY3jT=$^$u+oI@Mx%j?C^Q$zGV@}Ie9?ljlh;(XSq6}fY;P% z^B${0asHzVm^@qqJk2kHI>Uh*%rHiyWsL6Rsp49nqkQ<@B+Po~4;^s}(V$Bg{CGYF z7QPF?Q)M(q=w1fuQeyo6-UI#u@frT#muVdU4rV4`yksG?ZQTgo<#cZL+zl4nJ_!^% zU`ngLAbT{38Qr1`O518CYSw4Q%KgGCx?9B0D#RUePMBccjqc1=&`yAP(^CQD$Jqg{ zOV#lCXDQsxE=9HHHkKG?1s0KinCVIj(0Y=_97^aMIzhA1 z|9FN`Hkb#7L;PFnTl}7d5vZJhmU(K2LtXGGw2W1v^olcxArcb? 
zCxK|gK4uyc##9Gr{(Sy4j(B$*qx?A3w+h&1JrlgWop`IH6W>Gk`u|*$P;I2Ne!B1+ zoVO?ew$Juojd2hN%1X84_=TDqVnlt zFnJOL4sR2|qcE9ij0~nbY7ACeU&g(Drqs)df*WiZN@op0ljb#0`=J~@O(DnjN%EMw z_u|vEaGy%K`9gBD`roUJHpk~6usocDGJ1NF>zVxQy%VAJU+C`*5F znWq^<{>ehEB3;l^#iHiU3t)bYvbC;j+55e5U~wW2hC8O=)ABf|i3 zLzsHlX|Z`1H>^u<63gw6fpPUGW*8LDj2CZVu7_5L8_$Sfpv^fH*}i2PYX+hBKa@Wh zxDJ|6MR4(=f%tWjKMq~37Oy8vXPcd@IB2cT6` zEVljG2QODdqt8-Lu$|~foxBXN|DBD6R~4YO$4K3yu4jX{4RtJOeJbyN3nmI~#MTw;u$rzt5f%?S;bjIt&lSL6M`E&pzNr-b{ zal07yj)1if$c?q{Gds3qA3B^J0(INAfMnG!LHp21Jf%~H&ic2RMr0fxbti-P&_9Te zwj8AYQ;WZj^+nrVxv4^%_@KDx7o|6&(I$6@?~3rtxtny)H2 zhJWr2L+_l|f~@-%>YA$Y+RkP8@1%1WnU@2>y__NWcs!FdUScs>G*jJoiDhdH!?soA zDUl?A!gQ#h3N3{3zpfBN`T-vSSvanJFlPCv0397b%12m>;y3ZJk%eHgPln1VxuE*7 zjM;ZYK&d}<0ldV}H6R`jx*B2AoqwS2x|~}Kxxid2e+pjA3{nkukRR6%K2~y=vwj0U zxP%zBKNmcDPJt=blq2g;x#a$T*e06@X!SK@Lr>9OZaFa+G@W?w>*P>+`;Il8bOE|! zz}ef>aZeu%rZaYO=M!&ua!qGY_b?HZ`YYKAC2=quj*E?>l9)>{^*|4J5}WM{^ZQ0K zzq$9g3+2olw8CJxZ3MQQyvd(kvqwDE1CI?xbkaD^?MGLlzqSlQm6T=EJ`9_Vkslf8 zjASM!-@!NT(J_rB(=N54#UJi07GQzB9cuo6=5+HjD&1Ik+v_yQMdzVHX_Au4FKzMN>1)R62 zV8KJg&y`ouZ(J7Z@XKcnrj#!{Rt(8aK}>d(9DZeqP%Ab8XX|thZjosJ<0-c`nqVt+T_949+C$XYgz5Oi|X((bw=0^B(-3+drm^K<*6G z?o3%{^>(q1*>$cfcLNREe&F?M93)>sKJ{KA?w+?1k6Nz5)^1;T3%yGsOBjeU^2EBm zT|s&29M9@~nEdg?<@F4K28w~}CiZ8R-jrMWJQ_rr{t)$rymR`^TYML8#HH0WE#>z;$7=xQ8q4v$h0a{-YSEC5D7zz)aLN zo(|XiQc+Dz5M6I`h+jgx(RdTO*NVazIKhX3Qcu}t~Y@gW;?3ahbkma`6rdNWN zXAgjb!EvV1uYik!Okwa-3w$+&SWd(Tu-qDnwO!{!d<5OwLPW47iu|E3?U}~16z0CZ zFL>qNWjDepuW`hiY3bxpcX<$9g~#qcPW_3ATQ zd7tvX8v=L}?;0U1E9qQcvfwnbDZtOh>94A!3l5j`VSeuAFwGYyHY6VN&G6&^9 zyqW1-2e2JCfmyxk520B(;I!ie^i9*k_Y(&~=rYlTcD9x^pKjn!@1MmxePgi6dw|%rOE+?~NTKp%H?ZxWhK8QM zcz#k4)|y9v?Dl)+(S0ILk3C6U#q+4zC8C}{PujWaVa|n0X8)VI)=Up>-A)0cXkdlDen%2 z9b$=UHK;}n;2!T5gQfN{*z{Nh!wE-SQ?nH6p8N9hPMP2^F9iLkmBFLvN8 zK+)ovf}R0@{+x?2Vf`#Twy^}YYouJEZ_Qi_w8RSIy|mv~z`{R%cr`Zzug4}sQ_y~< zyFU}!;v;$d>Hm1k+kE!1YX(j;15~D-pq*v~%#it@C+$QU`fr5mJONsJ%9!)6ODs}! 
zj=L$zb)8wxyl=#D@z%L$Y@$Hh7Ymq6iX(U>dV&0?FN?AZhPw63;Q5KOX!^_uUfj+B z<1bVA&xz!CG&d&4-6lv6MZCK#0wu8p*7DlTFnw7U%z8T=jW23IyUqoi9+M26YIdWA z<|V=7+AB7Yn97oB4+zd51~p3tqRFZpF4+_a9*fR`dNCF&o~y;)o8L3bbv1na%-tC2 z;0DfSg&3KZ1Wg)G*+lmgR9x!}8Ba>lW7d0a*ry1XnT+OcTgiXxP0ak|EO_WCm}nY6 zc?Calz4H&A-eVDp##z9M`qh*fOdx;cSl(XW1C-;N#CGaS&=@5n9!eS{SITW&9l~lRVQGYy*|qB>ANpG z$JJHYaDD0&RMejo-a1_dgK8DDMeD)!^F@%cuz|m?jl&JGSMWwOb=Qxifd1GJXi83C z2B}U=Vo}4wADd(F+j!R2HeUqs!al=w}y*N&gPPoN2iz zEuL3gyXH49Ok4r)KW~7y=BLocB#vo~jpDml6u5h6!lXx0*uFHK!O+d{AifgoC>PxH zBbfNfL(!$%8~*#rN^I<&$UHaqA$Oxa1V7>2-8P%K&5lIN#yhN6;%Iz2W(|tZ2+>c%K{9(G#9QX#F9UAF39wZ!7e=#*Y-&V`JywZ^`!`O z4d3yG9jn;MA%1AybuMIm*W}8D&xOqAx@dSn3nDVsBNVLym{r6SBeme#*)W{gH5M#a zYC>$92(?a5W3~pNTyF$Gi|;UK9dkygF$~7=(Vp0+Vk5?Grsw41!{VVEH>0(ZV*sSl zjxY+#Yt?A_`52GQya>)Wp7D&S2f=ICe|*Ik;*OayaN&QsdqpPb1=5T%hV!zvK3Lb~ zEMN8V1m$vHiAC16;`hWy^A7n33~V%+^NmBiOn)~f)>lKu8rqfFeqftsR^i+I!RTu5 z4*hiK4l!rH*mu`nZmRc{6-92qw!^tx5_6Tc^$G`LU44+wc4HFtR;HT&n7vVlVypKS zNW72%<=Zl#_VgKeHm@H_a{tE~F4Hrak8o>u7-rQp@z0r6*!giLW?a(($6=|Idx^)y ztC1MruL=!Xd!ov8I^0@!1u9);p|Nu%G55>G2CEFvbmbY`kWKxo5w57%Fq!$t$j!ZV zAv(@1M!DNOF7NtlX&8B~Q9!JumN?=w3vZm zSMGdqCbR4vgn?eviFm)1|35!8Yf=*5d3!XQPS9Xd_j)Z>u5 z_&7d3os4xi$ajz&!z)J?^0jLY#@F(@9C_O%cm(D8z=bqvG z$2u7~vgTpp;{?q5(aNOJz0fZ*h7#)rtv!ClY=0-0ME8Rcxf} zwo;|+_{Ok?I%QE>STCbFf+&u=E*=4vQyy?x?0iuFufMSQW*K7UMsWFggIis_$gAZE zXz+3{7$30}L=|3wqqzYF@4UrkO^ZQ`D&qDo??j#v&MbZeu|j(RrE8Y*guG024jaT} zk@S6Cc?#N6zjD#ULTt@>$96?WqT_S=H+0P5R`exw_m+Wo_*lw&c!EXtMrIfm3`ur>F#;HDU?5HwHmg*HB2fT818f3t0j`3vUNp1XFnh zSgd*?G>u#W$^B2@mr%+xUMdFfpd>-*^qs|LT;R6Lhtaz*om=)VLi?jnxcklRl#Tp6 z)qDP9A@Oek8x^q>*YaYJ#O84gYdwNJ_oLuTNO(2LS&?1MrXj5fE8-rWxoX zCM8zDCX*PLsy6{23|fpZES@*72**Z?r)=XYnln$Me5%g^2rW5EXVVH)CJ*QG=OOHa zRT4Iz9{?7|{$*BDfz|5_grCObEAN^IO4AkacuOC=7UzIce8L=J3!!h%P>?Ji0ZG)q zHojg4d8hKR`CoEhEYJky_feqtM;G5WN3!<6Y9w=-FcysaZWMGSMOgl;>f6!&*+i4xH`5WuQ`TEOVYEYF+J$-_&V$)Z|6C05szSd@H~6xPTd>)DF1*S(fzl2K zvE(>8)vt#@vh*MFwB>`bO`72Bp@E5q=^oQngL3QT+$%&0@z%9G`E?33{Y(%t0#5U; z%gN6?QH#2YE<%w}AQFBHD@>>lc1jy0(hB~ziZU*aRou$3g*nZrg4)6~?$KPzC%tjP ziE8S`_Mxt~#Y0t{<0NuL7k%j88El~TnJ0zb>WuG2y#oCJrSYs9rzH>J7?L8gQ>t+nT z8`m43txmvQ8!v!q@CYz^Oj%Z9^V;ZzP;RXNyU}|tezh62ZyOGhTiM{7yoc$g-4^TW zpBC!EV?pRsfeEzZuE|->NFZZJ%dK$$79of zapL+~C8*;lbGmCNn9sUGjOa@!Etv!Er-p&)gwxpjPY6uxQwXnOR$yKq4wW~O;iEJ4 zG`HSjUKLer&yEXdkQT`xFBaRcuI5kGC6GtIPcA+4iS;6e4xtnhJ>N6+?Ac5{u0LOX zyaXE0r!hUho=}z;fzHLJ$n8RWGks4kxiJ)Ko2Y|Q>B!6j=fj&5Mda(r_PrUuLgCWB(o_biWg01dJ>|IZPHvdnY<&}+E z^ciTgZX~?YNrSpW@xqa-%TPY0l{wP&BzrNc^<~D98ln>ID3BU_OQ$20*h8?T76b!}6s`XcK&aH=Hm8 zdD3y7(n6l)+AF*vr5~>!9?h2|Y(c9jKD==ERESu55>H>afLaX$;68PW>U+~U1aC7> zNhZq2`?ChuRm{qLFpNK|26G=Z$V5w+hiE%9pE?$#y#a(MBTTHCNxq{B-u)-}2&ms7 zt#lPXyd8>n?qtKafJEXw?tz@d7Tz#bhsl<$Wywx~D7Qa>mc+U;6fc9Z69t-a5s&1b zgXCqt3>IAy#Ip*B-)3eAk+Y9r#?)i*UMm8>r6Ho!;6Y|OUJo9EKIW!qxZ@Oh=dK2IiVWIj=RoH#=2$!T3|D^{iiR`n(0EBD^UtXS z`P{M4(3pTy-y{~hJr&xIwy`-=%P@b1fE~Y%^Pg+;DOcztUK=4);hGo8*-Bbm>O3E;bOCxnl_NREyo*!t20^KJxy`e&fnMO_XCQ}uY| z9W__kTf&L~ltb%Y#1;25#RdzFK{VlrYH{TXRId-UR<_#ntTxgesrzChv6-@lR`4V> zIVAe*1=q-3LQT(n{&>_%yql+okGjR7XwO@&>pB!Pp1Oc@C-O@@*9YGZVSLeUSA^87 zTy{wgUI$va<&-3zv+OK1dR1~=WvcM@{t+))Vkg5sZB-gO+OrTyEjb`stiN`?TLYc%~k@nNerGdJayUO}&m4 zvq4IX*|x+!;7hZr#t#>nT7z83TTViW+ejQwJ)uN#Cd`)@p|SA_u}x4Ie|o(XJdQ*% zcIi=c$T0!s(`@3mSAzHFd)#!q2^hv50sH=;U~hDicepZ` z9v?#;E|<&&%%&ZTa`kt%kn(}yd>vl{m4q!f0HiBWq zW7eA-V#a%XL9tf`Ay2EI>|xv9{=T>o4UOOwuV z_!I{%!>U-xI%4e2PfL5fv(PO;#cPqvtT!yo~MaHnKYBI zFA{$}FcuF^Lv*enp2njE#9`ECHc=nA-kg2-;J->tyq?C|6Lg9FP{(S&oQ5g-MR=}j zGQMhx2h&(Lh(DYP@3-v5wyG^KVZ=b(e#DdJl-_*iyv69W%nKwJ2aD(RT#NB@lVPRX 
z4&0%(#N?_y@b}_eRAfwpv9m}-ou7oJ_f$}O=L4@hKAc@sC!^-{AlUah6m@T8@@82g zf8se25uP&p$Ys!C@ef+Wq@!pIBtai#4>EO;4%2kjPOcKZ=jd;bxu zKgL6AWR0-&b}$zHGXq=fV<5hpHK_fQi#IbpEw6Udf40wkXLgfG-aQa{a=122Uw@eGglmQ8qW=V97hL*rueEqdEcop$#jBM$2o2NVT%io_Z{Xl-y@8oFp5y9PFG$`Iif=a+WBc4i zU@=rJ=pK114s19EgG$oDLFI#5yI-c^ubiCB5v=dj@V zBCHt~kHLmhLHf^M)>_^LtZL^IXW<|>?j9=a2=k)Mqk>ue6oJd|2(GOvfsTEV*iiG7 zd)MA&qeRuH?;1>*jc~~LK^-^G>DKapA264_9X#slS!hXc!koJzdbUiUx=SY<^=KkW zuD`W5ROFI7=^aaNIzrB$P;?v60~ZY~!LLy&OkQxE?v#hPBsm5wx;sJbbQx+M%7-V1 z&Y|j)9;->42@VBE#MU1I(WajOy2g9ho!13$$EgGtU%8BRjo-!UFgtX(_=VMlN1)5M z&7h~3%0)N37qjt8QRiSj#`*<9BeAyI)Yj-z(#G9l58{|c3tZKy0N3`21luTGx1`-*XX}#CH*_&1{z~9^ohd6RgrZ-@8D?Hh`NEa8 z>}@(?BV54Y6GK6}P{rDo#Im~kjjZXZnQ(OSX!3q{!r^g6m`83TrN=kccS$fLUFwCm zayp^v>kd#(yC=5!a|H)9D^dRaI?c51a))1wnRIF%G#4Mk?yvizp7BMLjwRZCkQCG{ zqxt;OrD(|$Q2WPzXzJitIbsG_SsVZZt8iwpX$y567}|P8acANk>3lef)-;2If#;!l zS|FbulLqal_c4v_8T{EX`u<$30*`yc`OyI-=$!eQSsk1SUJJLuH~UK1XHkuJM#YkY zXrJIv&-!IAWZ8kD5A`O)vhaJM zFPhKbtOkEEhfQ(ZB0m81Bza8oHWhPM_@m-jrMUiGCHLL@n4LHuhuRAwq3rrr9Hl`l z7x%uHmq5=}=v=^;;mlj!pZR2J;elVLiRF8XKc-#tw;akWmaYNg%}S6gISngaM&Z-S z2-uaCjtt>RASRR|kP^EEl{(bwKq*6I~;^Lt@rmwopYp zzWTd7c;j;}lG{SWiHD2aK0Rf~?0>>WfveHHQyiV$(C0H0J{7 zzs$h*O#luvMpMQq4IfhHv#-T!bXDA==k1W#b#XSkX>bvp4CQPh4B+fNRlK;6G8}4ROekdNJ0o92_Z>^ zB822brJL?IU5|7*k~*d9c~?0l385i0CZ-{T5Mn}Oe(U$Q4Ysx_1NuSjGZdPoQmBFMovemo7V`s zUxt%EH<-6>^aIt(A3U1;KfRScOlo`qgZ(0)bklR#F`Jt`4G{yFZ$oqwdU}*DTRgg4^S2afd(kx|fZ{!&Y@L@a;-8IO9#; zeO0h;w1F`%jnKF89*Z7s!*{}QY;32FOE zn4@_)sQ&HEG)}lMkEB4Q*o8UX_T^35P%mDM;FHqKZq=u zx}8Yl3%gtf_0|jFcAAj-8V{gs!(I$NlMOOy19(}g@~*0Ebnp#>^fYaB^-qWL6yoHJ zo65b+bC~ZoeQ??)XKlF*N{=RE<%3Ji`TQbS(I`c6z)^5{U%+f+lX&6)O|*+}gznCt z3}lZvVRO-0-3WRn%!Aft7qCFv5uAswc)`>KToe|{1= z_bldt=jXxQKJuFt9E8@bE9e{K!TTMe+)&^TK4yS9O3V{@%3miiFo!Z&=fXHl&xTUc z)od3uKy_n2Hm>~2`m*L@Jb3_RXA|X$oxAY2>`~M&zY&}oX=dI+dgTWn7I9`i@rS)w zS^qGcBsN6BxYI&ENM?p+Ie5X>7q$MngrAe@@a|%LY|VCs-t^g!d~r7Sa?J*zy*6s} ztz+eXNx}DpFLbp;qSyR%)+^WoProu0Kip#pQ|0jSG-U@kf}*-YzTc5NSRrNd=Xo6M zt;qv%MPb=}@(mBk$w%9r860DVp+W8Ma;uT#6OT0DeVXZf*`Wpuj3rON%}dPT!x3W4 z-C!4VN1;-)5lU3dxK(8i?FUAKLyj#7|G9!*#!rRcc4WY8JPi`-*-Wt~8s=9Iz>-^) zDB9`{TXtrn>nF-8O)mnGkrh)I-xqf690kfL7g!VRo$8NBSzyIrxW6z6;scI>#lu{# zHmrk-^WxFMV;7&lT@2*~nNU84`Um$IV#|*#DA~Uo6m1{L?>Y9!v4@{b)CH8GP!tQJnbWb!1+~@@bloaFvoK^n*2&Yk!d;j{^N*^ zIjP*lu!Xl79fc1gLcqFMgZ2eRLiKwMFnf6l`itX0wDh8-;~wIttscdv9!SFpD{`q9 zN|W|onY^p}DHkj6s%l;pA{?wj-lyznG@E&wpTBz=)9T`xxN4*@QL7fSgT{eqd5fjM zIzn#r2X!DG3<2G_XHadt2xKQaxtC!gxcKOx#G{Cp)*Xfm1R-#43=;|pu2P@xER1|t zi)qh~L4TPX6Cz^JIBGC-PM*TWF%wwS!P985?-3XHkmk5PiZ$-q!G7je;KrnDSedC0 zMysY``+gPfGohS^%*$bgbFaXYz7VtsJOXC9L7+262`Y6Cz?8M*X*_0#`;sz1wTal! 
zzB(X!I~R{_ID)F7W=xP%ROQuE#V+>k$4YZeW;1#m&mAhotzJPOusvR>ddD74eW7y| z%@__wvzf&~9gxvCM6qrW+xLgPNVdrjF?D~F%sl3R6 z_~+Kp{!zjoK>?^nq;oa;oqs-pb|U+}vgThyplkCTCT-fr?N|`gan}K7(Gb*|QNQ^853Sw?R>EqC$DgDI~osk>wdt8_?)g2P(q zJ2-~R76&tl`a9lva0+Xkd|7t~F`!cSi|EBv5Z{%TUzY5Xgdq zuG4q2UaH>$Fy~4n3i`sE)cq-j!`Ur zs>?$0EKe>vZ^YWeg5`gIAijw2Mrdxh4tBd&fOkbIDqYt^vD$x@R@q@(doOiZZ?Ff| zb-#h0`4V)Pc#-rzId8rYje>_$_^T=%?4G!d)%;FA-{Xcj#KQ_#{VAi4q&nEuJ{1dA z%mi9I!m{{6G#R*q&w7!9bH4?m@rQR@rQI4w#H}Na{SqPGx=i1$r4aa*o`Xr}pxI8w z{m$hx?d@S;drry@si3q09A4tnh<#=MV;lgo9j@^x=S9b~b$;I!!cy&WVR{@HRxFC-rcBaVdSB6Tf-}pFblP zwdZWWUVev(HD6WLRG$J1i#EQ$AN~A?iO)58BQp$eN1JMMzG`(22+m(19@<_u_$FoW z)=5E2}~4C5_xp!>TJ=5JR(FU=ZyijDZ~co24zPA3{1B+Pn8`y00d z&{Y}7ANu=Y|Gx1!TVKh?h)DrV?R+?t zPoz9xLoqMxP6x@10rF?xw_xB|()+1@%RqCi+~JZt_pCU@;z^GdEt@Mp_iqq(9em0) ziiX06oHJP2Mc=F;@%+A53`m0~K;xom04bRmxNrsNOrDFn2Xnx1mm#!0+l}346Xb^D z<$R7+I67-DWQh;T!8oZ3ls86Tafd554*1NxDl%C6dUf`mn5Zota?C9a2irqc_<*zw zjZwpRBKdSeeQWW({b`I@A;(FP5>S{AmOpci0L6rb^4Y{7s1pys&Kv(SmwR=v(j@?0 zkFLU&#ssdhJznmU`I7e%3;*sqOOzZ56=wO>p+zXMFt#2Pk_U#{ib9BC{Gh5m`5e!4 z+=)hC9GFgeBoulRXR+2tG$YV?uk15+3F1ZHmL@VsZdWqYz{xQQ30iyYoN6y zoJpTqz?$_JP-&$fCQtm6`yDlfK_y$!z|4lXDlDO2Wf8>ExuRz_U6)^^x9h0D+MzYj z{LezVZmF>8zXZzTcnbw5R%7d<`)pEaf7FT&=88XNa{V#!pcop#eb?tPiB~SRuBhNo z6pOJwaXma(e2TnP)FY5o3}Opcxmcy5Nnv)R*Pr#)HS1cB!>30?}ZLIT3jRX z4YMlo1Lt5P@Y>AfL+b4?!6*nv(C>~)XDlwdQBM4yLTry-$nA&wLCFl-X^zvzRbxuQ zXA9Cg`3Ig?{Om(0G0s+h%Hr^)hu3bT?NnS~&4&PCSzoH5- ze`FFUROfM>DC*Tbup6uoxuD4{U-s&e8eXbNMrXltUimtLo7og|(Pr}ZW|U%>N-lg@ znML`pCMJ;Yv{Y8v3J&IpymE~Nga=0Aui+=KWpx|#G;oC$sR1O@EVJ+81rUt(mbV(z zfR;X;%`Fn~%+Pwwco~7G&W51Q+xfiS<1l#rSj*QfYe4&TBcRe=jT@eFfL{IAynDen zVNdxYsJjvd3qoahK#g?T<5zfD7~RXqF~poD-^=5bVA?$wTch^qv>bJZ*WgbSpOJDDmIYM&aA{biPeb0k13nvIDx&xVbk1 z(_Hi*W_&VQI2?yddnA~wJ`@ZE1!(cL5*%ud@>Ra;i2HaE&ye@scLaItLUef-&F4*n z#(?Lxzxj{zQpmm;0lCM|L1V%~=JNA7?>3I-m03dGJbx%g7NkK$L^;?!m;0Z(7b9 z;_@$NAlm#SbbK%8y$eP_bYcvMcb;T7G^nrd^%{u$?;6xyyb2L#*MXICDzrze7LE_g z!AGt+xOwSbRJ8q+cTkq#{^$PKPAn9=09STPwin%vDUUd605ne81U3`sZ1kPDsjC-a z>%3WDaWRxwH3l%IkzvYbRqB`=Ef4uL1=1GxakHu9HIeLPL1SIfv8)U|w-rJ}!Z0kq zqk{n{@p$036f9M$1zATg4@|wy1^xdKsyfxdo&= zH`?493Eh+Pl{~ddNF6viMKcI&^vO299^N z&~AGgaWJk3_fZbc=Pyk-Hq;Oc)USih;z+i77v;s*PI z;r2J_=xuZ!zco;A(H`n${L~smkXy!zl{6uSn4~OF#U_n9^ zX4q$;^P?GZ2lozU=RA_X-75n}-(A@6Ds=(>caZw}GVo0AIgEx-cy%)k1bb7#>Ivz% z)3upGdp>Me1YpFrK;3e5bpp;!KZIe(wQ9VTXg^mR8^ z9(@sfI_0p}cqyuMCBfc2>Pd_~kHb@i=(~Lkh#Qf_nOKc1YW z$wf;dnde?&HS8I}3-&!?qV{vlE00!3S9Q7M!#mcNbe<)D%4E?^E4YP6K3o1MA2yrU z(0SAy?Cx&j(VkVpwJx*qEPdnKM%VGThmG;zr{g##ZazkxjlwA`6)Tkkuz5`)7oY5A z4Iath!!CeYa6h=Zr5f#i9E527%b-56hR$UP=o|l&-ThpSqVsK}nrVNWkIi***5ft|rFY6P;*S^*G!KNZ?0OLW{e~GNZ9s{4 zhdkPTC_uvE(%yeTuw_iwc-i^B%CDy%$Go#bNn*74(s8 zCI-S#EdMtIHb*2O@GBtknF%7R9?M*hNNCSq!{?WWkk>Ymrxohr{T;K=z+*nPUR=X_ ze+}n5jok5T-)dBrrm~CW#dv>ze+>NN4?e0q4l{GfhqUPw-}G%8Y8<-^zO>i(zMY7+ zB1aIFd0{wd-QBjQa0s1ImwOgKyU`zV)$k=OIJ^%0XJ>-D?*P1U%ot0bb5QlyWodVb z`{OrJu4#7-RpLWomUTIGl3s>}!<4TL@}Swq2Ez2tqS3%~n7RHijvH_q4YaoL_y?}wscoR@j4xxFXmi5-o z!TVtbD4rMs#l7BGIy4x&bo)W8W)YL5gfgSw3L*Bd4b;ay0nBaGac)5n{MNG`BQ{3C zsb3tQZkvp<3+cT4`Y9aWFQ4As#VDOW2@HycQ1`1l?z~LTPD;3?pL+yvR9(ipWR3jG zB;w`lerM^H{(}WBO9#IuO^`hQ#AHcLLdgOE2cb4^j?*UI%XO}(N&i1@7cTqj6T3qW zMvG>mj*S{`G@8jx?4?2zEq9RJb^wLt8%wh>!yxhH2%K>?8g&+s@2WZ#)NM{+@{+BP z>=6ek!%suJdm9h=;fz0aZ$cNBP}Vwi4-0=i4dGlG4!#x*@tZSYRS81i!+l`eXa+{d zJcvtq2|D~k+4)nNsFAV|#AzYiWpXl#F4oZ?DaMqVfmnKeDfIjk$6Uq{4|n1h*7dQF zpO8+#_R)dh_6;cqcmUlp{^mY6Jisi6G=%+Uuvw5T)R^=^9?}@k#fS6cAx`?xy)BO? 
[GIT binary patch payload elided: base85-encoded literal data for a binary file added by this patch. The encoded blob is not human-readable and is only meaningful to `git apply`; it is omitted here for readability.]
zx)x(^sqwM<63{cl2^8$8CvJaA)-m3OL-<$#D=X@r9m2BNM));w8#Wz&&qI2V7bB>P zxB72{2aEfo?IB~C)!`zxKaCh;U!|z9U~d`DLNgFTmIg+M$QIaNCk-%6?i> z=1YRAYZgP$p-7f-aSDu&IfioTqYPQM27jIOLHF)Qtl?D!%l4-JhBy~SH!Z@9aUSsW znm*K*|KUdS>e!7);x%PVz{I1e9M>g-)hW)ES1kl1=EpOA%VE^oC3vQfFX&QM1wU^^ z!JyaWW+Sdb+h=9a>|V^8Ov6y)$Yf~Gd%~r6jnL{-2a|d@ah;);Ah~fFp2(|2#aZc~ zkP`>i3DIz2MiTZl-b-$wiLCLxIoq&dF`Ad`g>7?*m49>y^EO<9=96Wz{EPnhHhdE5 zSM1_n)=np$q$^9jKz<7!CkR|1r@Y}1m|f~g_j^C~EuaQ$tLLM7S|8!AMGH`5x{%i| z3g%s3cC#Z}j*zEF#_HD}$A(1}V7qP=*v&W#iEbBo)PCas=B|b`jbQ4eh8bipsbm== zg1OF^NM>HN6l-VE_fYImP#tN{mDbt7%uN-hrtw+6EctqL|<-d9W3Infkyu)+zJ{ z<0j54!i8;BCW6EJULj2$i{?{+?LhF3c zg)cn(?>Zipp^S}9ZOr4~0DK)myIt`Hh+1FC5?ia8+4WOk`GUM5LqBnqjw)yjiG=!R zCvj((Cr)rV58@2E$5$fs9q5B~MkOfD+QIaVDvA5}k2l&627RSL+|$huTTg8S-M=L~ zw&gOHD-LHJ1#sqniWk7S6>Ca+lcZk$d5;BNLks5{94p zEc4rO5nF_cvR%Zq8avJ(=9`~I&*Kho@=`R$Q?J8wkIGyYkjO4J_(6-OojXG4B3KjPI_2 zSCfd_FzyyFUfCbL=}au&ZY@0Gk%nK>=sUzO3h~1_?6vA5HcX)W-)drAdzYfu6%O6w z20*l>ADx zaTxw?9sZnLi7I+otSdqpL>IkTOTVv7e^@oF+w6^-LOF`YE#(;-<>Ve(DzvY=%v*Mg znOfo%P`q~$+;j_3!zZ8P;Y_IKz0vDe9tf^|l9_DX#+)R>z%pnCwDhJ@HfMf=tfE1B){Ja)Q}zE8dV3|f~(fu*RDTaL>_QPV^IU@dhM1s9>`7Ijj8C17oj z98~Rp@%;2^SmPjr5kWFc3-sk-O9Id{^E+4a8UvZgK?JI@mVN$A zR6Ux7^RZxGb`lLYk@Kay0*}Rx#2Fhd;;Ye8;%&*ObFh~Ed8CJ)HdC4Z;Z)F2O$Nb` zK=i;!jC!pNrIT_X=x6{~=k0|DzxSYGVK9iS02&3fQ;}Bl#062{Ip2fpjC>=MZe0Ym zQ9PI+Gz>%!jquW zaS86NEyT9Z641^39~1vmgsuU7nfSsxwsD<5WNQu}Zto8kv^WohU#DQe)-qJE=;HtU zDYuc8%KmHCBR9o3@ceO=+2oa@*3nILcO41diVxZ1oO1M@A4l##fAl|L4`P?&LWRpK z;nurxD9*ml@2#M$>mXxLtPrE5YLKjRdL9c?H%8BE9@cd{0$y_ztWiyto!|ox#Ohk)jUP;At>%UeTjLDKn}X`fHR#NI<-Onn(D z4}BnX?|+kBNp(c!E$uw^VgV1D`GLi*`$vA$WC*g2=OJi^Ci-z`vw-?QtVMX`<2F2V zj=rzAnQ{Lt3C^RO@7+gPSikBy8{0+Mf#Fx!l$&$W$tQ-34&D@I&JLkGqcVQMNobN? z&edhru>AyO9+eV_QTK@T>{^b(%3fF)eFmBp>OjL`Jt+U#%Js~nKqqztd*v*_?(G{v z)_oCdVm4v^n-H1a%Oa4kFM`mB0JJQNgMahRV{wlQDj#|+Y;0S{hwM#+j$W~F!8H<{ zEEhtcejtWEI!+mreh|9x7PCyc%U@4XN2jw-xpGATxf=3$3}uy!)|>^$@3AnLxUhZa z=)=sNXW`qPJmP6dDW{aebT$Rbw07@?AkR4FKUoTmvl1a=;#Rcy5)Ff9MH72t4S8^q zn8${EoGuT>{FzI*@^@!eYum`e4|y;}pR1@t_btf{PcHc@l-awU1uJ`FW~$yFb*9(z zuA6HiXzx8<+|v&fK6ruT#sQGje-_@@zY%{o$3yyxt=M`=k-6wRo~2YM@Z=NlxPuSoVE1x@&hw_p+x#ljPN4(L|rg$y0ny!W3+G zRO8K-p)6!w1kN8_fx7uY%%hUCib4fit%`@-Cq_Kq${jjYa+u=DA}~{-z5S#*)-*yB zI$SHE-C-INuT7J8ie9i0H}|1@O)s>s%x6vJA6aIN0?a&;gu>4$ph5fBgvkMH!uT-U zF~u2EB_eQoNAK63cbNK#%do~V5NkGW#L=fK(V;FMsz+oq)$Lhq)Z_CwV}2Y)jh~OA z2qosJT>>4^nanFU4}$%pnAd@=VEAhS3mg{I`704_dE*{>E^>jY;E*FDmlOLGa zHgJJg6#qEkIC0URbAxjs@NpsWr~XZmYi%0EbxM+i8p+lud!U8+sl~*lab`Dlw_|Gl zaj?$Hmj_n|vb&4D&}?IGUT$3jqPc5K0~;q|-47q=Z&-%kX9j~|!)bZS6-|gRl;Kv& z1EkX|HYULybPq>>Rp$vNDmllW{5Xh;k<^#*eI%4>yx=brrO=j4?8reoV4lPUwe)&% zU5h+!_D?P}+(K=mXH7zp!dPC>Zz?Lj-YlJX)fZk*2nCPc^ReY>mRx%A5RCinf|_2J z;N|V**zoG6aAjd7#`y+daWg#=*N*2_>Iu+4oLrQ54l?)qQr;zZhfnWLP})D5&J^Uy zS$Yzh{;dP4d@dC1C_}AxIT&F}-GRQ(x%<)cc`!AYPgy z^D+(Li!{unXKwx>YFHy+SU&L<6GhzVLXw20Grdql`K^-^A z{>#wwJg*xufZMOn!gq(fkqwT-rXTe@cc(FUT-pjX+6Qs*>%I7~hM0~*;=+6j1O4u) z;9B7ahQoTItb;O2!)CFBcs1JpA2Sg}i1E2b5NdADWhN^|f;&4U&)gftj~Jw*=;1L? 
z{3Eu)8!M2ME|q6?o`Z*4Be8mF5tA&@W^qr6hwYM&%J&nv*^~>+aa)K`a~92G+P1-v z=%Xl@@Y7`B<{|{$^~5^!XYRB9@+O+m+s)Sk>7hVuvN;K1CzoSyU26mZ_2><1L6q^5 z=N4XL3DxO*Ulnyi^vZ>3KNZ_{@mSO043@LlVwIzt6+gqf!=p{s1dFVhj?O zJZ>`5jM+qI!N*HgFn+oP_KsSHaT|$s5HA9oSZ$2bkpK+12%4hxAbe6qu7qpc{?H|O z)}V!1)ai6}2w;kB#OeLf7wQzF_^eOeu>MjRI5}HmtBE6Q_YOzL0q5nH;w;eA*c=@V zM}cT*xZKcqGPfV>k9DrlFSHt+^bbYXQ z^@bB4h$YyS2pPkpPHOp3I|c zGXz$iM75yZ)S*@54K>|R?LlAq3@1|Gqu9jp4*4^dD8V}iF}OSFaz&v#)67`{`Z`t6 zrp;@>VTzZwZ^r}*Nm*=cyvYzJcLGbXA(VygUE3|mx`(RoNY2+VltCT5Kf zuTjS&dL>xCC;rc*Crr!e2UBxC&0m^R@BHE$w&P?xnjBpNvb9wV{VJe-nl5}_5CvL2 zLwT1|FuIuEV3p!xD2XgbSzigiJ24F1yH&v$uN0I#o-J$-j*&Z06GMx6e{xk{ksJ1p z71|}4VZ&_totCaQ4IViSmfzFGzq2`1H_D;wKtFI9y&YPU2QY1~DGS#=U>2y=OduZk5TU#=0M*y7hwhYhYFtU3Jj%Q|?=XO-C>`E#*a&p@7ebq7 zIul&>0(+XjwirwYwRGAGJ&FX0c_#QT8Uf0xau)u>7hi5Z!_?F@pm9|W*!{JH^z5x@ z(*H2mnU}%LI+t+Er7m3gJk7kX6!YL!bpLtRz@FsF!NZY*VfzhvUE6rpmbQ~v!o-cb z`I)^Zer)eeIcWY&3K~vB@ts{JX1*(iJ9R^_-zPcN8NTKETNi>^yaM#?JNULK#W>1E z9Wy=Lz-@9WRPV`0(WFVtKIu98B`ZP0z<87lRxpXG4PJd#0`7}ygxXpQU{U}5xNmL& z2&M*^Z1Gj#g;&)Gy}P)6Trku*>;lIxxpI~G^H35nAGcl2$Le$CIDq6hwk7#}EwPoZyn(f93neRNx-12z(VB zjqiH+g1UZ$Tz20I+HR2NEUZXq=M#!CHwHt)`!oE4+#TCu?y*eDAh>6K;v&5Z!lndc zuvc4)CVRV>`|K@DN2|B+nOi!#<{f|*-4#N?<-Mgvq11V z9p)__hyFw8IbLW6(uVVVq<1ZRza0#gdgV}PnS&m+2f=;iC-zoiOsuo+5H+(94PA~g zt4Ak!{=pfLKmIWnjm#|#c9y}a-T9~(^pF|uOAsa$O7PedN0deaFSk1fTC3hOE8^Aa z-t>lj`(^NJv;qR7E5WQ;B+qXvX3~*)tm3r>if7G)T~CO=^ge;!QwzD_CMUp{BOqya zgGrYU;QWbW@E!KVX@QyOFkB2fy*U&t&PLsGeF)5^{p;z0;9^kDvOdp3FjxosuTH@F zr+!cvn24e|TX_2eE!OBc7fN2q;K7PhXek}WPS-C-%_YWI_iGltkK0-Mj_tfHp&D9J zmI-&brJ>iVT}&};57;FMA*E|Gync{~OT!(AwUk&YI`EfGemWo8&fR1U@>RU1$;cT53b=l6_qeq`T*!tC;)i)3(Zdxu^Rb9yUUMa=Ax0IJCRz`n^J)ju>O}Ope zMG!p6hpuBGpgYDA-_1UcFT)l?vqdF(1Wv=iNlvKq;=0^o$S!DIH-tCaKj-G_15mBX z2X2oiHrZhrli3$Q#hAe;?f;ZZ`uM?9oy92qxP{e^&BW-{si@Uu#O=Claqn;HxgMw7 z&V?9^Rxv=GDtEc!xGHbCxS!QW9>mvLaj^g6c2rvuj)q@@c=47g_;Qp1?q3py`Jzn7 zx5$TKMUnUH1xRpI8Mi|D27iLQOam}*`S9MU_2`MaILzW6f$y}z2yR@GSh08uiSRSDu@cv$2Gra zU`vnfymOxen6xc}p^=sdzH6X^a#7+NsQ^96=eXz}Q@fSIl@>+8$xW4b@ozM$>K_O3 zkr%?Q!g7e&cLvwIFr@vaC*8-NF?;>7FtmFhihCd9%TF9f{Id*NeWSof(c{TTaGdKcluWH>+8frQ|MOxVGbaHCwJpK$f1#N0-$Lei?mjn5 z|1R&Tbs6pUUIv@^#ppF0!DWvUs70IN=8Ix{7`}Z^Ay(^X!OdQmup>o)+LEKN+ktpb|4u{QOUfDM+-3M+3w+AULG^eiX5v+X z(7Fv2xvxzvH;#kw)A{gmsXv6&rlP;WDv%tPaew_4(E5Vzybsobo605l_Qn(KZgufs z?-^Xx_%jFFaWGQ%AnkGv;6ocZhU`5KUcH52-IyiUon-_CySt(3G|HnMt`Z*hii3*k z^O!zf2|W(Q^Prjv)GL*Pd4?3HTZ<@vdI|>T#$ff7{%Cg5P1xJT4-#&lMvt6Z-1&A9 z7Y)=Anq5zVYpIkCcx4X$bY?31k$}~=s<`3u32YT{S*P!d0@IXT<6qd0@Awkl9_|2$cu( zV4H3~RDPeqMBfa#b^yZxX;s)1H5-CoZ{)$raxUeipg$!O97`jFv&Nmn_D`po(a;Id zJ5YwsZ{ks`Qp19YUDlwR4!dW^5*xprZ8vkp-c9Pb{387~J7OUBVj*jOah549&jZ)N zokQOgJ^OB&Mw$&G!vmf%}L$UAJN^7J~aGvPEh(`1j)pu(!R=J)}{r_4HhB{lOcgK9jMU z4TX~GI50e4f)f-em>WX(+*3Jh$3(=glddrGaV2=Yx&Y370-((y4gUXqsGmCylpm~u zX%YR<@y-D#-z>&qkJeFEbRvKFl6Zk<)}o#Ac4nk`6a@NF!kjA}7&t!}T)`QX`WI1F zI+cYl4#SlbfjV+B$T6ReX7wqs$$Jbgnabg>X>WX- z;P#pKNSna0cVH zpNH{5r_gHRJ#Jy%3(|$h(czvq=5L6BwZ7@NH^vw>g&H7Olp{3jVJNqxOy8#E@#v6} zPwb{k!laY5!@RT^ey?7I>e2IA;8*H;hF{=KN9jIs?X|GUka|Tr6VXF6j;)@w8V&Bm z;BSv$Y^5`p;eTDsap+mOZS_B{vtx){8qpvRyq$&ZzYFEM{wu)stqNv^OvZ)_X>_)& zk$au)!(^@hG0Pq2dHF?i;FRs^5w4`YY&5x6jxVOZ}7EHuf3xCmQ}aT^St zo1M{FNdYFU=eXCC)lj{>9H#YMgZ+pf+IoK)Bt4R%X6-u2^tT1)V;t3vj)8qsdZ6LY z0AX9pIygBp2z~P7A;#o5Dw~f4?Bfjmj$J^CdtzNfAy;*u#!I5ZAU`t}A9NGq*^kFz z{p?Zbl12HLTw`9Hd={#P(2iwUEGYW-hRLlJXx}A<8_|^2IlK$Jwq}BoQ5YUnE<%&% zsbEnW%OvL?urAtfzw=24!+FP;IDZdr((u4!;S}s$7K%?~sbG?^i?=KdhW)$8QwHA< zKDAb%yH$hGYXzO<7RQ^~Y|ntf*OXCEMm>(NC1AkV<0K5~~8 z`*G<=%T68#8g&x~_LWj94Xy^rD z#j0%>Y@H7-r;agUmT!+tU$+>WFJ?09K~-|A&p)|t 
zz$|#Nxj*Wf5i7^=m;7Z^CVMwU8=KGW!y;W@sGs_nscTV}w(Ai8y|fe)4)W`Jahh=M*;hk~B3Z1i?{JtyEdEF7PYM%lEuet2+oO85; zBu)kG_?}1tK&_#aYrX%*x~k?7AM&qEM9z)d^}f&*mB;M!uJNc#1>iF1I@g^X2NnyE z-W4{`&}%(Qv8sj8;v(j~=m}TciJ<+Jr||uaT>KR}0IRjUp}HUoJ(9;m(zfG}`)wv~ zb+%@vQMK45jDb+kV5WXQn~4YXgc)1+VzBHko0w7nMIO|JS(i&W2s7}yMowtG2vpUP zGS$RzZofJdY~QcM=JMU3lc-7DZd0M4+ZR)Xy$iYhO5o+ECf>B57dQ?)2-8j_qKkSA zyvqqhv!*XI4;+C%^Tc?4|5{v_w2Zh^1(2Yu%9h3{a4YpfRMqA{%&4BAxVM$1 zE{dZ5{0cnQZ9c}|Uw{J5LKc)9iD{W@QD5H^zKRb%cNWv{ zeF)lj++{aY5)pQWV*L!dBkIisS)DUeXnf-@<6OY=umT#^xP!{XVjO8hj6QI6tQWh?SG!IZ3-P+c#!f0rUpEDcMj&; z9))FTg&1O>gyFs7Kp|~1)sY%8#lKb9>D3!<%%J2>5 zZ%Ot5P5q}VwbwMtlzcOlOBJv`i@E+Kw~^p9Dz;e*1@>Vk z35n!+P@M~oojLGMwHFQ^O70T#TD&SG|6#iU&&=@yxvc>T@9xKf(X{7D*}!cL8n``m zG&8%GFyDQ{uya%ec_Sp`qo{!X>r?Q<^AOy*Gzao;nZWng#6X{4g#z!h=;G(bOw(&eGZ48PqI)hj)tg* zPMAix(tiGBsIEE~yaI?j=YNG+D-laU`ka?p4#jqLUHra<+LTtvDUaz#1jD%vA!y!Sg97C}rH%{k@T+gKa76Y-s5;s^lZVM& zm@aSokM5bp54i0Ea!H(P;miLd!4g3#+#6XAUU_T5U9FL?p!rJY7klnu;11U1^HF@4 z*rt85`0qZ)F=4J7cWgVs3e>`(y|)UJE}hKlgh6b8D`kTEr!$Gwe%>u2pE&HAV4rsk zb|!>^Gg}PP2V_I=!2ovVUjnvlyeE`Q%LV1cQt(a+0%e^~JlG``cmBvmm5n3ub+e56 zzf$7gzZ5<(6yuiF=b*F66jX0afP(E8LAh=MIJ`T93U?oFICeFgye=KxwP7XHf%z_Wp;njI? zj(7f@$%EHjXGO)tK`}4jMV7wQRT+;WJ(bcodwp>2yRs`Z!DM;Q)>JrgZ98RaO{{firQze>Ob2+_SAv3cO7Oc z|7l>Ibq}yUMY(v(8+5KF7H1sYpNI|r`dSz{7{>8aoAXiaR1CU&Sqx>v$O-a24ep#O zM-Q!B$n{quZuxBTxZ2^%v<=WgGqmPlbFlFvcbx@s62HrsNP7tcy`{5PSu!ZA+kyKL zC2%x9BG<9&DR0tT04~K7Am!{-Y;d|EOlb)MokjJE!)k7?#uE)?K=5m8p4FZ5CG;wqCm95AHFO! z!zLATF6;Y{rOe95Mx)EHae^aGd25D^p0xYj`;MK^t-9i+e(RVR zEhd+<%&|UTw&t(tN1ct>_fIBTK8Rp{gVLZuX%2V%{a)UC0-YtlKH%D;d!onjc=QkK z2I8ZoY@2C3CioPCVNG`~-R25z+k2z3p#m%eOmY3FMJQ0KmI=BC2|dpL;1A#VLzYPi zN)Ap3&Ff-%KAjLYbnle^+MkFqd#8eqfu>L~^;xOFD9}Xbj0*1>H52!|(nHzd^_V$T z4Kjb+U`A&7kh>-X-tPglQN9RGH0N|9Rs|<#fyc$Q@Gzqa_4`fdmf8OBvBx5`9(ana z=~s;wHve(yD`OBFEWnvdD$zRkCHYo|z%P5+bH)athXd``z7{fNFAHd}Z;>mH)xcs4 z4|E@}5!5QWL(7CIT=HSATyaWTnx--XwK^w($G}R^^*4cihib6v(r9pW3?N4QL^gP` zK6zr6aZ$o$U7_9Qky`3-)%UjfcsycP9AeNn8riM0dm zQHQNYr*B?ZEr^FDt~IExD}*u9Xbjc(z|Hz3qWE1H4();1;QCXT>UkM*0?N_Ybv~Z> z>`(kr5%2T50u_Hh2p#*rW}#v=Dz|2__CLe0=vf_iTol7HUoC|G6UO7!6Q{vy;BI)C zFQ=cWMW{5~7lJ>OLGayc+{`_jH+-bCNp6~{zB-wKboNGx&*XGd(hG9lG|x6h0OdvT%zF#3d7gzm-{-@S+I&b z%#NVvQ7~-jUX9BREy3pwrRZNLg)WN`(5A8$+7J0M?PJDp=T->fo+^H>c@SF0dw^od zQ2C7{VrIQfLfonecDL3u4SI*wz0!i#L&Op~yO4wOTnL>K!ghu#p|L0w6|0H4{z-=3 zf98T+EOoXs3)nN48F`rl6Q4 zHqAZ#lJ~H4#ZW~7T0bMNd$SBj_7J1in^`bo(>ZK548e(SvcUOI3%@v1hQ7`<=#kl&M#}w2bIFrk^ojqB*0i8>epk@v2_@7dCpDDq!k}&)^aWra-4guBw ztXXQ!LY!e|h)um;u%1ihD6T!nMB8Wbuj~4wnC6T+@p*h&kS>PW$(Yu_H6R+{#DhI5 zz}f8#6mN(}gSqsbd}sli4mjg?%LEY2O(=!`j-$HGKxlBN65`M4>R zQ>H`pfi>udTXD&=kyxH3z+tQ(YE8)@R@ia5uSXf!M#VG3^8S#1{4`urufd7KcA@S* z`v2Itk=YeG5pVGm6LiEFn*%gP`2Y{uy8&6m@9G#z}qFvQrmipcY6PLxJk)1EYrzUWr zU4b7hM&k|dIMgkRV-5c$2u=L%u>U%$iC0b?zdi3+*lgmp4^cpKgE7dW<3Q?N%m_w+ z21}2j{%>NFEUpB1ktfSvMJ~}z>&OjmjkEs|>%)DMd|D7?U0jRUOPLfqyC?HKllo)g zy$akGPCVfgL**@(T7;e7zOq_=0q)0~L9=m;Ileu{PX4h*#gV@3)}fPl_NW%-3U0Cu z_8w@y%mtMqV?k?kF|Q7m^QHANoMK%@+>?v^fX)Rpk#9x!-hWu#4l78Xq)na{8=lK8 zLFktPbNuq$bSuX!RI}~P{|eP79+K>-R@f1pEZaTTHmOCHc4K7D1MP*hFX7dNg)f=N&Q@4*SGSLUM24#Rw{&FZ} zv#124k8(_NIHP);{5im8Dp>u3t#q&Dj!k zo>s|(nH6{>#s^`_5sW()3ieLqg8DBL4KL}lG(TM|x6Xmqx&A0@p!5DBVk&hPVFmR# zZ8H|5rG*@3)-FKxiI3#zcoyfToI(?ePs}-?p6k5{r)*0VTz1aGgD(%_v+B#ZQ!fN` zmb#>sur+eFwQDtJI)qhBnr12*Wid=ds%S zGS{zi1lJ4NpeRm9qfR?8oplafi9`DG-7o%X{5K{#dHR^Jqw{C>xLw{JuJj56L_aEDv^bZ5D{Qn01ZKcTqdGXHb0KL$U# z$;_t3qVv)OuK42vZo!n(3ms=_vq%aldyjy77qO6g^rtT6VdlSz_%}=fnpM*vwCXr$ zId$hRj*&b1gc7^kcm@+@^#hMvr6@VJRxY2~4HJ~@VSGDzTQf|Ftv;r7>H11+Syd-& 
z+!zhA*>{-Z!NbDB4TG_!NCR_c_XU-)86eP_B{TXx3BFKo@o#%3biCm_WJNq=JzRs$ zk0W8$Q+Iq{M{NCK6>zuy7sv`-?}kNwA-m&Ni-Uq7yYek+fWHgKI4S-k!~^64eg-+3a6sYY$$qFp!1<$M6< z_sPKRcL!n7>ugBX4}to-QV35|!IsDF=;+ieRNF!<^pXg+A%dPMivq!QkchuF&WFgl zNR+MI&wZ47qI7+pFnEg-j->hf**Fn+G>5TFn%N8d3WY9@_1Q=|vzOB|P*3eRB=mIz zk8PLP8c8V#dN#2&Z5O~}N(oavwG-+MyJ3-JB6j74p{>q(7(DbeXv_D*%iZT7BQOnD znI&U*Phv}FEd#;wk0$F0fGhU3059LO&`0WyP0xfpLY$4x`$wQ^clsBw_Xs1rR#?nlW zpwGN8P$|#C%r~Q3`tlxR>vVpuOLMX$B$Om@bYixTFTHl;*D(2odoRoH>y z=JPP*K`n|iXg~f(9~Iv&V%meHIAlqGRNNXZtbaBahg*!rdCN<1)3++jJbxAnlEl#Q zGnqGBe<0s%O5N|9axnaO1iE~5Kz(g{q0W3Y^mkJ4#dq|SO%*P#iPi>$TX!q z5e2GWOI6;Jn^QFuj9OoD_c`M+Onp8UtLvc$<<8zckH*Ps$R&^%%hg_M!WHvE?5vGt zNB??|^Dpb&Z1?Xy5^RmT@))wbKz)msax>nrX_mOu{cw@o?6;2-}ao7TV}WVPcyI{hwW8!Dz$X z6E4Xm&)0DEjy7&KISUMjF9a=mm(ObGg@!y*F4sw-Ovf8;$75kki#=`;pT_iS3b1Y% z0#W=>Si%O5$Qqy8}vW&JU&xg{sVRf!0V%ws`N z9Vc(dGscdjUf_H96bQQi6@IAOjn=2A8~E%s6S+HqN~Iz4L6`C6bVJa8H<7<|ngGqc zPjMaNHn!(t4dy4^rk+UR%6-`R6d_tQX2A0$nPB< zb6qD4K5oxEBnx1qju~DZS_U~d7-c<%f>%WrpV)`KKNpHYq!`F<-wOrS&kF5*!uX&- zZ8XuD$`S^}F})Rzs5)mBZ=>Cks`?qs&CFz($4{ZkwGwhbKNX6cxT!VyE9*WlXOr?b zq4$t1Zm4#XdvCeV#0JEzo@OZ5e>I#5TN2?cy*DjH;V^w?5uE)~1parsf!T$0p>T5m zw0F$r_KoJyTwegPy?xnasW7B4z!V(s9gkLTPUFk6dgi6+0IOTq;e(#r$YYv>zh;~O^`lE*O9jnz z;(X9M{T=W8-irwW_Lq*629sZp-XDXau=(&fNC@&rCC>pU%5G;Z=ZT*Y;=%-VdxhUq z>GM09%q0Wf$UFS?z-aC=Ty^Li{<=$C0B0vY{bMnPjM##XPX(O(7?7eDk98+{Ve<<= zy!LSsn#GTYPOC|*q2m&l2tjD=F@`^$KMoZ#>YdscVCan9aE0y*BF6<7nLiObtFH5w zt^PtA%KrFY|HB6x(7xBC6jxgBM*sJgAhD~(x|`pbj^|SJ4M>52)HS$tRy-0{2&|_l zL0jQxo_|2f1V3#{1v$WVvx1nWJeQjd_{G}{in!zOBLItYnC0RIc5d@DG_}%2&FS=< zFBgDdW0nv$Nw|qSqQm5DsNI7qncOPC@V37Utj z!8ltFTY`?W_P{>iJ$nUg^ee(UbNb_D+>L^H4y7?ElR$T{08>70qz+>aK$RWLu%SLp zxDPoRortSk!@B+!vxa$xprYOwn;s0OtSI#iOC@YigByw#?Uc8^`^L3I&0M9p6rGc6 zxwirRjyF~D->Fn=aat_&@2-UEzRo;<*%EMfyCRoaE#r^X*J5LGHF^&=gFeyGI58ju z?r+M)7X6X(_8)7owCW(L_bsBFpuAaj)#r*{ut^@tjv6R zwk(?sCYI!WZ4L&XYYFg4m6)L2n|b@%oiO+e`6;Qh=Dk8pj<9w%-N+xj2dxBg-g$19 zWz9v8UYT~8j{&RI8C?AIjZmS+e`&uw*9$K>vcXHeX^W9 z0p+k{eGu56a0LCws=V97l_>iBkEP11z^d^qGfO`Vna>nlQRT@!htWGIb^`gzwV|=! zX2?!XLw8{x<~sHlFM7BN)c!wzIlBgmN^@b&gJkH8aDc!en^CAX33Cl5ajj=dptYYP zYmTgfUW+SHb+0L;EF25f1IIw=m09TcW13w3S1wb}u%PpfoOvBT2dk4x3BFv;gI~L` z80{W#h8&L(i=*+!gV|`ek}(ORHz9a@JTazs-sGK=xbHG z=Wqqqy-nkaPxKDj=L)B1ot5Ym=I z%EKYJe0Uxu=3&{pIbLvl? 
zs$fP;44M@)!QURhHEs&jcS>2fb6?ch7KX|`)Jghc$rP`$Aw(RBicazn87rff|3D@i zJd3(XlqH&*!Z$jd$8#21IDLpIW~wJLFASj)syW=dHgwfgl zbtI(Up?BTsvCuiMn6>BxamV#P*kLCLM9p=?<5D+NSdZpoX7$HUFGganZ4K0YAITcF zR0##R*raJd9h1G=$!qqyp*Sgz1us6rMYTbt>iMUjT}P8i_m9P$u8}ZTgV=pNj_@Sf z#aF8x1j{sK%y-D+;%D!<#uEBFR;*)|nHIcDzOZTk3I6L#2ue!E z%9{pcfa2>0`LtJO@bnbg=ZzQ(#qk$WS)9N+eIg+LN+{cMJ`T(#-ZK?wCz@K?#gbQJ zAEeY%*Wq6>vA;7xH87k{pQ(w}9t&XXZ5=#n7=t=)n&h=O4|*X6m_7U=w8>^bdB#@k zyf0;535k6BSuy%I+CZmNgpH>+frnW*SiNlE5fzEpG)@h|yDku0ToV>r_+s==4U7%3 z!NW^Vf~RdHYWkgLuO}!_YibA#dU6Eqx_nXdj|+&#oG|rRTn0&RDq*L?CZtIj2+XC- z(Jz$zmhDW?#JTm+Slkws0Qn6f)DN&??vMO<>c0Ls{Z=rfSD%LjpKiEymNWWXxeSV* zt~@hw1x6QA{z#+&)$R7+IFx?Ysv7QmBb|GHia|-)MW(a4kfn%QnB(BJ?8vXV*!f{Q zSUUY+f4DJhBY)3bA6yEJj_0e4RIo&(wh*N(@O%wr;WwXql+N- zrIfubPbCKIZ{Z=aElU5s60UYLC3i|3=4Z@-FC7Sa?tt&^c%h=wL3p$83~U~7nQ2B} z<5p|@(E8~mc7VF2PyKAsU}z}*yEYUpzixy~qiWWk-!7bQxe(QtYe19VKjt-h9l1&K zE*)Je!u|z=BTb!DU?{!J)&(7W@&FEB)KPg zm)nsO>jP7aGv_}(BI?)NWwvQW{KzC{Y)+LoAQ@S=EBEa3HZc_934A7L9F|Y*(>b@R+Ect*(Gc;Ny2OWFC&H@1v}KC z&csEq-W)A-RDUA0wcmtik_Mvte44GN`{I=snbXRP8OwDn0cDuk^ z?#8kkBWqDtp$z>$=;Pnqp&03vNG#r1tldw~vMX=d0p#dhJ_%aPwi5SZAyc|l0M6h3 z@`maj^qkC*cj{(9Xzpn)qFtIm5H|B=@iLShkLICEG{8;w6exMr!t>r?$n;iWa~pYD zB+r>O&FMro2f_Xky%Uv2LElTO@V;6SPIrm|V}l}8O<2M#?yZ5AT|#;KrP-*^C}a)3 zA9!$#C*=ySa8+S4YwSxIoOl!NI4p~G4P#)ft^vQQ&O_YjU}zick5+G~ElXHM-}{bKu-s|JKEnZYd(;E{ z!^Z*+CpO#tC}G=90I7=}SH7MIxi%$OnUD(>eIhs(jHff-Trf-}-h4~|@x?A8jkn~H zlQy8oo*p1sSPl{e^*0WfLa^^;Uj1qyl&;J{l@kT9?k4TRGKNBvwt{Ky=m(DKljVMc zXh)K}jPIj6nkmhnik2K_H+vbO_So$xVaaTDQw|EAXbQc2wTOqjh@8bqr5@xP9`>If zOiovz+V2!Da&i$S3|S9a`N7aoxQJOV?1OJ^nWCeYxlrrdAz0~u3J->7quGNK!jLM; z08Uy560a3ZL#-IXhlgYHpGvNybR7TICV_u(1#T?T#_WZ;p!NDT_be@@`$J#Qx$h0R zi4A;pk5V)|FD4IO0Jgq81NC`5@MQNmw5QK1rfwr#9V;RC<|uF@`*~belWAAYzzY6AZhPr-pkHDGiP`rE1hq5MO(E0sD*y_E6 zulC=FpN)qRmqLxWzrhC{*qA{XZBeK z=9C4tzCND30T!UFdx6e;{;=D6Fs6E42Hh>@AX{>gtDdxF4MG2yh21`AFc=SZ9v<8| zv5*_xtz<5DYWO|7EJzxp03|&c46}*@1HY|kIha24#$GJt%^!{?X}qQ9KIR{p%=>T5 zMV(dKC}*_=;ucpyJAGcN+Kt?|I*leGf!#(cj1XUw|2=yQPpzyouSao(iN|fWx6N5ZtE11jja& zdVOC7f{3-IdQ*}Cy8_tj%coJ6@s6A5kKmq9g30eIq8v^z`AUg*eg2EQ&di((`WCUX zGz%7OE+T%!awsh+M&TG@t>ss+R>vXi=Rp_L$cn&;l%sSt^8&@Gq*CRK8aC{#3O+GC z1uy#%<5i`eo_Xc?QCbcD}OxC5~LpsR(38{h;_kCgvWD{N1pEwBwCniHubpr7l z#KPbs!EEwNYY>ZN?A(`r826f<2X+ggZfP{$3S5ihx20jsv=Mawxow(>ed(++0nb?% z;ggO_(3N_ey-KRY_~0}6+bkFt*l)*YIrKgpXn>Nmm28~LYV>(g4Q5qm@$bwj*yeYK zD~hKJ%XiDceMmQ0|Je^+j8u3+q8p35ZHaA1cRzE-MBZ6B$@}SV%^Get{xIBpI#jX z%I?cxj@e;My3hl4R{UXYvo?VDvF^BeP6=x5+{JdwPvgdcM^L3Y024L}KtUN@*RWI! 
zRrkQNGtR?vgNvY*QbTOlmvXCX3KaN$5Q=&|5?0f*T>Rt>D=$3(9)&fq-j~D3e*Wa` zo5$NdQ-qfMG8Y`!Wok_`<(6r0<$?iup!@9v*E!+Ho+T%svnzSD_Snk}gUUHvswAfl zd0Pt4vBCMLP~FNIx|`Wx=JYHcqlf|Z3&TLe&H=xPsN=Z5NZ8QfE7Uu^4<|pX(I#OaWPN(dpT*$W|r5RxPbm5`)DkxZGAu9NO5O{t{2q|3XP z8kK|)I)o6X;~GMo5Ps|TcRr=J+526W=lMR@#L8t`dBd>l%;t*>yz_&>EbL6B?a*pA z;mx0D_B624L!uAbe^WNIehp~c$T7MGEZ8X8}QWSH#VhVv_G((Si zBes{;@fTO6@X$d6+suaZi*HZkCqv4IGz}UxwM~252&k z_Kptw!J=a_M64ohe0&%>jTb}ryM3%J=aTq%FXEqnqaE6tKkyK-RL%Ot!iPQ?n3(v6 zxqj4T#^y9{pInQP4^N>ev5#rTzoQ|uSQWZ!E)o}a1Fv}Ui|wTPQrmxqu&IGOSlMBq zabJwpvJKed5r89>#Y2XHjV}N z>!~1f%NITfL$EV85G`0W7w-!Ly?u4?Jk$p--0?-ntYLzmbvS50NQ9=a3?a|o1oMYp z=bddfq^n$rDP~ikW!oj*HFg=aBm%q}>xF?Gf!H{&o*VCW#KB{Zp|$&DxHgacoBupu z3cF!U(Up%4>$bzNu|82WB(S*@;(c@{)|PZDdeH7 zCa-^E3|4FcZnT{IOz)lwhTs0?wv=`D)aw-GcK%}8vsTkws1Cdvc0n1CM{Uq(>~dBG ztFVj0+#m7as=oooZI6Pf#ZmYqcMUcrTk@if#5r4}K)sunq4R_)mpJUFel!a_rXE9i zFDtHXAVAIyRW|Z_A;_Bac$q%Ef0QPQyHYFJo!NBOstbgOI&ECocRb~4^nv9IQn348 zGW@qi75zx_q5-{dZG#0i1uX=_LKDdDr1z(?Dme8^C7zZNvpQ5EIAzo9&&2@EZ~Os0 z=PA!Gco2L~Bu(?B49I`#MOn^aXnA@Th$fuJ);QV+?Kmoav9B01R~Mjs!*k}Xyo|Cp zw~I$aD^Q|Bbzs10_>X$d@QkDI(4-zLw4-=a?RKWJxDT{!h`^R`bto=)#YL-@h)*8X zMD_P6tj*I0;=SpPPsuTDot4C1B^H3o(+EiJV~CPjAGt`oZFWO#5(men2Gr6!~)80*~c;#B%|M@$+QzMtd#6E6;^MI!G*Eq=>5DDER&G5j1rV}(d_ZW zNPHi!fNN_D;E!64PX?5t*=8?j_+`w~50iKBlSBxWN4fKxGveUe#nX9%|v>swG&XQ-s>O z>8LGR0(Xv;fJ18u_?XXuS3}Z4{_70)u~4RrU>ByJ`I=okT8rJ)^LWnFznEmauc^u7 zZJ>824_bBxW9#-!pzSzX)AtYqn|4#xR9Dq6N0Ek$xcJKgcyi|qBwr$JOic$9um~!(X5;Bk za#Vdo_eoSWR;&r(mfor87aN8i=cIyYLbFK@J-fpCKuoC^N7;8LdFn$w6pd{W3qB`l z7kLnr+wxI9EedRmQ{b-aTGF1IiNk% zjA>plASNL(=hJ@j@AG1@!afWaneIo)yYrwwe?FWG&%()*ZPCy-g*$~?VU9{H1mB{1 zV`MP*csLaNZ{&mJ&cP6=T!?LLN154?Lm-P9kF|7;w;Fk&rgsCAILE;l?*z0pbzln0 zuCB;7=Jk~ks5T-BP8}DZd+lhbvN%e4=!sC=@PO%$8O5445}Cs>YkW7e4E4rm;mE8S zDA^T)7na6h`t-r5v35OvD_7u?r{rt)+==IYP)4F!E=&168m$i2@!l{HXD9_j(xD2J zw3eDGhm)q#NWD<1tLWMt35He#hNdhLWVsRm_* z)Uoi5H#knr?bQVA3Y^8*&a`vAwJ!{xjP!B^&Q= zK81}Q`yoDpc&)RmVOfWQeCH8dD|8-~E(g@HyUna_T;@8*-*VAVFKq56kLt<|*ms^6 zdJJ{pvqx!SOI{W_QC86MJ|ocFwUYmZ6H(pjIS-lohM$ilKcvq#Xuq%=Wk*m8j3%%8@z2NBp#L=Lt+FSAbrQ`7 zk}hEP^oRVE)+w-cZfEv(+d*dT%jK@5d+#)0iKfRvgKvVlyDds9wsSBt(O2YamFjeGr<< z+~JL>4tl;x1EoQi*e#Dz>>juhv_GZ@##)Ksq=>mrC}!nB;O{e_KYKE)r@xhA`3AhS7x7eiF}yD&O>63GW+<~}=8_;@ z(>?^dSL|T+3#2$Nm~@Z>&VsxkSiJarGWbZ=@_ilTL3>_is?ZxHYjQQRW>NcL?Kcvg>b4oNe>nAf%kB6H`^>#}DG|lQ{ z(j$vtot!dfR%`*wZ+&1^>~8Y;CE(iZx#(t;#!7n`g7O4C=B9K3{NEMeFe{GvFV)HG zRt&w9`{9IoBUE!z!HOv-nQHkZFr4#Ru>HCOOsurP!i96U^TS~kab&>aGSmDs8Z_g5 zh_QbeG!_h_oU(V^p=38UeLMygBc$MZX9P>%b(Z?6I;OoL1LWmYZyfOejiv(DZ(uMK zuT8+!8_dz5DiCW(i?&BrOn>u{=#hC=TpSlInEgxl@WfN_Jt+dr-W7?%iKnEk{F61& z?6SE}H8?iDWrq6wAl!T%eA*|6!oyx@qNd@#-A+3^@3lhe zhB8!y+&9%8v4bxgEX75)XQOH-F}s10WVb%mr6br@ShU6xkH!!kG&z~31wJ4+krFlNQWGd z#)371LB4OmcUL^~crd=17LIMr7D8tJ8c4~Cp?&5Q_A59Ax)%k&Qo}s7zegE{yGLSv z+XZed%fY{X$r0LvP-C4Z7oB)sd2sl8>@IxBO9~5NY%gt;|M5r|F=af8nq$h(M@+#X z6?SMfZ7)RYQs$wiA8Kxy3D&j3Y^oCnFI2bBi*>S4&E zqc~%DE!KZ4gTT$iL5yF<$L);)%a5g;A6LVnKZv(&f5gQ6v6MYZ_QjCd(pDkbchRg1j5j$@^DBMM0 zf1!{Iv-7~=+6k~-Tm$l5n_1Vj0?-SK1d|v6I&U8bAJc7MJFtoGOz(r{&!wy_xRPZ) zy39rYMS*VWF^pR{3lEw}K-v2|zgD~#)%U#NEx&!i^FbM|)C$Chz0#ns^IUwSM?4YV zt)SRp12*%IVn@HDI5^t{CCZVceOv`fQOlt8paP6iO+gkkfMwnAz`$*>z#~#|jS+{= z8Nq_yYhq@oy75S5dTulQDw7vXAf5U|RN1Hoa}(xc^2}Hae{=}t7xGY{@ut!yH3jn3 zcZ1*Y64o%YhxH#xv-}BR&|cyPYEOw{pZr}Mdtf@{Q_yTUR)ju({$h$Zm3UpJ2Cqb` zQI7F6zVf>j+Fmq{|`EhK6HZY#4M|MYGr4m9)1k1Bvs=%0sP@ z*gh)@_F8O1tM{eCvLy#FcBMJ|I$Q#d{<>5%F` z;nhbN0W0y9mI#v%XF!U~jpi6zajd#0N<;P$qXD?}`>Ra+FF8kUc(5bH$j!N1&#RuF z!|a7wuu@(M_d~+5^K?ERJn0lVPPxtWN8jQ3LzK|^=LUG869+oizjMtyl#jG#2{atD 
z<6%u67)kGctDS4X(5VRKxz9%FHqs8}1)-bYSh%r%6_)zNQ0A7Lj||&^CV!-}Ki?=& zyk80Htk!_{W;0N1+s91I_OKq^b+9Hm18$w=*nPSmA0MMl*-_+2oD~n!Y+a}c(<6?J zGSCSJO=T&0s-5uK0m}Z&(8TZ|_hdWRnWF0i`!3_NO?HFS;x|8p!KB^WAl%)hJnhEbK+m-*3fcw!aMKiF+iIFCCv0LO<4s~|cOt!j%9>)UL0Vvn zIh{kezsF9@yt)(IXs?>nycajB_x*2>fTKNEaDWwV$mXRv*^lxuvQ#>@`cLh-r19JZG;+aFoz z>NOa2?PsH3TYoV0y2YPFp z#*m@qgpFVRL=)B1%v$>p9P8(for(}4mY;zUa}uC!{~*4rk{F&D@la>s1J{0)KpXTI zj!Za=BU@;$n!qZ}g7=GK7gJttzr|b@SHmrAw76Wqfxj6Z2-Z7pb4UMFur~IElcLEO zw!R9L&plvo+Vb#cY$+Z|J%epe&kD`dhu@!(h%Wkju&SN1I)6{bhQCL1kA8a4Gszmv z+B}3wSCY`6tq>Y&>QE`NAN+9p3q50U!EBBdn3YBd@(1yPejnxeA)M_MsikxYgC@ z{)^NizU(w{v9B9XeE5SIhQ4DG&vyRxh!WbX4+HZhNj#XeJ1rM%AbU0Ku8wJ7qHPd& zoVprca0z6lj0T50#8*;iq5ZzY#Ni=6e|W37^=lO~nUjDb@4iA;rWLgO=Ls052cl1u z{p>(9G{Zv3OIAmXgCE(w`IOgk=>lX@K5F9pNjz4(3<`a7A%6EcoHnfyv24SOpuhD145PV|?QHVU_8-LGYLdR^ zNCwvim*{d1KO+B!3muLh+jVrPX)zc$V)A*zwacsJ-Loq%t{B5@;v@!K{!H2 zBAOM96JGyghf^|=u;yI=S_QWY-Krt*{0y;6Z_33XD}jrq4(4TnH4x%u%M3hbQEsCf zh2nu$NJY@o?2CYZv{1Xnb<-N_YZeuCp& zXLKlG};}#9|?6n zZ9L?e2XFSZA$^)RbIk7#bDC8vO=&tN?hfbsjyt36)XU&Egma_GeZg>AJ|@d)4y=}nEqe6X9C{MPUv3t9A9Nu`Q?$v>iXOOe;SyY0_dc=~FgRXD;7Ya+Zr@4^a9=A-q`rnA6vESq5JF^X8B4T z?YbtT{@Ur3$@`jp-II;8_6>;JdYPVpt7oT@6R?oqo`18!B!Iy2vwk zT!60b7*LA+k4tvzLVn2;UThqTmP^aAEp?{Y0#dm1s?F##L!IZeePPN2$t#AGA+hix ztN1b$rthMf;hTbX-dmW=bre_EY2$6(ZGr+O3XQ+Jd27aY%JNBr+*up&<{<^hRac?( zDb-&;6|Bu`zffJ~h$lYO}c62 zBn!s>5}T%SFM4e9uVuK^)$CKL*tnl|bXHcueae&8zP? zki{DFPf=&EVcRJfcMz#|7IVd&ktms2Csd46=J_K+K`*xea%L;BpNncylXeCkamCDI z6LB!`CQmetV>yoQ@XUd9YlU=nK1VY7c?Yho+l%E#1=Bt<2y&Mg;K~*0AgR-Q;of3Ftj-9LRU1i+h_E;<%5hsCj2StPHq}DK7?fHHK&_&n+|01Wb)6wFH*vN;8vprQG7dSloIh z2&7{sfoS4AuI>F@-1+xbo-s=o?JUZ{VofWnsPJd{*@kSZZy-(=#iQA{q0pJ14LP^t zxXG-Oe3^R)^vsXPL!K#^R^y0;qoPOy$5`XmJQmw0oogtHz^*_5tC&1N_Wmd~t~kz> zN7tj%H<;_WuQ3I7UO3`!SK1`X}1?DY_GdaTHw{7(G$x=-&fN9 zWERN!jbk{<2OAF4e(1$+G;Tf$E^ftG8b-WG7Q_`^p3LlLTIJPga`bnOB~7BQAm0*# z8frgS{e+94>9h%aIyiY(KeF6(;<$u%SMKaMLz$U~-4iT9;W|qDVK}0B0KV!h0SmQ0IQ5+jrsQwJLRhtv;dn3Z65qlJl7VW+Kcb|5M^$2cSHAIp$YR0lSJKuvHrmXA8#T zJ~bQ6RP={DVkNY=2)y*#J)ZJ0jHD%rnWsF z2LJUZ3M-Pqwx$@m{QrbQ4%B-^??CmL7hz?60qLJ!G9!s4X-sCq7Vmxd>SGeLhe&Y5 z8f8rW9uEJ_Sb=)_#n9lB3VxFVS&9SkcYgh3Z4rU2)b24mcy$o^nSA7mZ(qfpPo&Vh zF%zrK7lVx~6?G2FVc|S~G&}R3SlxiKL)$$-vE_DUrIsJ299#*5E~Mh)A4d2!xDdj= z+MsMxK9_GK?T^V4-g!s_j^rm)Jhys+WpI0kCuR$Ng|U~F6MH&v!1VtLhFXTkh#GK&gjHp z?B^Fe+w2gsX=iZtp^2z=SRH!oq&VV64$TAKbDu3+xug3C(jWH4ZkZY1aw{C|{;kD| z|DN#*wUd0SMkwac=dUg%jBB^7$HK$ZU**KY_8i)QT_Cnufg!*8`T*WBD+krd)sXUK zFZBUol|~JLY?3lDz4sC;Y4T;B?C6hQ7LLWv`6gI)yAFzPBnlzg3V=H$T*>u1_bD=F zW~HNql*L)-sD272Eg)X!@-D$}@i%rz_Z-?cpND3o?A=|Zl|E-@|t*f zkSdT@sV|s+Y~Y&>DsWSzJDLm}!&D+E%hxY~n1+9Xus#yMIpm>rp_JWpI}P&06=LmQ zF-(8LN4EH=4|W?9yJj1J+&WBLwICAOdLiZOz7>`&a>RxW%GlUItj(b*?4U|Kcw9cn z%*_^pnsYep_{)N_$GVyR;j_fMJC8S4GIaXX#M^Z3d1#-d*m>$Bx2h-oQTBLL>^#VP zq7}TkJeyCpJBBYl(EN4U254;dg-SNQ0+o_SjRMP501L zko-gWtG`!}pgsom_bp_d7b#!0G7@@y(8Ynoo~_V|WX-liAlWk>Jkz$IpZ*~j=t#5Q zowS3v*C6PxRAGMOZP7WNvKIST@``mAFbu2k$tGg7-HpO~0fAtjz7+D;2ZH|PzOZBR zQhYt799^9>0L+@0NT;>ZqCaKxjC)^MR@xiQzpX~4U-=mDcQMAj$VaV+R4^R9jEmOB zi|=hMhQxVFAo}%-w2CRDLlU9YoYi2M?ZmB5F9Ok`8-m(Bf3`pKFuor}?C`x?FucVU zZ2ykpD&bN{v5W`EB-U@N2G9Aj9Sm!ROBbX$qK%Cv}f$q}};-*ol*rB8kRw1L9Vqds8K6$!9f~0opIE~1vp7*?iP@Qeil2M1N$n#yN;CzPZG+(K4qto~ z*O&ME7zZ1yN1&|X5{Fy$%*< zZpP03{ji1fjPkWtxORRt7mdh5HNP1UGW{~s7%~^sz6M~fT?JliBMbP&br5JA03M6p z3YLE!=T7%cp&{Tg&#%AB{k->s{p;0ean~7oyh`BbXE83(-Gkyg$q?>XgE_SC&0q2- z9DH;MjJ5}X(rJ{@AKOMGaQ6##2JBLyW8S3@Eh#p5T@_`GLh@rEVm8e@&7WFx%SWpQHOBCWikVi1g z5j6LhgT{_x=6Bo+a`p{jPSGYP`H=@rb?ubBRYMGqcu+i#0?keB{0}Ta4@Ch6_B)A% 
zM@ms<=mI^%w}OU6UvRs4jwPDUVLd~x^DiZnQQPnjkZs>dedY;hm1IEDK1WpdE#^6I z=1~Sl6VD9F;Idx5A)puer%&ruF40KF7jYwS;z3>1s5R#mD?C}qIm&b>8V4#Nv8-Y1 zGcFpEg7foFqS=+lVh`7R4Dn##p%)J==_^>BuQGi1pNo!H138KfjtgJ}5-A>_CM z-WBY{NS|=(S59HGC+Yky58xJ>7F^Dz@`f=#XnCy;mLnrsp?7c0`Vm95??cnXLB094 z*Z}M>O~KC13&C=KHn1gg$Y+yHN@b%nASnx+to<0OVVm$MlS zh0u6_GE=X{iR0aCfEDH7FVcLqJaOeOO3p&pu%Enfe-xK}uw$b32^H!uSA+J9Fo^v= z2)@$qP>hQqN41L;J8vPa(HS_iat*2}1z=I4A0i@g#eH3Qb8aJ>aWDha9=&6I^e&?I=66h6r;8hW z8h|?X5v;xRECA&Pb*vBMLlx&iC2=dfw>yX#OD!;p_RbYzV$`YjrmT}HE{XqA*>k)E z%51_>m9)4v4M!oVZz6tP9*m;~&%|c&EuJ&8h&9c%WrjCf#6|@QW@Ka!vNxA_&VLgi z(NBeoDxOw;ynhL;Tn6!hcm1e8Nvw3jNnkeor`UeMR8VnMf+7Fzz{J!vYzcYGCMk}R z@8T4IS~zG-xrA<#Soqbe7%Ccvf<@dJ?7naetQsxB{N_qJ)8(uZ3pVt z(Dw>c$JXyU8xyT-E+ z@j^k|u^f~aOkqt~0?3D_h!x3^+;{O*G<-){J9n1AM`O67FEmom_kJ+{su`;hg*yeUb zP;4B4*T$v5(}YV{wVZ0bk5S-fFb|KJEXM9>DNx%l0YkV0w*IJMW{S_u>D(pgo~sH5 zZw!e^Of!ejYl2VS1t#8&pxthZ-u6}(VHrL03dU+imnvx8sZ{(oH3O_!jYB8F%^yiYKTCujZt5{_q zWq5obZ;^B=YAim;9t@bwwZ_5PrY?BXk|!JRaBgSPT~ zSv-bMxD58KXFxK?P23Z=3H1$am~VP9ie8Jbanmo}_K31kRs^G{#onZCOe{pD$6(W_ zTCsGk9;EzkWzt2(*wVS2H#|uLpQFn`acnqvzmEsydFvPsTmqqPNg#J9;d72%L}lF< zOs>0$vci(#$g{~?m%?B<-=_iOW3{tHlXeMscg-2Ib3U;iEg)-L5^1*?|c3T zK6Tc>e6J&5CDnyb??|H*_7{3My1_C`CtXlKR{U2CZaZgy9reDTmgorKdSP7StUYKC zI0AS4&ccHw^jvhZzYG(y*?00PT~`Lhw0&ajX??+Ux+~PZTLL|}AG(`gvc{|@{QH$$I2Ljk zTN7tO_^fOg_@xvgL*r?GohQC;k%+pFx1q<|ZQv6f25pr~1*6ZAlqng<)~2Ol_rfYz z*j$UkSsA83n}{;^wJ?7dY3@#Xu!qFlCrLVnOLM`XY8p(%4Dyro6K9V7%BKz-i6WbD zLG@u4SZ*Ey6{53z%xlU^GuDMx!!YK0X&_1l1o3$}U-PojL?VjJH7Ruwa(5o$B<~O7^f9>5tv^g7p0g$ZzF1{PbLuCY{BE`YIIp zR6zLr5H2~r$y7UHv!IzAgYxeE*gGg6=DX!%_8-w;KQsV_3>bnIV?MI~>oGUC5LePL zn)a)c;pE*s?DF;om4W0T_bP^-fw$Sfp{Jqf=W5JvHURm7H0*Kt$Qxoj;oyZxG+XQ> z_TY=5(e5s5jgN&V*Ai&YP>TneZ7@Hlh7}JM@RUOk?Po4BX|=#p*2i&qQ-Zj3<}9r8 z$Orvy4s8J&z_E7(e0|K};M+txBThnFtPY#`wHS5IH*-7VQn0o;M82Z{)O?)=r3K4D zyGIQAr!=s6s{lrim7%ltCBIi4K{?&)(a_rt8cHr>OS(I}nsbNkcAt)#U#p?z=4Te^ zpM_;V_MqF^MF>@wxtV&KX+!NK?y!Y=v1cYs!+bvPt$CDu1c5Nc&m1$?t-=u#OUX;q zpHFS3S^m5{#!JUx@0MejnL8IK_kcSp)e9ACyO`sq$E>QX9<2PGp!7lzEc><{+x}h7 zyV~c2{oHQ8n)DG($4vOCnqWA68*$UrER@};5%;6ne`YHJ1}8&Aaygz_ngI?bDrgu~ zDd?0Mf?0DfZcrZyDyBK~E{+99<-Xu7GC)OA5tl952HM64nZMy>YQU6_F#s8{qx;Qq!7094s4}gZH$9F8+q1+N6Gf5MHi+^F1mX-f@Q$7ntYbUP zx2qm-(Uuq!*X2W)d3%3QlNPe}#vEKzqmQo3-C<3G2=!Cu!_5;Zpt(>3%qO@(c*{i8 zQ2)fNioL+h(}GDJSz!xMuF2LxOtXl-W_Jnd>Ai0F@VfYYAms&U3<9SFA24%Cs8p=t z+`5SJu!GB)mFfiMPCfF)x=@NjqzL_cL^tc&cWF0AGxcA9D02{fqwt^L-^Km{N$j#YUp;85Fe9l4R zdKZ-WofB@#)}i^%BnZ5J7Mzv@VwqMhRz6vQ_I}m8<(ez=-)n&_N=97Gc^rgoAqH~y z4k!+eWbb?BqE2)d_jIfye_ANFeSt8*z6SMTj$omDHg2T64WF1A_Bh*(^f@vflA6mE z^gaBiWy5CO9avad2!1b&A$_-iUZ2#lWg_Lah4;lwZDR5KJqE_bk3?6^I56)u2wf&` zLeXfciD+*hG}T;y3;wNziVhd_^BoNBeI7B{;ym25xEQ@_mcmf){n(W92V|UIifW;4 z{MFVa%sT1>N{3gI?s5YD4}4Z_eWiDx+~p9 zO0LYTXal&N@~1iAEXrt|0e6<9WBj8;FzDC@&H0<*n8j2y*a%p9$P+ZOi0N{uiWx_w z!0-v_`1$U6+?9}t=el#z`NJ7B`OwZBx6}wVud~sR=DMYGXE#<>E5!Q-O;Xw zw#V;T)_=J;;(8G=-p!b+`DncJs~bHVEq(=#7O}{^g9|gHSan85#~2u#_$_ z*!+sY<0GeH@sK9wE!_k0rzD`1-pTEgbLii1!Im&1HpjyNuWX`w*{4gaw(BTYi3kU) zf1V4<>$WlNf^*FAdI1mZqObcd;@(P8U_G0>TQ1|V`%(<~J(uwxHsqD^uVU~%n#tXw z#RliA0FL#CLd9v!%nAYV>oTxy-3!g(a+b9EJlY#PXO`os7pI!Iu4OkgEII_~Rl(T# z*8pbNDHqEWDG5ub%dD(&l7`G}Nx-O=mTZB3I_^INAzt5ppI3@sG)6+io*7;M`@dYk=HIvYvhSnN&+r0OK5epqt27*b~2rjb(NYRyG z@z|Fv!z&CH=vm^kCr44mdJ-<0PJV>MKUq@(aoR>?=UC1{rxB6FXL-T9=Cm*s`5O54=UxmA4g~hc5mfg-&!vVX z-09s1uB>4~p4Bi+)JbM$J1$mA1BQa+y#q*AZejguVqn|M2@EuT9&5K|UD=aO$O zJm;_rH*OiHD*jmp@Ogpa!-~jttG}?cG zyb52qnPse4wyp?Pw@Xm@-F$F^QfSjEWIFmaOmyHIlWgo3PHb9-sS7i)%pn(=V%{^I zWf|Oja}dvsON7Mt!_o0vKQ4K;8wwxAL1{=Z%an(+^RC-abSbmq!Iu#1IK2v#A9gdv 
z#_y(G{f>dxOmp1&`!d`!$|KgX4a`-EN2MLUV4!(|>lvS6PiTMOG;7obxzaUqIocx*SkN1$8o0%mtkizVNDVC~$!D61{uFDgmTvBalx!QTUL z?k%c`iUL8THP%#RZw&jOIuG+pscs$egV|p9!{n0+IO|eJz0Y~J=0!3XIe+Fp*Pruu zJ1(L1oFT+=@PyNeQ79Sri%aL}KqhHQ)cc2GW{fgQHbp}7%$uxbE$v(y_wwM4$1(mx z7*tH!0VypS&|AL}wCh&0*cw%sHmD4hvyZTb*K=9;0}k5FON2t{R`lMm2AW!i@usnT zz{7bC&ghH+tsfULwCgNvwG2SB^}m?C`$)j1Sr|P`6CL|(;2yJy2Qy_2^c*5hgiwo< zl#WBnkOkm3Mb5=^HuJC5;Hw}X@DPrQOZuciuUqr6r|2-KsaLVU1Pi>da1Q#;EXPhK z6`ryro#}TR01(Jo*u>%OzOeX8Kv$_wcB%NyF-%>^A( z38+nb%bIQt=PDUO487oeh?J*+(|z}+LZXyFs^YYwrtx1A)>zQ(|?s%wIT*WBL8MeSvz3$ z4t-QxRfgC%6WsMesKK5IcYiwJj?$^vyIY3SCi~*j-Nh)0DB|w@$j8*8i$~IiBcAaD zGwWaw`jD^T%Q;hPrF_<~;5jq1o+Y$Boep_hW@1{^EG)Hjg$1?5KJ%o{q}X3PJ%jjh zyUkfhfC>C-qlX?=l-H@I18Vnamr(PvQd@W^wj?%6lfyWsl0@9QU+0*PJ(H=f$VHEiER|k(xvBJ~+M&$b`ViTTZ zph_IE&J?*UWCA?{=Y~oX39+Rp2O-D%KV}eV0}Ai&Oikwox0-aD)kdvGZQ_k<&mW5G zR%K&h-bs)WGjNhc25f~a=x_(nHqv1xtGDs##145QPQd0!no-4%ArHwJmXZ<6yGM63 zk1|j4P4$J=t^;7ZA92`8H&huR2a~4}%xd*#7F;Mpi%I{nhZ`^At$oDf@OdF6AP)E51c?nV`1#rOXtZ4q+S!wQE{9;fwj zP%^&95+_FzhdY__N27RKld1Tw)Ei%A6hi)%Vl?mw1jp^>!i;%UaQconT6&y>%g@V5 zi(k#xwB+Cc0El_>N^7u|~uKt{1ltslcX21zIjX#y{kjpvK8~aNOw*oj8Yb zUfys;gA^P>fIO6{q>`|~lGnshyw@kZ3KeE>C zP0Z+BAYZ0diDt{q;NlV^6ir(oRJ;sjfd`IL4vRa8JZ4mM8IL9%=Vq`OdjdX1yQB93 zC2afnr(oDtD)wm@$*yewZAZa@vC}b9s}h}c3Q%ij0Z68; zh8cMmAz~?I*%C`4QT^bV(QNgX1YRfyfTTj7iPG+we(kG8$ED^UAoFtEv zR8Vt{g%-zO%uRVGaV?@T+y5x|ys>0QoAhvK!DWcrm5HVQoa0L7i@{9FnbO-SxaPni zT8wVj zrrE1ZCz25V5JKoj&LM>GtmlO{U9PLHIehop_qy-TZO=b`DuMkKj+mh(NWsFsZ$zIT;-slH5m>=}M|y$>4JB?>QI$VauIp)B9e z9#_4}M1}S)Xb}~{v5B!LcC%4Bt|gy((mQ4{pE4b^PZboaaocYdpmBtzcp;Q1d$wj!vp8C3qwnOv- zO&B>eAGQ@{fYEA8_;@xLTJ4)zez$mN->6`!gO}jxLj@*v=2NFgU!M501Vjau7Tws&QN_qGQo(QI@T3!!bv9pSCxQKXY?#uiAVE6X|1 zB+b!PgYqqpK68_EE8)<=t7v5qiB5$4*OC!zHr+Yj{vLoLw+~A7CO2GQ zwgne&@_J${y{OtXZkL(}lFauk*k zCrstjLl`o<96CFL`1c*VQ18!EeyL&?w$~d$4yP>54MXaS5^?8Id1&xz156!z9wm|F zHQPFlsRA7NZ0$5O>?UM_tTLepH}kp6Mvzy;mTwq$9>>nujYg(RQE)Ovsgb!HRV!8r zQ!;ZQx@-pAyXb%>;bq{y@D3A;){2`%<;8t? zT^e<=BwjMt&8ENm?<%hCsKH03llFAvE*rQf2nuD|V1K0?tQ}K%#_Nw<<+NWYh|Uvg zXANV6=8eVo>vOqi%R7s)6V?!yCX(x_`GUvKhurQ(D*Q6b1N|6chS$e3(XhS325|y3 z)>N=T2`3R_YI(zVDYtnQ1|FutO8=R$oiyrFbB>QD}$#^@}6?j=Vj2!#r{6Q(_(+)C>K zZl|lprgn0CmZn0d+5w<)QV6BzJfLl5q_Ry~CX{RbWN)9xLhN4~Yz>WP)(QnizBWh4 zbTzPTPX+61VZ_Nwf}pmC?7MXgO#Bdtn<$q)>=fdPzwzKvnMPV%9BUGo16caAUJ=9q z*|7>Kt*r@xVQsR$BW(s2Ir$AfxbG$=%nb-6iz*SzALfOgZ?A4b- z)cSV_wI^-k^<8YrIQo9o0j-65z)|J~i`Feb zzfsPp+P#~1ZqLX10f!i#y~t{}hJfr$hOjB+GK)4c2BQ;(*j0HEMok`1nl$x8O}zl& z$79f5FBIMy&%iw?XVFW&KUBGgajV2kurn#(iTB*`tuJ|I2wb$!H9?aGEb3MEzsk1j?Rm zN|5-JgKqD=@NZZ#W;{F2(=~UY(Xm5d*mW})x$K9?w`{KxLJmJ%ic5RMo-La3nxCdADA85L;bN`nS|J%YLBw9 z;trkP6W4RSTk*{J%wnElMB3TPe4g(Zfp>z-sFUmhB=qkIk7OJ&=SJgFD;df|o1CVZXE;bYlay&))yV@in_l;W;J9QVIz6FG z#+(h%w(kcEJX3}}BeTId_5wB!tPpN?IsqM~q0|wh;*#pwN{=1uz;{C=D-5XS%@4aN zUl6NE;Fq4*Jco!;}_|eKzD#&#*N#Zt4LCSD!<54-4#GScIMXtD*7YPcG>! 
zEg3gy5Z<~g!8UPjoT;mTaE9#RluFo1!!`ofc#q87O&R$p?ur{xM*08?JYN$EI^80d6jIjg9&(bdB`iy zMq-%pEnd4o9aqmTLvQW_GgK0K-^a1Im^jF5pN{QPAr8Mu`pb>wY*A$vUYMVYu0vO& zqVy?C>>mz^Cee_3mBa6!{n0Lg_&tVskUi^X&_TMa^lL(KVUNUm5e}@{7u^og-=dx$6!*k`;lXiL-H6OcoS+=0F{tsb=S6+24Buae?Ma6mti7 zI;S6Mx9$dW+a}(xtQvKf#<3w4xx@op0m6rrJ1~mC)Bq(Ij)=m#vxPW@v>(~)EZ(ZW zgN1ld#(vNxc=YlxHdjsH?FG3^RKE!w4Ytu8q}amEomdaLRWtj<(;n>P<}D*|^y2 z8QY%|3-7-)a$}oO+~nFk=okjvGrt^#@05@b8V_}UO`*4OCPsfdfgW_%cN_6B=5( zDWj`;f}~(4bS(MBKEM!^|0suj_5IQG2KDqDL8aib6jM98BUpz+<*-EjHJf}m>W9F0 z-Z9ktZy7h<5sRPJoxm*PD=6@+vZybhOr3QDyK_&0yW0<8XPr558g*!|>AYboaZ+gAf1lN-#wq)y8R46s1F+We zB(#nq|K#jEG|g~<%i-o2W_ba^E_~;rQxzo{+jrw}*Wb|s%hlG1Us1VkF|nbGcZ zOx#sXDEnq5l>B+awOtKa@8_kMv1=fodV_QvgP%(8aCN4y$blWT$)Hz8J-&yt!O+cC z*iku~*;INmn-LqCd}$&E?ft;S>y8SwGD#C3o62H8AH}N+PD1lLUD#bnd*kH*CcCCX z+4@+}q0f(GmoA8}y9>*g9Yt-2Jl^~!j%W0vUCT!?a~!Edy|ME^HOibjlFl+lPAoxJ zC+w(N$imOZVbKSK!om8WR+P>SLa5heKs8_Uiu&Bhw`t%k#h%+}mJK~7lo=$T&B%0i ze33VrUgNOuPy{+#l8zCD%3)cPP%tIg!sLGjFl_KuY)+AZ^$#Hov)h2GNoSxVA&fcC zKZTx8FW|e9!Pq=si<^F0&P5APD$?jz+aXE_9%-lCR8S_IBDS^#`GM6g0cc zX4=d0&~Cv#W<2U6t0Fd6?W+RxJu(tHZTCP%FI#{IW3lIzRcLdy1hhYgLuY*=t9_OZ z(N(25-(?-nsHp}Y|9p&|>ko~6J zs7cSD_FpNeW?cl07zgT$SjWYW)38aWg{4%KqRzW45ZI;6aB*=#y)=7f^HmAjD=zcS z`SjWHw^LSGk*`kAQzWUOy4Fz%F7hg{L%}gm2zgCw-q!DqTs}g#c1a#6Gn%*fv?+G$oPH&BE(TJP#}dD zNdaJ-HG~T;Y*mhSCkBJ`+)T5g3W!^g3*P;7>F@c$5^}3pot7`m`Q9G({k!aSM0Rt%veDEM9?4T`ZmX1J=un=YH{baOOGXO=mQY^SU8l5B}Y`s#;nkJp+6}EEF z8dk}@r&mBud^nenaG~Ah4uC7uz;~YyM3l4%kEw!DT9tzRuU%8+|A8Cl=doDo1(Vbvjc9IKj_bsVQw*-%- z7SUk!Koq7B)hK%NYJ=O%e)vltRWKnu&geL&UGX ztmouCsL~J%2f|g5r}}`&oCt_8za_lp>w}BGZUo8UGbOUkzL>MDin>zv&RpSbg$nmS zOtQw2$^Q3I*bsh!SQc4Kx4sn2kHs?kCmaoN9jx_Rg&jIO(BnriJkG4fws~ur?qp(P zKB$C*1>~umM0b&bQdsWdj$)|+)Y}(;`3HAipO?erRj1kf*74}mSO6_&BCuII6(q-8 zl&yhlxyFx3)GRxLA3KRjc%D4+)HN9~r9Y_lm6hm!2uFc(KAZa8iF!g>gdVxRv~L*3 z?SCGDmN-)s@7TsW*I(mpmgJpzwF&Ij|Kj?VDG=64XUVda;MGG4HIg(wc8-F$Zq)nq zIgJ@Ef5GdQ9#e|1_Q00iqd;I?OgXR!ybZb?y2J|y<$}+H7z{Pe zN8cXJ%(&_ZT>FuMlParW{lxV+$0HRQW!hYWsMxZNkGS>^WE+T&{HF&o<619*^}j6M zkZ%nMR^7n6;s>#khd_dLDRh0`A3Z)Eg%I*tAIUBNHNkPNUHFsN2Q1^xbpcp&Ee&)0 zEI@FezGUHqDh#R(;kQSVw{*b`W_X(N{iS00(aV$a zk$xMwGw%ZTr$=Jrq(?mEmk94`=fTpCVPG^h01R|9F?%I>K{nfBeYYWCGyWH=*<2zCMn7SpxDCfMO5MIukhF`>~C{V3d`n!aHd1*IXVYL)%E;n;K*UL;H3gX_^ zE`loPsfE+TsSvZHnD{17l!9x$;rtQuD?eDr<%u?25ac{_fd3fm^y~*$e~!h*kL#Ja zSrt@;g`nfSzFewS!xz>jV9vKtHa6G+vroq0?S8vZ;@hL-mR}r5);L=lWU>twHr? z1HkPOpk{9iYdb01ek%oBrsQF2*hDmQ-v@rzV^HT7J>MqH<^kPfpv-|&mU;*#SejFw zmv&r}qj3byy`B{p(MTUz{JMDjWmK93HQqEpmkW;@RxwGVz3 z9@uprrDscdO_dop(2ikt3uCak%XHq}Et;E3gO#53wA-05i3!5>S^cx^AbxJb8@gWN z`pTjB<*|}xkBdr;2I9DWS`SCA7EwV&APdq!@C*-z`d#Ii5_Ac*J(t1$;xue~b(Wco zZDg(A%XmmpGcOH=-y_m)Bgs0PPDm6U(%P86L_<Nj*|6T<#*_Pj%&J>rw#CA;kX8*vwM9wDO)j0wpV{gQKc1 zGd-gKO?er%b|5yoY?z$YNlW!sC^(DxdW&w7e4O!jP{h;@9A^&&3 z3a=z;Ba@Fuia$~xT|9AMIu(;&XW`v(B2-MD2q`CuNjFWyuY5Y{$v44(_)EAUD;aJm zSL00Ri<@5_gWsYdsDHBr8x|x%eZQZ|0#}Yk3t#a!(MQp2gBOfGauMsgaV8R#GmYMH zXtkPNrystwKh!{jQx`$*JrZ(0Spwa^YZ?{Q?Qxtp z%;*IY%KQdKjH7$nPZ`KMwT%3NUqV0ED>(@xTwK(7EIw)a%?>a;ug2KfxK@lcm`L4)*mG#?JL5sW5^>J>%p6D zO~e4>{UC3b^2u=pD53YtV@0+wAx_Bz`BN0uR^-h!-v{nZNz6pUk4;idK*6}n(3Jd$ zj~u@M4KwcX)}uX{Ow*bbz0XC_?K-CYqK04pNSd>K08Z|=5Ead&A7v7QLR5vO26-5< z=n>PGPQ?T_nmzYuaJjDrpZ+Tv<$8npYSnC<*OG+xZmF!twH&l|B939#P{`=)4jWDl zN7Jr;pffQT7C?O8@{w+2p^E@^T25ha))}>;kIR62scn7CR0f zhKOa^;IwKqypZ-n@17sHM#FT{i)ikgv>K!OpT$*I#3;*&Q;HrOf~#_W%&Clqj&a+$ zuZbKc*sJi`Pch!;PW^48Js_kg3Qe}@;XB$vcPN60nZ5wjC#^ukQYECi6N7AjFgBNn zxW`*_Vh<_UqR$mLcW@-WN(sT$fu&g4cQ_`dM1pv%26XDAa@CzOK4w!UM7ya&w2wM? 
zMRkX;fl}V++RQvYc|%Dmc}YI1K>cF`syt&$B){I3q^8oG*pMM?jt>#O`j&)aU#JiZ z`#|R8lQ<}I2H3w9&l#f#M z;BCJpy!nfTu$D4_GYZQ1pVCEWUYQQ| z84~gf?TF*Ahdpdq)1Le=)*_ zq0=BHI|~-xw#C98q=~0lVxPYeC~r!K<`pXCmGBS@NQ*@Qz7|SU+qe!ZM8j`FSg}tA z5eHX6Zm(F(xfg{WD|10S%7Cl#FP3O)XfxD03UbHUJa1_jHv0Q9m03SneCY>oSe3xE zr|9xU>wHiIt3hz>95~HB$(v%y8>fp2AzCOZ`Njqg=#PC@C8BKj zN9OBO!~{Lng_HgkgIxOs?KRi)zRN~o-Six;_3|QY&LW=AM`tdpjRjxZ3SvooVawMG zQ7SnJu4&I%%0&f;KX*ZcCH{PIY#guSw(!4$&Y1dcF9<*m9lds8!zm*ex`x?O5Nx-tLb-1* zSi4~ewrURnk7pC$pGzf*yHmDy>>A28t1v$&6LuwK&@NC25wDCv=1%!E zlVcEbj`}hx^r61+f$;RcvzV}-{2#6lc~1o~F9z-e{ZV$PR}%}`2Fu~@>-ofdEMch? z>0GUma&ZG=`Ql|~A*gN)Mu{^(lqH3xt0EXPb|#)QnTDNik>dA@E%!39aepW{?~iphI?-K^?M3Foh5-^2l`{{`T3xD+mlcIbBuNXKElQ! z-JydQv#TotL4QXMoeBQ1<4zpR-kOHt`OEOV{~V~bcSNUcDn4bNaHmFi z!Nw$zJ#cVG{n6x8n?bCDeaW=DTLTe77cjCJ0f+467~u7acYc*n=gLAT+_aULQn{e_ zR051f<-PgSK7+PtDo@h3~1??puelY2Xg%EDV4I+h7nJa>C8; z6R@>+H55ioLC<9YxcRdJq6ezc@5o{P8pNqFVq7uI6y*OMX3Y;`(RhC{(?0!MsPdo5 z_WU}6rma_*W^8xndgc+AMQia7K92b7y8w5TDsWGCKWyw64dPN;&dv|S%a>>$_~0g! zInEb)92^5J!b#9%p@jO{wfx18vk;*c1Uik!KoNS1nY(7OP37?za)*3?1N#a+1diZK zJDS8W8P3-{51MN=VbVYsTv|{Lug0Fhs`o>oZCI_a!_pqqKB%F5UZ*hRVlHt&<1nCi z4=^1T#H2OI*gBX;k8*~PQ_IjJJ&Q~J#+O92Q3mg^HG5M? z*=eicyiO|^Dwl*|)V^r=Pi})AHz-5#k942EhHS;5B52Y+3z_5Aq1n${Oz`lrg=B<( ziE}CS%7~Yu;Py|UT$YATy)~fz5%JIGjAjut=fIk$?cK`1$XUn%MBQL;6bxZ8Vl zKy|IOL=dymqH*#Cc5KZ|YPZRNN3uQ(0qJ?k%rQ*0>7U)cVRpZkO;l`3!$=BctMZmSqX}V zweffNPGYM5OFlzK8%;88(3^bHHtNK&Kd%Gr#+zu@aSFBHSg>YM2z0XuCI3JcbZw(d z{k_wCrHZ^OAA;e2*-*?ai$%C=0M_SISd=^vC&q3>uf^mmx^}gsZO{Q|kLIj#eHg2s z7J*t%$lIbxoO*VXt<54&S!6Q4x}ilq7iMEQr_jK&Qb6(cpm>d7|a8 z?70aB)TQG4^xok3c`e|(Ug*#x3-t=Lp>xJv{^R9%oT9!LHLJ)Y>=}ZB)SJqL&u-Y_ z69YBAO7!>=2bU|Tm(9c#y)9`r-DktAS}wx9|Ljn@aV*!nb{6Y(xASj@ z>^N%6TOHz{XX991{U!e1A)eJHl!MW%u8>h9W!8e*EQ>N8PHliyX1y?j zo_Rsv9`YdB0=#TD3EwX*&SA_@tjL>7-ccEx! 
z0oT^mMCr5yX0tGXVM89jI*T;F|IWf<(@1vz`)Fc~UI00$dt-vT#;f4* zLx$3q^BKHc0tQ`Hg7u&-V7_1|)J&(n^221*av21dwo^7WsV~$$yTZg;my~*YUNK)I zfAC5j2f0NOs43gd^mR^9r`jY48XUwUlFhMrdOo&ET)DfS6*SZY;A)>ZY>=tpypCbG zWD(6EI!S!9PZhl;3ZbB03Uzlqfg{a~+G@gO(%D#d>^}cnK>phSS&-an&t4 z*HRbriKZop_7%MKb^y1j>J3Hx&cU2yHx%n54wy|kPGm9f81|9RxUG%{NxC;&I!bA7 z7Y-GV6y)!aLP6wNj9`Y)9M`}DolgPw{>b0EjD&)_>#%i)j4}c{m}=E0;h2>rz!OW+ z=@WSmEKT|17#AqqL(j+2)Tc`RkK)Wx^nDr2qdt>=;o}vsNgjtaiO#Go&0i_?v;(PZC6p5z&s!2c1#*CS|uwcs} zG<qaW~6g4dUTdqZ%%{89-fAF%iUv*G|1S_;2NmMn;Q!BB4p7?pBtm@_y>}4@z}epz!UN8Ljo=(QFG@g# zbzd<4MP8caJA@`5r0AMbjM^uq{a|kqP9je$W@z?KVIo7ke-ZyUT9%yb3kh$64zaH_*2WqwXFeJhuvrR)mtJy@WfvfFiwfUvS%99q!=XMo zpNU$}Dh(uUeAKRmh@QkJ+dUY1?okl`xSD&B5AnIb4O)L+1?HPSvfI|fvDM5O!g71z zgi2yH?+C`611AAgl~A`u%Eu6cQ}XW=G`ei#QTq^8y#kcx-Bl=2c=NZn5OiY8;E41T z_2AJn*13!o3M&RB|)*O9D#IA?ACbHSiEm$y*9!ty2cQd(RD$M6!cZvZ$@z69GXEkX74 zkg&J9A@NVsdBBibo_K`L-Emn=OSuAmw|ipeMS5=PQ1{;2BGzARG)7z;%7-2+!B^IK z&~|$&92rSofby~E@hF!vDjQ(^+%p&`%7+t252NT0Q|>IIozau4iY(hCbd0*jt&`&5 zsb2!D5XIrVuq>?dm`d8pdGt>!gZi9nLS6l9-1)Qz8aCfoY8|!$JJ|!F#?oVGv}G2U z>fB(iN$&hvSR&SYePVk5*+A7hCoo@ng-1M}4f?}~y=Ip}`=}dy^G6fZdS1pm&(^bX zlZT^SiwU^zOXa$8qhP7II|dYqVNRFRXfS;|>@_XM{kaP4w=N1LhyPQ~%QQsEi{z41 zq<0vcp}vPHFN7a&Cquw8Eik<@nH50}u3DXog}=ghz{_B+WkSBhAZMJl`U>jpRWa|K zrC>aHHCS&u%ezp=o4w{^9!Gmj@qrasm=gh)o*clSzVDgbOAYK?Oj!N*H?04#^Qd;J zC)TaAgSWCg(#6d1jBf!h4@tsTzWJy)J_%|AFB0#VIQ6q8VUW2#w2YOq(ZR{6CiQ0{ z$s6HyIsvh2H`JJ^v$UapSaJ3=4BudnUY7T%ldE0nu0y-N6F~jm4&3>jEl75|TeP?y z2KB?loGl5V52YV-JhB%bJ(1zrZkv$pI)`II{Gl;*GQ`Z5p@+>}2zVThg(hcs!{f0a zOq!29uM?YA?HbR@qfX2AC46LVFc=KHOa0Sj5PjbY9){Xuz>_;{jFAdN>i+2XHi^AA zoQYF>v+;>eHEOnuX0oAuncU;2@>#zu%*j6kTL0^hwo!KIq1CF~f4>wOS$Etpc@k2m4|4Jtc(s_(IiyEHM3gPxu9hS^gywBXok9 zVQT{QhUL(1GMLGF5kEj4&iln?pnTH^D0>zI;za_Ob6JSrThpQQTO}8a6q*}Hr!dpR zDop*~BJfpd!-<`n(SM{aqGbj=y^{|$bt?ArxDq8s4|r1z$2E)GsDIA_d=I$6;_)M3 z?$CH_jsC~HXSH#Yy-66MnI>%cbc^pF8jQ#HFUQ+{2e9!vfRC^%-qOhiO_2lKJiHb= zl=qqG^&jlT^Emj@oQI;h_xP$keQ{YwG5XGk;vY4`p~JlF|MRBo-&P~yG<;#+Pf6Q4 zUJX^zKe^+-LMHg`%SA(^O2PDf5OF4qjaf1s|Cabeo&I!yua{AMZ#w=NO#2s?MNn+D z2=nu($D7Wu#X%X=%cKL2WxG&j93s@Jrt|xJl*O76>DaHu9owGf+S50Do z*7gE%L2rI}WF#79xx%>1r8wms-RqwbFSySZo?uRWY1KP$mmyr(yjKvKW55H=AmaUFuIDS_E7yhLi>9mah1e<;(VCbxX*j;N+KyfKAHzLva5K*-8g8g-NVML9fLOOs(C}fKQ7zpa&F^AyH+doyD;9#2 znD%Qn-Y6n?vxlJ}Z(BMa1w-a@{rhPc7VZerFP?nOdV9S8l!L?WeX0s96Wrinbqhr5ZoVv=I>^amY4$0PaPp(Z8rP+y9bJd+qk>6 zio=TW#D`f$e5MPe59%|^-Rn>hwhJ7$ZQ^llf%q`I1iuZrfOd_G(8=#S3I!VYl6a5d z6MRvkHPa%ie=sU~7x7`*zUCaS7@82!M>L)BSx1AYnJ|G>Ya;F@MCs~#MGKu7720bi=3-T;tqX=Z6|MWARI1m7E}UvqmT+^~$qt>bBT zR~;u5$1mZ!VZn?z?;W=WpdvcsZ ze6AyLP&k)%#;=!w>|n9bu5tq^zC2=0pYL+X1xF^TI05X8E7q?IPw zLwnQf%w9_!JobL&Jy!KW=Q2w;Rzn_Hwcb4OpeJck!9vx|QOfq2FWCI!mq9t87!}!P zfYMgf;p@QqesMI-UcD+riOnUhTDqH?np=VQ8!ae` z(ZQG`U39+Vj`O$b5yv*LIImX`^{8GFu5m2GJ0B9MOD2*F?{>#-KZc?-W+n)(xKU5t zEoFcgLf6Zl+-~q#KCC$p8doVt>8-66py&t0H3(*^hUIflx;nJ{&z z3aYx$=glJ&7X1!J)1|4*i1xU)??bUkE0_&bl|f8q6nO$)2%G+Q3e09MgdN4i$QXN; z{jAVN<6-9kmTdyL`E;gn#~5pFX5#I%QZ(@00uNsd#ho)3qbhV2pRpP@=)=MpVN5FfaNa-BOz;IuEh(Dk(nwPQ1thO4N{oidH<{I1}N|Am9p zHxrk89m9;1C6sN|V9r**_^(yRP<4QYLqWh>DBv;wN^lb9-D8=p+f>edi%_*qTa_c>Wi zre`30e&{sXb*onv23o?w#x10^O@uo=F5v$7Q($q;2_>>{;g+6ru(jnov$G|ZU5y#j zE+>YN4A65(Axw{<|8shN$uD0^tna=9B_Vmj)R46>)wu}tf1d-LvrrTMKgY=b=mn*^n^D`yjm=hi;_k!LLF($q zQ)f=3PUp)0QvhkrFf8!We5xLv1mS)T|5Ur7WiP*WnqAFkxEsZutsLh183 z9h_DVM7;y=n6`~Jk9L~^StDg|m^eK-m7%!p@Ch`TEn|iHXV7fq8Pu_hfsS+QK_!h9 zN+(}u9XGGDOV-1%%OC2Bnid3WkJ{l5dJWsN6kOq`1;d_};r@AJ&^P!f+K`9OskNBv zessm=xCyN4s|r3+kHI8jvuXV=22vXr!Sdmu_(gIaf9^blwLdSS{LD$n^6QV9@6r&g 
zl-&MR2#y>;jFYG^aC{`@AkGIt#tx8FTSIi0&1ij?&fsY?LBpjV*6)3#{O=Rx?Zv?` z%s~c6rl_EO;C7Ig>Vn0a3bgCl1>6m`Qg>;PMUegm)_lH0X*gX5&BvRS(N^kcvveAB zs<#CHErA$Oa1ykyqmmtPK)s-y&^(|&59}sDJ?gU-e;U9|W(Pt*L>BK9rScb-&k_^D z37S`yGVfFOSZeYlXkQt`4gMF3?F-NHh=!i<@|_BQEy*Up)G3}2D`gPV$WlWh5R9Uj z-SoS{u6<&`T1tCOADZ#jg>r5Ab7iMV6Pd;dxB<9|> zzxnt-gYjuX1vD%j2sKxCLR*D5Q;mwmwO;eEFv1Li=5^%_Cjvt?JbJa}h&0Gs! zql57K>?FK!_#BQT|6u*ZiTt;&3J2U4p?G)^_*_xP`uO8Kb@@oRs-XGSZ9BiQk~$}E zYy{WPv3${vFyc)<<0+fs(2g|E`_D_k;DQh|90fS*z&;EsxX0`_90Gx4{ovL_ee^!dL3Cs>2sZY&kc=y{$QbR& z8n@d)NAPIK*qD!Af3GulgJBR+;|Ix4QnBasYIO6ZS$%074~Pqaxl7F{_Y%SXrtPI1 z(Gpn3E6`_J7^HWLLDRY@VQN7TZ>@OCyg~|~CT%yDYL0{@4=18pPZ^ZRvatPv3dGxJ zPvjQFw6C=(-#(2a-%20o9CU?A><7Z=rR8Xz`iuEkQHSC8XjT~ao_l)^0w|jfstKpS zU#~Y-#jnN1e|m!#WriJBBx6|cU1DjO!Liiiq=^)wW9w)xIb?@+e(#vz5J2$XvG|1L zp~CeVlPywXPJ#zql|wu0D26%X8<;%nB{y;11ol&p!N7gw6|`T(wdOB^uR8|O+?5Yr zC0}^;BO7#^5`)s4qae^I6gvz*Q;%pC%`krWali#oq~7QC+o`wAoBYqOIxRGI7h>+S zB#dc~gGQvY`o(vl^#fwmnyYb{{RricD<#*|2k`pzlF$2ZIkq;=fNvQbwG0)!>Xk2)HD{qJ{vFdV^{0K;Q|>S6i;l;nAd^Y> z?tUw=&$J@=@5Eu8Vwr<;tv90ic~>lqf5t1!kHe(p7oqV(2AIBEOPsl}e5iX6dSBW> zey?!Wo)8Q=?@xix)PoLaBQqHVaNF1r8R%M4dE{xx8eq zEj_?LGZ{Ao??lOp`$Cg>&v``392R491qIWUiW=1&*77ojr``<4I%2bHL%6ak<20KQ za{}i_l8^G07^+^ahQflW5HhSBTS7KM-o9*HT4I40|AnCafjrPWw2W9r^=zckJd`~g zDqKu^eb@XtE^hd#G<>~~Ef%%$o(pH-QR-VTshkC^D?CB1g|zp|aMBc)qvC(Xu=DyN z6v?N6rsYfC@W}zZtxUPWvr*{QmWJOi6kuB5Rj97=L_^-d8$QIdY1c2Ksy>9PUW6+P z|D5ASU;pDna))E{u*stf4x9i_N2ihA)S(UV7`$l(%t%B5i0$KLzHiY+%I{>O7X+5w2U}i{_JeVuM*Z z$PJ57YW$EE^mZWFen&B5br3{1MM8#1iy3^ZVn+Q&g2Et-+0+hVg$L5OxMi`@qkIzo zvOt34cdDbxY6Vw~+$^kmo`b%$+l z%RrWULAd3v38qCBl7}OJ8);mGl=;V@Yh^st+gd^EjBJ*3-H(lE%Ru{=CqU45JviQ_ zon_ntRPUFGF3k!w2@c@R8y5@HKh41DGYYWWYz_WBydJI3k=MDnFKc@kCbYTf3mw~z zfuMbAN!SAoF#K94l&xRFP1Q``ne|1q({SPm12v&=-4)i@a-BJDTnK#(*P#7(OYB!f z{LXJRyz6%A813i^>Ms>oH*6rh-$2@}dPK?d`Df9gn&zToLEPzc0yn&=f-gFi*nDp; zx3`Fg@7`75(CC0KXI4=rDgx{}BBB2G0On(N0qlDGV=FHD!O2a-P`B4$Q2&sCU*45t z{_ZTy(2K^sPbQ&tK_$NEMR!a0DvV2#cntaC)M&>+}XkD;KQ4SHXWjmtxL;*O`IlG|Crk<=$(@K!;)+ z+hs}jP~SV;BQ_Q8y>&yq)@AIAK|1_LGwYa@XP_}Y7DYal&`g?T$A7!oF{2g4t0}=3 zC*3HYo@b#rq0O|%*9u(&iAnuuH_U6Agms0KqZJ1+sJ&~O~Rxz5^GwO3+sqFSC2%D#m+&${ zPY-avHI+I}FQa7Mkdhp?m)vJsI==lsj?O)<#_j9k8zBiv2q7sYMF{n*i6kUpBP1b& z+!B%`?2;tuCaHAU-AFg7q@Fd)E|m~Egb?=WgxotMal$))@4xuqQ|-0p8gq>AxE~3{ z=f`o+nc?Jb2x5uPCSml!-q<*Vqpgjx$-qBazil zy2Ugi)WLB>8Plx_g4U5mV$JS9%#rpU-f8R5^?EP((&s$fd{}^P-w(0-?6C|iA zoS58w0aVOQ!_0-jD7zXX=JurT;q}cJl~@VqGuLC_Z4p)Gt3x;PmoyZ4@z7n-5FhW2 zQFT2bCU3vw_( z>6Xj93tL*UO3*xW1pGCi55BzNa81C#Rc8n!9;o|GKZM}M z03{2LwTDq-!|-ryHhi+>Sd-l!JRTfDOX_)zsZg*Wn^gQ#e-3BNi-$9=(fIepa+EB% z!6PZ()s}Y})ML*vt0psWG}7UXk9@&d_bF>wB8P@_Ywl|251R$@@bvx0nl~qtuYRp~ zSM?&@vAej{+!9`Mc@gb0`*T_AWTtLD6v{T|!iovh1-ac4-436@4%a-^I{6A$e7Yv8 zZ?yso>d1g&d*M`A6$U?1Bi8R4K~{G|XuX*UR-bQJT7Q?|QriJY8ky*EaXC21A2933 z7cgUX2lYYB21&#f9v^%J0?M=TQ*tb{Y0%6wRuiPoq?gDz`%H|%NiIjhLW{TwLx@@O zx`+>1vJev{pTK^}hB#_n297ybjoN+oGTXoHn2B#VxE?yk?^Naw9$HK0JL+3L+7bK zTq7+M+eU?eJvd6YsDw6}B3Vj59B0P}$VEG3bPOS=syE$=a{2~V&YS*Bf%0lld!OvSdZc9m>2t z{3RoZQ#KLKTTa0DEfb0FBZ_k;#)FrwjP@JDSfyVTh!qGyxgkva>W5(&SMWn(0!lYN z=PM_yCVzA(3a7O2=$2CKF|sGN4135W$;B2bW6w}ltW-$ZrG`5`rsJ9_5A0?5zwGi6 zCjA-5K5VLErlS!Lxgeg}AB+}XSHPs`V}K*haue%lkkkZ%Nfh-ux6+I-aW4<}Hy5Iv zT+r!53|@S#jkTLAL6pfLZ(5Wfv6@=!lU9WVi(>J^MY`LzcEKJRC&A&}RPec90rGKP z@I}@iU$&0IsAxHlns}GnuqftcRL%pY>4QYeNifPxgY3ccL2joa*v}tC%zTme9c+t> z)?dS<2PLSU;sdsO2D4|&i*aL9HY!!ufirch8(C$+HXX_seOkhYgi?7xqiIOxJD z^c}nw0_I4eIN1wA9HV&kydo?-TE^R_D4>Q1V`Om_WpKz7E%e6Il(APxOQ7;WG;fn# zqch;GFtbgLi(V&z#@x$f|5=PKw<4L@>gD`aZYT_IRiOR?OPG`z1~xTEF~qNyS-r{- 
zB$F4w2Fh(m-lLw6&Vwi#7Jx^;RG4*dJ!X8Kz@j>fF<^@+D$@S3v=3APv&A}KHC3G{ z(i|AB%Vo;%10d{?J|_QJMl;M6vHM2iL$0Cx&6aCSIxhvr#_z(%PHAWpstQqS+$lR{ zhBZn%e%F=WWNKZ7PzCV>RCKx9D_>sQGajqIN%2)K$Bgz;?A`kuzMDz?DSa}*sOxz) zd0jY$B$lC>SsokbO`fyO+rcEQ5`u<0LdJju$eYp+PS?_&v-fdsktO4^|2vO*Ca2-k z(^Lq6*F5)bA@S_JPd%mp?q*=>U8ieGXm6 zR)WGTN3b=?<^M$OL>I| z=`Hls=s0-)kI9xPMWdnvT>auWP;*O$y50lf;d|=%c{T<7pGSh3X#scb907&}erP34 zuv~H`7DJrFc&J4hr1ss5E~C1`y$J_Uamb2wm}cUEbqqafGMUm#lbdBEi;A+ zL{DCeO4&dmX`(*1&KM3s@xw^(U(VCUf8Z9QOhC+xx0#XC{?X_M%_${7c2 zIf%LjW}?MzZ+LB%4KG&i!j-F6;l>B!s9#V(^2v0&9ZP>`i-bH}Sv_#Ju@^yv(i z{_F>CJ$EsUK6k~XPNeG}8wOLKhNJgCsVrpeTF4MKf^628l8mj3;6-Erq-h(|-TsL0 zSbKvzc%O!5FMSYWh;y(0f$46I!9}_kz;NUUyrR5@9hMcu<5ztq&uWUwDLTjqo7XJRkR zmErM)))@D7GS=SCq_@j_aJYQ{v?A;w&eRSik*)xdO}ubT6PGBj3Y~QRzBVvHt9_)~ z8knKfzg&nq5)8WQiG!(Y4R)u>q2<*i(D`KmfnAA5s0?M&r*m0a&qQ?Ttc1Q-!ZC47 z0%l6}(C#`1#}Rg1fd_=dE>+m5Hy?W2k7RfO&xgq=QvJ`C~El%-<2ex}{TB-*M1>(3O?`@S&Zu0?m5q zP(ixun+^|5-Xce{Kn+mjsEYNU0+?iyx@E|IHz>L_hqBs{TsQ0%bD<6`&tK%_FaIGn zt*Zc&>=iIydYzaaHlPQ6(R9>0>^RuO%`R-Ad(xHFFSY}TyigdrmH4%R0n|%l&b4XP0}zS~*Qd=m}6)2LT<^A#cczwRihS`15{bLcus9rvh8(CuCf zvoEp5yw0!~0iB=JqeAl_l%~V6!`svoF)G8r;ek~XO zq5e*_7|8w`;ko}+Z1;=Cl;tV-xJx*0Tun^q@~u4RyAQan9L_gKoZ?JZma{l|LOTA8doC!z=OfxD4Ep^@%$SsDvgfI}Es| zhVlo-to*G8>hFEd^2VK{{ZAkQeMJ$eOXZ-{{qa747(6oTr0Q{jW%67=ln3e8C)pwoIKnp|#V9v7CO z_pFf+J}pR?yxNdDZdc>g7zH|a2|$Y%#q7XeCD?L4n7im{GJDfo+_jG_Y-%>g%BP=s zw(WW5VSfg7eMb|!(hFKOzlggR$f!H&4yz9h0%d477)>4++a=?8_LLkv@#zST*y(^P zC)1nNwF+ZgT)F$nQxN}}I1(RcLDaPr?s7gI!cV>swLg_ZM)U&SvM8NpHz+`UvjhUP zO7PO~tN3hsC9ZhQv2f>6@NFrDD>p6izAOhM593Ql`O*xMYzrDA0=VkJF2q6o#`C&_ z39er(!Fo6C$}f6C2pQ%qUbHidOi!?xvlBOG4Z|9%T&7!_3k{b}@h^tgz>z$?*4k92fd9$eMLvrZisucnN?#y zZc)0DeKAGqJD$TD>{Zy9C)v>Ott)XDoB2PVDfePKfyvx%S-N`ch00X&EzT)tZnqDx z8kI8Y$2EkF?d#FVcPq<~9Rm$Ndzd;t66^jni!wD`%S-3&DffGcOYF2urj8nkI!hv9 zQi%*V3}X0GkqoN$lbG3edkFVf4(78N1WsRrO4z}I#+&h}#8UM*_OF<2au|bN?SpUe zg=nkh3VE)Y%=OE0zHN3Cv==GRN+k}%xAfu~#y`XXwkfDhy-6m!F0o_8y3l+&9~;Uq zl8@P&s}xax`j+o}xM2+LyB|dPk1n9xwT!JFa0xfux&p2FdwFYdtf-Xj7pf*N!o%fB zDBYdSgG~LwyRRpm&#}B=-DObKFXu6(gPF=OZ*&e?jh!0|!83j{buDcLWkFwv`mh+= zFC>G%vnmEWRE2^rp=c&K3HuCPP_ZDmq~m%J7@eHXTDJ#6jprYhZKenJqnvR0U?zP{@Rw=X;6NMhsFeLTpl4t@|MpXRaL!F@0z>JWeAR0PK* z)KQ+1&BA-XWAY;!;?JiMs4?>pSR@-VbBkh{C%<#EtaZ#jIEU%c9$7N*jObr{9^Ho6 z^R}(Wpdq{f44UNFW>f}^!*jUrzN^G{TZ`g!KfnS7bee4i@0V|w(uJ~U4G+1|KsWZ( zixcmVIyi%eaHP9<`f~wQ)0&xG!dcKic!#Ss_CVDt;4iaW(8li?$X55^Iz!w5avRw5 zEjlQX90uQjyDEnO-C)RC0Np`Zb9DeH8T1^#FO20k;x< zTZY$B?yDn|`zo%09N#hbHAxur>I$UI>J8C>384PA1d7XFvAmNy9DY>tpNA^(LLB)N z6tB3K&<)M@&0--vW9joobL-}1pr2+BX+6e4@mPfTU8g`c{-Tg}B?)YXhXZaR=Ix7Q z%=-}u+7d0$kcjbT=44yvapYy6_;@1Z9yv#+cRfKdi1HpSSKkB^HyG+%&%{I&@9q z9@KTTZQ3~ueO(1sqwa9~utrw%buTkzebF&$0?&Cs449*yI8CHH`5`5uV zR~r_Dy4Fh8aH^Q=%#Y%8-?*dFM@uwXx`Rn)JZ2i5PM}LW%!sn(#J{iS=E^(FW7jln zTlR#h8WZR3Kv$?si2~g=P0|{jphd5pDXO;6T$%~;;m3vI@z2?&&@j=eti9h#a|*r7PWKS95;x)R-jVp9Sb{sWoFMUkXVwjOG>Ug;Edym> z7m$mq-R0QeMa=R?Z-u=RcA-sgdQW^8xuMU8fO-Q1K~jI9xJ}%Q zVdM+ed6R%K!8Tm#X9bOoU$}|)2550IhO)gm;BrD;sB>`wWm+!?O3GsIx=%$jA%MBP z*F>Xx72IJ_5iS}iN86nbnf-w{(q3bQslDRS$3GQ9^N!*m{dip6?G$xj{l_GMtDurH zMxVYHfmQnqaFoa6qMgn-J}m@fUKd2Qf=c|Uaun`tF~iqq3qZ{$7g|&AinXWJAz32~ zgO*QX&1Ykoa*Zx;Uh9WG&K~HJcR(~W9E+{($%5Wc1?J8=3TAbEFxn^wiDd5G8J&+Ctb@U^iE>=A ziNzZ0PqQOgtMTqZ7u+-a1O~3m2S?n&EY_MslVK4kP1OYD#GYtX>H>;PxlntSbg>Qh z_@Gl^&{|Ns;7C0}c0#7X$JBOV_wScrCk$#8dP9IP0&9(C6DLir<4X4GRclb*5WQDGi&Te-9@0$PJKXkYOc6KF^B?}I5gBB~r3GM0kj{t~oz*#*t3 zI=SwtaoBsjFSgfRCeM&1$WIsv^X_cKJwsDq+B0HoxYEm99*A2#&!dI81It^u48jH! 
zqSR(Pcq9q<_x-K-lz2ReH;3V)zh|Nu5*Ipw{MldHSenN!@-{T`14f~!T<}zg4Gcn; zUxrXuHWt!8C$kUJCEzJ!G5zXw5Vfa)DbDO>d+0v)JdueKS2ZCrWejzVYC)n8&0($8 zyy3$&(DKwp>5yZfSa}#%-P(r5$qvwLy&l{i)0;D@H{bG)9S;qsS$JZk;x1G9_ zmY0ZT)(-^Pf$pMi_H{U;Ih1nO>ah8GDzSpC*oug7bSX{;tAAtB{`*ZHK7BNLzUc`? zHRQ*gJB2$6kC@b|Nzi&03tA7oX;=M(MNKT>lg`9J%<6jPtoxh0q$G1gnteR0_rZ)g z%dw%~5B8^j22NXe0;f(m1J8S0#z{BRsOKvTwyg_;FUbqgr~4(4PaGi(JUt#ewM2e> zl`AS;+{AaJtt!l}T6(Wr&Oa^m2My^ikcZW>3B{{0BvP9?Z~E|{m(JkwKovjUABP_O z7eo4m9310D9VL5-Bd%`?F0%jL?5TFH8&@To_?&_IHF?~AJO`(tXQAmEy*K_@ zFASmnJtNI>G;_HsTHBU_tZA`mGUx)kcQ6+fAD)Q^pP#@jd&c7I*lpOn`vN)!ePAoU zgy8>rB#cd>Kvw2p*>-;-*)4ss8uzq`(3ZY04N&j4&0auTh@u9&y?D|4EY4o#IJ z#z*%d&1x;wg`7qG{33MeR{=9O;2Z1a<>{)8FnZN1>E`Vf3GmsrP6Dy%uV zH`==&fzGP)%yIh%)@-W|)#T?IyZ;g*u7Kc>Bs~67P7ES{G@G^^(oP4k&X1MM_lyi{ zWy#ocawckw3le1mwwL5)hCyYa8jh$wi5J?;u&m)C^{zR=uFcfV;kBO!JD1V<@JuZ1 z-y5a`h z7xu5Fu9*CX4ErC0l}5+$QcE%#wrb%L%5M5vOh&C6hEOT`^R-=RN0|DF$*L9#kz3Ay zd(B1I7I+e6{Ti5V_Xt)Sk&H$?o^#hNf3toLS8;<{1$w`+L$yc#P&F|G2h|aaOf?l# z(&O>PNu?91ZrG~K%XB0;9;K$vi;@E!!L#1Jv|sh-DR-veGFCQ>ui^QewI+Vx%Cxs?O7;tOq#R@gT;r6p}%>GFz+N+^s z>rY-htUt&?oMF(%T!=}X4ENs_VMy9!aG5ko7@N8frRz462C9JcTa)O|zh+{(2)Jq! zXsA`dFZ#NR?r9(u#_?TDK-m&!c()}0Lo^E5mv4da?Y#~T^;Ez;*Xy{kGYQky+tHmf z5JKnC9!AX#HFmxctmdy}xg+9GRW=zF>;Dqu2hRb)#BtT7OQ`!rAHEYe>3?jnpf^Df zGM0Euy;GRZzkl=5!&YIM)&U6riA+0Kz%Lg4@X-_!sLOf1C{{jlfoMK8RCCqx-$n;OY}aIfZ|je${>EzR(IAzwUvM{}hl| zn2yVT>!Zt>?V_#0Tw*&v<__V-ylq)FcaWDr@+I=y=}Wn+NgLv;FM#sg5uS60*g03i zz=-=pv&{$IqB0P&{E2nx62*Kc6tlJ}QwSO10xn;!!q$%`P=)k6d#g<-S(0xt<3t1; zwuqu$PGj&N5JdgDIhM9hEqS;}chFsU2vqk(vQEPyR>%^-ZK)a8tt%lPjkkFE^eJ@H z?#ki@^v5Y~xmaiC$y&b|@Q`QYA*T2$)Z7SUijPBB$IadBe;tK2Nsn3Mu|!m@KM%gj zRLTV&Va4|QKsj-&&|>Ta9!7!WQ|g7TcT-r0st*J{wnF7yGcb!0SmXY;Tv^~EHl3{k z=Sx|3Pq0W?aVM7a&hA1UN)y#7Uu-+GoCEeiw1vn&{uW`$<`?(-ky<(RRu3+9a; zjhYj)!Rmj^bLlxO`}-(JSM33%u@!W@-HP7Ui}B_D0`$}YlHb4R-%&Hi)tR7D+Tx45T`(wuL;kbUp7!356U`A{t^?NSk8C3rY@IrjUQguVvE zTs;%Y?A0Ww%=KcbV}3Hz+_h-+2B28u5XuHkw`|;*1Oe}&vDRM|>{n`G>rg+QKPixs zV`IU253vU?uS1VbA6Z6Y7_Z*$fvv=KDze;*9|kXnyvN^|OFtRYePzQmZcm0W?nzV%wh}uO)X!39MLCxq80|X<6+3_Ox*uD?I(`wx-6w|O)%T*@xj&yUYZ=OKbqb9E z-LZ8Iam|bRqWZD7TxYlpBq6f|*9Xbqus@E?>ZD(vHUgC6bOfD$kH9YL0<5Jx;jPUY z#DKfUB=>{F;;0&~C_RWTe)&PsxGHSk)))31FMz9!M^G_}_QQ{}m}Yw{%5M&U-av>7J?blkf3@7v`PX9D;iLyOn`gjWvqCV>3}@d= zt`N(^5L?oxvxHan^xjM$?N$(<-vVd6oFgVced|`Jy7p;9F#R1(Dd3wOw>LBdCPOe`X0xjhi3q|_sYe9 zRhL1XJ>*G#0;(*%&!eYag_}WTs8FoqS?dnso(%HhUdY9#ifZ`u&m~Y+wD7+AM=>V9 z8x~*r#CBO}Vz(Y)=FFl7Ho2g$??J7n#7K2MqHL)vw|Iz;|Z+ZEV z$%d4G$;LJw^f8{_H%o`?qxl%WwvZ_!MnUGwSvX!L1k;wR0`2{oEIS>!eRezsIYq;q zepMJR=?_K|chI?e$1;46oCyarAVU2(S{x0*!n5(r+j123XuTFPo(=<}nS0P+FzFnH z4)hsV1&Y3ZS$ZGzVHPnfp>@t@)}mnw0Uor2go*I1AsYAPSK;z|V=*VbA6hj2;MF?R z{k(!^@b3qRCAl0m4xVJXrrlBH^A)V!a|zo2B!lbr?O^`i06!RX#lCh|z--KFZj|<( z+a5a#wSyyIr(P}|j~t5@N32<${Rx;m;4sP#oM888e=eCwx!j?q@F=7J`o3R{L6w0R z@ZcH`R9uCkqB5-bbq4Fo$QE+#~s$;GE=$fjtJH#*74T=^DX|7B2RsQce{ZXT5cjcdZV zuCR{2+{Nymw!F%_E>~U9lYxs3ob@d`P%$HKHS`dUkW!#4JjOyMB*bqqk55))wATEZ| z*G1T$-52}((!mWgufp2QSZH5b30ATVLiU&IEI1|#6(8eDWceW_QI750zh5MH|M7wm zvLKkcCKWxb?f8y01HgM}4ptsJ0{soHqhD$W*4+vg1YOd&jz8zt0edm{#C5PD-@$t97ero!%yOe=pwMFB;Ubv2k@}8=3oCBk1jgb#X-}r;U3FrSRb;9 z?)yCS9c~0MLk!_Wd<-^}bg-VUH{b}3Jn-ng1$9;KvV@XVD64xT!m5L)YU%{Ofm&dD zyM=Z9e!-Sx_+ftK87xdm1i$iVlw4aY_T6WM(pyRvX&r*DeQxl7ckRTQ2OpTKGK3Y* zL3DRM4S#LVK%GA_-aI6kdp|K{8Y8|6Mq4M+o}IipL(W0G&rsBl?F!a)92;*>15fA4 zbk>%Fp|dxR{*Z)w8>`Ue)mxz>s~Qz2p0bQxBOoaH9gCqiSJ{FY#6jG}l^a*U=M4j} z$CUFBGtB~I-&=TpyYc9nycHV85jS94Ij9bFf{Z@4%*c{B67G?xOYaVQ=Ux~;))iad zyF=RjOjh|i1EzcVKx4BbXo;y`N|He2k4Ew{_i z;a@fKGKPWbK2NZ>Dg>+0p@QPHLX@iJFc-5GqLX|O^>Yk^@Z`C$O?4&uruKjrrDuuF 
[GIT binary patch: base85-encoded literal data for the new binary file omitted (not human-readable)]
zJeLxKevFf*5-A21!*b{;`%0gTl|n3r5hs@8rCq0jPQ*ViyGdJ;u?I$jdJwS!4`{sFe9sR1va_f%5R!$n7nP_pM0C#|w!y&p@V z{dxg7st9mkQX=rf=AwoNA59347%%k4!G#fUT#^Y@UaT8u#UsJGVbd_Bz?bHFJNfQtSQF=i!tzRL zr9OsvOX|>gv?{_a9msl{hiTJl;pK!x+_#1KWuk%2FiJpP!qkR^Vy&c&1v0!yWc%`wA3ySWU9G5)M_G@H3rGF?n;PggI?1f#*>)Gg8* z95=K1(|9Q{GOVWM#VkX-y^MtRPDk(K&!~H1Blwh^g9f7{s!%AS26K#w{_E@X#`rmC z#`4KC>e-xo5(Qf}MPdK-ag0a03)St`KxKdns(i>1Xbv1A-gCp*Zu}VycW{QceUVt` zB!I4XM3o^?pzOrHbNkwfq~4xbU2P|}yBE@`&#L6^$3!xR?XH71mVxW;5ZueYcOGk= zaUtUxP;~yd>4qy!*bp!lPB~a$H|wi$-OQ(8$Yz;8=}=Xbf&ol}Y>!Px0n6RXeM5+g zr54D19*6am{C1>6n|;+AfI(vW$qPuMVtPz6JYkmOwM};tIq( zRKJ%(J!cMQeFbdBHy#2GAHu$Q=5;*O34!55?5C5&7kAt~DP}jDE$d_kPe&Rp!X+Z{?$1Xx=ybF~4 zScRIhEAVce8spbF2=<0app!SNdW`g{T9 zX=czZJ4sp22n`-zrS$@Te6BPSe>0!5d~BS+DI*vng&&3a06 z_msi;lTBE5+#Wp4evuL3mr#D%5!%utA<~k~sXn7X#Y_zG$^JOEo`6Pxhe=kZ%A<6!JXK~o+XosYPA1C zCSF}R8I6)$p!MEbXlN$z=D7m$ckrlWw)a&S3`$T1d&_ZtIxqpl9Uvu;uAe~Q81Y#(|C$+6>1B^lU! z4YQ}RPHg?H(4$5F4 zGT9X_%oU^U@GAnN=t|Nsi*e@mWDue8CV|0oDK#(5fT|N-U@|a)&Fhz)lZY6h!s?5Wpp%Q$tsR{KbKB2;M$3gS^W}>~t2%P+e zgMKvAUlsZ}BbG~hcgP=&l5#$nJZJ*$X9O;dnt^JIB2eX;CGZ|MlNHknao(p$Nc)4(>G4$1 zqJEclpM6jIvyT#qa5w#^Z-OQ_qmVapX|-3(dHS_&68VHEhHK?+d}n;TUAJrJ%>yd9=7^9X21bgw$FWT#0hT_ZQg=R0j=Af9`*E zjvA~Cz`h+pApa|Xgr{>b@OeLUp9qH7KYlROg>jtLpTurEPihe#0ncBnVf&;gI{bJU zR$G-~?14SZbNQPjjb`jbi#f3I=u`|((*@h%uLRxfoNqp}i}K#i5O@_&MybYqPP%RZ zwJ17>uCvpq^_rj5Hf;ucd2j$dZbiY6#|1dmky%;n*j`B7$qz({K;b`tXt#CeeK{(0zD z*h}RcxQNYeE8ubdSadvnk(O2L0ndlVjH5k|gx|hGoJ-Q6 zCI3EEY0BYro-mKrr2C}6Sp}u3<^|Pk@rGvO$dV$M?Z>id<@cz| zs=+X&WeWO`YSOhz4gq5igU5P3($ktxpReAHx1;4Ky!o0-o}h+LyrP)adzS{>;lsNF zxgh>}l5_+GlhMEGFy&S?k^0Xi@-GFP!guET)+k{d8YwLt9|IfixuNEW*>sm$I__P< zVZ$y9d&dFVRW=tpw_FjZSgjzu z7aJkmR08f{s%ULJ8F`sB+&;q+8yR)omq=vC&S%HAU!{ z+CWA29Rjnhr63u!jCc%agUQ;TH1*~}^p5JLWcZuM9YD?Md-_Z-uKxE#>i5Db#calqGe z1|tcT$7j8=!rgBkNq>2D$Nuj2~l0dr(CE zZ!};(^QSmlKI4>_UhLC#2?TNJ;C#IZzIH4?r`WNed?F9^f)0TLy9>?u?FXMXUj_T1 zFZ6)TB^=o9N48fspuUD1^dAidR~zP?%f86*pGt|x;Tl1TQ560#X6JVMZnmckq&>+A zoc^pV(%F#;`e#P;3OtTC?~laY|JY)ub}~4xVcnq2D=+zYg)7uq0*aaHC}ez? 
z-1N6}M~oSE1*JmxzndUyqYYZCE&&zCSHy^M&T>P7=_?I6hW~eoM5vWuqEZchEE~f* z;;KpJQ&`SaXdm)v*xXCrl*a)Z3C1BS^5$&wl z1ib4urdGYD!SR$AUA(6X+bSJUB|4n)N<2&ieQU7l9`letaHduH7s1(}o(fOTg^`gB zYz}cG@`IJ=75k7X=02v*UGqTqN)f2fO(l9$1Bg({p}hJqbR8_C&mZ}sO>Z#j|2rF8 z9hiSpznxl@b|I8ebuZT%5x?+uZ~TWBlk@#}Le%B&YMHM&kD=`8eZMzllmYFg(4-N;*>XFEUeW-7jfzo4a2bHsndLOHXfjPTbrsWAWIXwZy zrq>>|+al1ff_2QRta&{Q=@K&xw-p@aR6jcfJ*mINUWobjka(;1zfV9RFm1S9LV=-p&A?nhm5_#{vJBn6&RZM0kbYxS+x&Ouu>& zx9~=VVp~3K|zZl%a z9aL0&*HoDLhB(0(h$tP4>K4P`uHpr%^p)jH+;4O8J!etHGfLnyIe|8qjmGHU-C}EyZDV~U^@Y?zc#WzuR@UF++54Bh0#sfbgX58P7@1HCqle^TCd(pu z?XRGnMSWbW#U8X>dmb8ltI%{%4O)LuhvpfV(RsHrwsj-(6IG!3PJ;WHA4~paGs}&) z3f610b8GlkT-V$x#hDD;$wbfv|r-|xKj>mC_lF>g~8|AfPQ{moeP}SW`jQky7>d%GP&^86+ zYt-P;pG$D!%u*C4Q)=B>OLv^!b>LLL;Nt8f?OE(@iOix1P7 zkgfPAsRGLeXMthEC2*~&r`DU=Nr}RF)T8rJGPerKY>KdJ!D8rl>?4vbH4xyxknzpS zIUUyDvVBP*Bps;1AHqZwZW$(cxtDb&Jq*T_^^7^Q8>)?mE!6=$n%*ik9gxC+6%wwiQ zMCvJ+d%lo%WU%?SCX>bVtczPNhPN*#2{u@aE{jDQgjY^fYkd|bzBGc4-+AC;uZTPS zm*d?|FXneHg?9y6kWw}XG{+qwXU1Q{W_|z}>1lF1KxEaoh6ihbP!;iWGd}EM}{kz9w|JDfn zw`?YMx>QnQ?MpbL#~l?6_Q3V_aKxu)(Bd8kovl*@hdP-D(rgZlx*doe_XCKY(LxX< zXwraJ|B?$Q%Q46(2zjoJoZciM`g{!{3S-WL?wVTS@%|lY-a8%D!;-+hpaGhLwZUV~ zZ7%MND=Nk2&?x55>?#gNU9~Kd`#^?;DV~s6tbiV0R#M58YdG`aWK2JE3Dh4B26fkP zB0udT=>6l$*ip>u?W4f)Pf3V|4$~b+><4eJk#r{GdbwSeLuc)H+L|{FHstA{fy+@i zJd*9Ebxwifv^G-xk305U@r8e4sNOA6Qr5 zF1E`zxS&k}GG>Cr@CNmG<_U%i3h-tk>+6~83EfL$X|7TeJos}J`HfHMQC$JHdRRPs@8`!Hd)38P-nXy7ffU_>!i-Uyg`CTq({$)(85?_&)*H>9KO92w; zM!3-yhOYDLiGJXFs^}XEy53gMp_c`*DxFj{CmB?-yU1(Yf_D!)fpMP*a@EUe!7`RP zaGeA_qFqFEz!071uZ1gNA&f;b6!cX`fXU3;^wjf8uvn`BS&S1cZFk^!i(haiEc;%z zVG?Sp`mwBA4HkWiK(|T+`%@Jtd_7VS_+TNrRNKHjU@UH-B6>^1ApBb>Jmlg~du<^* zkQl#bVg5 z*CWqj%P{%37__WwnP+xC>HJ%Q%|^E1te?kKw6Lz=%NYWj5y?Q3uf~?if72?tD&ckBs#cgRL$`f@sFx@V zIu1I+O@~-ePnid{kve49C_Zj~6oxYAY?y_O$nV_%#_kuvXCV(2H%ukG`Wf82MXZ0S zS57>qiNRysQj%Ld2b$Mi1oeJba0_8)%p8t79*9H9=2ou9yNI}M&WEvrXlyOn0ap$( z-`A5ZV9Aa&YiZak-vZs6u0r=!Ey~kuqte*|!Io@-UM$nE zpS&I{r}#s~b#YlQ?ToqPOiTSlr7blE^HW`Ij2{yq=G1<}&}j$v@R6 zg49tqa1`D->x0JoXykph5FA^!4O`gR_C&u1>kkD0-|`@lUz`6duyR{FkUKoI@`Ekf+{xp-ZNyMIg6WhBB!BMt} zM(5X|nR_6V?YO`^L3fDgYZRMtR{`(Saw?i+4%QRJ`0F*}q~fHEw~X9iwBC6FxKG&h~AGOyaZ(^+>}OiK9wT8`J1YG`Qx)Y{@9(uy!;DY ziS;%oIH=HsExqNi@($DTR^KrV7?Vic)N^Udvse({wIV(I7D39t%m)@c6WW$?IONq4 zyniPUoJ<;Nk?TCv+RSv0Vh1W8;ftGka!_c)aB^NhWAG@)eYFjOV{tGNW(6)*?D%ik^Ir%7i?h6|=8LMs9bK*GHf<_-m z!}z&dne4M=le0qsii0MDVUmwebbV0%=Ez1TxzoysX+ z--PqBii3ON<7jft55qOtnQmSPzt&7g{pvJGywAKfW_w8YKsOQn4kilYSeNsQGsHl7 zHgV}JNAEbs5j`sd51lV0*DHbXgr5@E^k3xeoZ;ZTJ{y08AH^*Tv!MU>MQm76K=pb= z&@&~QrbN|Y>r{1OYx9HH4$h;8ZcM>eg<8TpsZ_0Z;{`Q}WV*F}7%peLUxTv==r!*Q z@D60-5*aRN+n{R3vCI9;MaRkOpeMnZl=`m4G0Q{HPoj=l&oi;C`~Z9|y^K7M3zc<$UB;W0EDsZvjy>NZ ziNeJzqb~)jUH0Em3Cuee52j zJ{GcONO9rRwHW->2~9Q(Cc6I>W8wYRRQ*E%IMt?scw-HS&(%|p!DFf7Ni)E_2BPS8 zhwSUyk8yvNqDjPJ+LD=0tdrE4Cw~g#SB?T}-dxa&(E*hljtnfUg}de@xT~`aTF37r zu>)!#8JG_Hb}&woQ6_tLF0lOTdg9TNh0>Kxtk<5MAw$fm&~O+`+nrpM_PwZfRx!5sG@*G^}l3LzA8fPjzrq7oJUd&DyS9n+IKe?k-hChd0w8rE4*`@2Y}UCnd^zzE;4W zH;Git^<_ImmeqR}M#Wqem>#RgR$%v7op>VJRtrB)ukNZg+h==4kPmx zG6q{y3YbTi!_Me(mjIK;i=d*OeP<>%QN>XgSx(lPXjb(QkJ{~U_!?tVHC73XuB8(116Sx5Wnk~0 z6x7e%Bkeg-#`LTqUw_tN;mJ63>UcxfuU?IsXLpnEVa&fhqX~^q4rX5IGC^n5X|DI8 zHB>S0u1Y_rZY!{4%hV%1r zchz<@YG@)|TJfk6It8b$u0$ojdxY1LRjoN968ksDgY{5T7`>$y6(*#EWB)?v{WK4{ zPx<1pes#1>x(aIi)tDTSh*wYUN4?1DMDocSEI#qT;Pp_lz-2G%$SI(S2hGW#v-+s7 zSOTu2mV@J*!{mJ;V>>-CV?Ka?xz6@P5ZWxER&y^xr+72*WV_?=vn3E<`;$GxFR6*r zN!U?Wg$hH`2=7uJ^1SAeE~aG+1V)f9-Muj5zeJ25TFdfsOYv=(Iy(ET5!{{b1y4Sv zgSYV_2$;PUxqXAbHK!9A{=*Hggk?~#Y(plsiWsCwA~PhqW&zd|AGu8M--_z;{qzDTtvIbG{{!d 
z$I+7yqv-8h&iUU0{Ig>xwlenhxqN3_=V*WtFWKL=CmibS*&R40n0SfSK!Dd;@G0Gk zRwL8NO8wKQdnyV4S(wH;7fK=hK_p1GR|{M*1GO2W&hM!T3jcBCyr77zRZd5_B%a!- zvK|LD3b7TVq1j)Cstya$@L@5=P6&XO&m0|djgPAP)$!e$HgcAn zL+!7}z;?i2(EA`365nj35=0sw8498y|8YtutUxj$3KQ?WqN4j-IhDSTL=^2JSjG0+ zLQ^X)xIP4p75p&YgQBMMXtFiu3$cOesQYRHM%k;QO8jDQl2)+p^d@*$Du=4c zduj3bRVe1hy+&fW4bow_6<& z=|oqWJ){J>->6dC$m>KnqJlg6REqZH=Wu9U23D_KgT~*^Gj>BJV8ncs*EkZX^E=Z1 zO%L*4v25(5cB*N-5=|~AVCQEF%EJrbf{r(e4vUCz-Xnp0&0J_!ZlN!iq~RCMJQN={ zCkkJ7KY&kR-y)b_$bk zm*S+_gQ%VxLUjTov0YM){o%u4KoA2xE4%2cn&~)Oox;?Z%@|Zw38J+1=&^!z25wIQ zrHSQWK7Ja?Qbi~&^~CdPK45y&(PsTwS z8;?TUQ&6Sxh3;*?$e0(yp>uta;D9;fm3Pb|W6ort{lG~u`Q1i@uYYh2Yp)Pd#%a^8 zWBw$K&5aXBGQZpMFo>OSi#&Q=2jW##DDl}roJVk+x%nw{`705o`P$*j!~`sSz6~Tc zZv+Y^HE6W`BE8QsA1d2Q_Ph)c7_8BP4l~BHtc(*_+$|zvk6}dXLp`XEi6ecJBQVk9 z5)qvkRNWc1k|g@F`|j8n)yF%A2g7wT-gTksX5H{|mowuih%??jguM~hvj2>ya4#k4`hh_NEQ5ipv_d!jbFaD3BGmopW{kr%;6on9y z5R#B2q3&xVNkS4r2q6if2qB#2LFH(YCZ~DOB#r0V)lo?ZA;c3xJRzQbgb?2S{!jHe z=f1CDueH9b!Ra;E)XitAlnJrS>kpb)>!EqZNAA(P1Sd@K$JWa?gjAz#u=>XaY`vX` z$2PA*n3kHh__B)F@4pbWw;o- zKRWWrf3vxH{Vi7TtvCF9JRCjrDf=}%Qgxh2@0o(3`3 z2N55_R;ERsGp+BX(Dc8z+;Ve&(E7F#+I_;|eu^qOzYB)8v!lUife*YtpNRGgbin@C z0r-73msq&p1dDy9_@g!x9cP+?=ZR%tdasUcH13BETd0#Nembaqd&(Q9<;d*&DMS3m z5ZF>3fWl}o`8}6XpHn*XI5rk%P07LZ;#ycMO2mR8zo>6E0ZkcselxR)S*-+}_dau% zHtHWuj9JFV}3v{rvuMe)@J{$+VrFZ`ni-v{$97{Qp_28j53_9k% zdR z>6GQv%j79*nK%KoUub~Xaww}Mee%_BXdfP4Ql0DCYgyhW*xDhUfGQwHfCD9*hPL{J>VJ1*49I9jYhZ;%Zf6L3NcnWG?zZ zd&pZ8-J24w3L*44CK$)`F9VO8eKE^>3)Zc=hE1ctauw}fu(3}S2KFsy-P`{T_e%DFd3v#DjtUJ-)6)N(_5f^zCwoljJ=T{jHOQMP$Q4y%3oGVjfl+ zY{P}RoAK_@a2&MwFy^j5jPl9Gh`nb*?AuFVx_J`QIq8k|&-_51S%OW{uiWYf<<$y@ z<1d>*DAnUK72j-7caMhP>(My2_k7%8!|}+tad>&30b2GR1P=lt@QnLjv?^Q+mR>&m z-aK=B^&ks$V~&A=>rVE3W<0>cEzqWai5)a8#cGE%lsu~@HpN_~)VmHG(3^YQ@?&vu z6vgX{S#9$b&^Ueqf2UCAZm^Q<$bbtNF)|Qc{gXf(H&FPsP!F9x&BMcy*HG>KbLQcG zpT~?D$(!`ggVFv-X7j3z)!v>?S-fwun9Tu{<7g_kIS_{xdo4)w_s7sbu^9iq1P(ni z{FqRNuKj%l+oD=bJUn_2OVxI08;}K0@dSEwF5)U@DnZxnB&+=`<)D52+U;7D7Q*y($Kxi7be2B|h=etE`HWs5-5|0y>uoh!2pQsM^j39{yUI?UpFAI$2j zimt~pSo`%^(DY~>yttQ#sjvEjJbW*he>nh_t`oW5I0KuxM3qs18LwIW!I~WY?Ygxh9wNSQ4lNGT!&{}WJw3{BW_(#MV%5w)LbIO!&@MUl) z5|xJ2on-+~Cq*cmu)hlKMy8|d%p}Sx{gf^L6NN9%?nb$~7mMsG!Vc9h{N$u~v=?@v z$_HZz7?y--j@NKcO8{iTMt*WI%{sE@foAA6;<`m(tU(ERzMnG;=?&u6gYfzEOuYZT zFLhc?mpR-QV7?HH&4*X>gBj~^^Eo3_5C6_}-uQv)e}93wAD|=7c zy7-os;H|cZG}FJV^@KC8f1*rm#8Pya*2Nm^{F&o`o!C=556t7MS&~9JYJGjgWDYG!VApD9tZ7}gQ*`_SJrP-3{<*Dp}X!PcrQ-}bN96{ zXR;g{nwOL2PW*rg)6n@jc@}?naqHo};630F)-I#@*Z84at2h!}Qw`{RwFL^grm(uY zP-rf^!W(;aaL0<5%;0b+NRC}0E!jhnK#j z$qk@={Sf8eB4w(jAB6KK^3ZsyDL6c|!<#lapn0+valVK0K+4dl9kn5!?N=)naJWNab)rYn;HGNLRk@W{4fXN#|#IfnKBsE zTmsz}=7VU*qjKG0yK(W&d3f=WE|&e1&E@yXO25)arS zuA;fQn8y@F@Cot(xF>1=cE=3{54TBN$?-4K-67|YL$k!C@yxtl9n9vZft3UCTi^LI zRhm!5ym`aE#g>3ZG-bcpdI(OKhgzE!Ku@hPbYI^GE9dB-Wb*^AuHlF+gKD8MBY+<& z&cWcl1z?$an%AtzMAZc1tr#J1DHTKflvt2#{mu&VGI(YJa*>6%iSxMGaKqUjC5GFf zv*{Xln54uVlE*R~y7Slt$Ut}dK)y!T4W&J5g6EESn$=hmdm#ie-|U4(iy9sn)xfsf zQ{H)8F0;8Y6COJ+K|@-@4o}KMg(AvY_Ljn`U?-esV2uhMp)8QE!Lm5&wEy`ZH`s7n zkdK%OOVl=@`+YIj-Tjx@tVm@A%Z4-Gu%}#oj5F)_-zG4<-pN|-S5apa-NRC;8)?~D zocg{HW`Em-8XNK36#A~;!GKt7jUy;i+%z7BdiB9g1!qwd zZ6fP<*vREO{beC-;d~S2<<*Ywghf5X)6&i0t!Fla<^2n6@akH$S$z#t&gkNXfAX;5 ze>G5caWU(DMqIM7=`y3b6WCLk0gVmBS@k)<6ZK0-XZasfeQv|%$7Mh|&EAX`)$nJ5 zL5O{QxgAbH>t=5d-T5Gt(|N@~SqX+zacng6<6QeV-k3KBBQw&ulF>=1-H_9lPzIsbBSx zIHDsJK%tT+U-Lqzz7mkP+Y!%n7^vS|4|kRQQSU+$|M+(dI;8A|Ws`j2=b!DUX6ekHHvRHS2iprBYO_JpdJ|fjD#|aWID7l`WfBiwrVtY<4eRDa z;@sRUEH)CTLopl`7ASx`(1^=pExGQ+bs(F50c72KD3=$+&Gip}NH!cTZEo}UpLe-Y 
z@nq0bG=%FO!5Dv84=T5&W3l;A?5%tS#i@yGg{~uU1nzMIg}rqCN@n+a*FaXk@%VN- zeFysC<*73p*}@qC_;vju$_j=-_N*88MbmvpQ_f5fGQ4^d;+~o3W2f21<66Lsu zf@63jrhGk*;x&b`I?8i)Px;AgiFX@3b13%fx&$pvzG$0H=SI@|fX-WE2cHA|zJ*XQ zwplRPlE-|3x|E(&L+h(Qtnru&anC=<3Vw`dstUyS4d;UE*GSg6?lRcx1mL$xeK5yc z1ZA3k*{9A})Xf@zRwtDp_q=su>dchy>AdjL?i zI$wUxwmA;Rd#ClWz4;^|=+lcX>;0=eJD$oJkS>Rmqoq7QGSpFX?dsz+lZY{;OXZ=7! z;S4=X$Ap%~y|DAdDeUPEX4XHe&`K`D>;773Dof@bBN&3VGPp|VJzidm($J%_BU-UA zVkdQJ6pq5$B?ln&{SGj{O?f=p3ma!eLFfHYZg*opzg08?efRW5alAQ1oOH)bcP&`3 zPlnnDYI&`EKWxklATQ1%Uf`Jz>No5`?M4ymuF57hNf5dYG={@v94)8T@%JesF|maH zR_gk29cl+Hit*Dg?5$z1DCvdpOdBFEGcUSB&0cWy{Vhp(4W@i1{Ef-IPM5`7+U9wb9H z@>5GIA#c}yx~on>%l*ed+-p3`5VFwW_#W;k4`a{Ytw0U=A#DD!pSyIdMbSS=yjL3i zyph}Z@A?s_Wud^d#5!O}onFmJ&3w9&GkzGojylAC=P`nC4@`3_(16DA;e&03&~YIQfhGKAF9# zpFM;h8ITBLc`>*@*#Ps`tj0d)EYbIzGAJ3Z1eb{!&}kcvHfDCjuMK0Wm?4yyUW42Z zK4{xk%O73U!}y6W`Ndtt!wFmnWhcv+`tuHEr_l?=ulvcIdkQv%-XJNjTX~6L*#-puJf*#KbAG51%Q=Zm9x2bE~*> z^maaH{zBCK-ongJ1oELP%HY+iI53#Fg$D|ExMXrAH~sGz4D$EISE+$0TC~Tc%iuH? zEm{W#Vk!78c+4&>T8g=+-ErMYTf8}gwEl?0JdpffT337XQ`;iI=Fe=je6WZqz0YSk zT3K9fuOSpXea(!csEau^1na&eU=8K9?dl$KN#ZGPZWPJ2j*++C!Jf}Ksf3zL!qnyY zynaO_sK4@r`fZcApizchrPc7@G-)9;-|JlVQudMdXZu+}EUVj__Q{fpB#;nseDYL`APt9h`YfT-^oP#tI4=w=`8HJcZNGwxq{e8%Ig1_$(;o$ zWS&0^4iToTXU}}NPQGCtpNkeBOwm;5vx$Rt1~ZK*6Q<12rG4!gv)j?lb$5_fOdK?& zNmis`sDY{@u@Yv+v+im8xqGKSG(QOALOyYY23%(+iUZOA_DIZGsSRakF2R!Csn~MD z16;;fl7H+X(>|#M8hPI2Kr)>NX1&n-Gz)QbRCvMUl3?42AOi$q*3G=`- z`LOJ1N)2ZB@*>aSBVPZ@iO%B%5ct;*{GL#Dr*t0I>!#V3-Ek(rGM_mgE@4s)F(z(G zMTg*p6$hNgmezmxhC^}as$s^S_N%4>Qd4l1kA$wB$*fh^ z0v-Ic;79uu)XgN#yzfQ4xI>JcuZED9RgPyv!m&F=7vDD+;_u5=sJMvcsnQ4`E@3E| zx9vyQ3irOEu0`=Y{qHyH582M&vg{dPVD&Yrl0lA0PpvNBOnF`NkZ{~?CY z*LqgJ&6Ccs>tXDyAP~=Ulv#fweQZfMYb|=iHNI~{gNisXa!X(aGg4#?!Ogr?!xwe; z*76Xoi~RljZK!f2g=_1FGYjnjXwb`vNBpaaOJA$wstp&ggwb9UIE9O1RamX|RsJ;T zGB}ScX3_(H1o8aMOgDWoGoED*tyWIZl&1_2%C2Dq`5fDQC-NY zxoO#J6SMXZ44bcut$u-g&I{6*YE#fG*BcZ*9EDPaD0KcZoxKVZ;h)8np>Vu}F_U7k z?N<#~nE8aq?|I7=o+v{7=G#mZbci}7#}XTze#Uf1(CL(e>F-XVWB~b1D;M%DXHsy2 z|1Pwhkp~(Mbk6ZV2u1OfX}4?^oHQhm_jL(6+&2ZiPg$@m?;3VRtp-uYv+~Ht!=O>J zfXn0C$}?_J*1y%5awq`98|c{_G7vkQZt^1P69V60>m8M$(^8?*>=(Rj_SpKCT-`?3?j+ z%=zzo?nu5)n_m6V(2#ijM>m3eyqMe0Rss)U6|Yx`QTTBT$1>S5eqA6XVZ{*tqN+JGhZ_{!2Hw zxOALguPy`8iUhDE51OY$1)i@^pet&laKf;1yVy>xWaD zykJCGAJ7~3+OWn>ILFa_k`?$l~Dgp zfbFMc(0!{nIiO7-q#^>s-HO0-Vgkf`9|@A?NS5A|g(|~J;J?~+*wVk2hh;=!`-M>u za_j=WTIop~>ttrM;SJw8BOHB|X@)TSD%X^*f{w)d+;MjcU-a!N%G4*LxUYxoOeA@m z%Ca!^+!1JaJ_EXzJ>xc~SHSQ!Cvifb3P^ttiQN~8ht{7XET;TU^$G60eLFW_n$Pn! z>@h~&jMeu!%#1e-0&mhG1KhH3YDzdtb~kd=U>F`v4^2(Q!+Mx*#r%j;!$_d7WVVZCPbA@U~qE=xKDLO`7wR& zwAh$9SUbQ!*ca_}h{Il&B75IRbG)$%g0ZFvIJ`C#I_4%}<5qpvZG45tZ&8NhqqAV_ zXn%NmI1ZeT8^P3ZIm9H{V3L)#0$WY`@W$*QF7m&@jt(}$)q(z~{xJw={@97#=K9QN z;4(-o41jdH7wA`cW8tb}u znX8Qb^W3m?)<_;>@$Ub*4<;g`*Zj2NYW&hZ2OGj4@wQwa?*2Z22O2G7c0nbS&o6*k z^96Kx`dAhzip7Q@yC9JC)n1Pdk$2ER=>Cz9&%P&;-@$|1bvc7`#bMU+CkXDBNJDQuZxZJ-Llv`d>C$Ef42*{hzR=L5e6@x(k|bP68vBBUttmxN}A*TFf1S zI+8+k%`zbFw=2Dyeq4#gV}a3CLE%y+-nR2VX@3{ut3|;k^#W{PaSeWcOor3Rb5MKy zbf~rZ$;;+{=Wn(Wlg;0Zscf`>q7{>%Zb~c~#-+oVkjrQ+k40m@+dOqa30w#gqx-K2 z7+sNytv`2w_^!GzKrbDKrj>x!>b}pVfhdpm3!GV|*@B&W{L>vV}lhS9qT04VkM*FT6_koSO&X)a-PKvs#Y$a6ZiJ zn}XjFX1-D`+bFm*83o%@y9uKL0I{1=3)cZtZmT8R5c7olqYUsl(kh!F*$*ls)> z4BS35&zR$2WoriweQ)w{`NL54RED6vJQ6&T4Vmm+2uc(G2w{V+LdSt(Xgp%UttxGx zv^gL5wOgQyEFMG`sfSVKh3#VovCne~P<3?{zg3Wmmkv+CWNjbv2dI~emPrN4-7fMw zsR_fGej!=;2gU z6-7eRR{=Uhh&gUadHlFR;B4qbK1LO6ExpZizIs4d-$eM65C?OeiSuLah0YVR_`tj? 
z*k}>XWAutxLvJ58;z<>G3GG>;Q8?(Cw)65QmnkPc4Lws+q5a1xrkm3TJxz$q?0uU> z6_RJjREb$0Fyf?@V3=kQq|IVjty==<)Q>WPC%D1xP#*0v4|B5vur0I3e2hcNiW z29$160Hr>uygAGqnvQ(nnzaK#GGHLM+9)$MvpO*D$>)+iC;5@AFw)n|(D!{8Z=XDZ zg}qILgDW{~kxWJx+({f8smWyXOQ_?N3R@@Dpy%LGVER5A3f_-k?WW{2R!L)<^$lqb zI~aoe_u}g1tv~{loR&%%$!| zM2TPr3R$+Whce{_diuO4ONz!7q+|b%+Xz>h(>Yc#is(_lSsJwU2e0 zf8+{Qi7c}fnCf6Ml=RBunTh1hh)Wg9dOhb+m$%`d73P>ZhWPM5m$1aGiR6uz3JwFK zSm+;jw3Aiv<^3qTtru*vZB;Oaj+u^{>T%Fqs?A#Ze&bWOUIibwT=d9a3^v1Po(ul` zjEg%auUdf~$`&kdQ5ovhU!~raR+-JXo!m8UEEgXSF3+NwN9t=ih$ck~VuiaVMYZN9R`%8vo7Z;zh=A+<7R93k8$0V-+yn!Vkw8t--GS ztDsvo59KQU)Vp^Y+LlL9??oo%X2!GCPZ*9c?}Z1A$}r455bY{=@vAqzac-musys5L zKBWxVw+aso-M6$%I5FnpK{j2%lxC#G$9mozx3n4K72~0P2gJj(7xycb0&vK-{ag5P1Kw09<06LA*Rk5Y5gs4t$+}F*9PZRaFITzEc-;@dHeh!GD&ddEX4LIG z${UuABnCf=c+DI>7&5I?%3p8ogW}|EEaZnT_uZFEI#wPo$&jNZ?P>9) zyU_Q!Kim6oJ+{A5;ifT_g3I$TcvYN+sV|-3j&mRUM!sFk($B188qIs}ZUFh`8{GFs zU$Bq8&m1~+c*vgPphjmJ(UK$+D`Iau4?M`6sE2yQ1Ru1>8%mC@e0ZQX4%;$xp~hAh z6OV+0owYq0?4fh}mJ&Q_bOi&CUjWq=E@1jt3g0iMV97mt7yB2;oM%pEpQU10>`lJy zXMTcfIC0d9r{SK{vrsS173z`$KzDXNj0{|ljlpcF5 zYllX_!b1zN&799yUT}^JrFP9?aAYQXuog zUhWo69%2PG6QcoMyvy$)ljb}U`t8rbKd(bz?4>C1*l$1{!a~`%4QUXtmd-*g_jz;3 zdQfkC!#d|&7F?bwV)GmuJXSd$9dt7V)jAGlZ!%Fwkvy&IBFfK1>!XW-7pq^9hm8;J z2>MSI@LK&b5Sn82;G1J090{d9n`=Lr+$SGB z7iF^jw`wp1H-c8keKxyrI>uKm1!-s@h;z(L+!G2ROvJ&tSs#=Z`J%&mV!*y!4qw$0Wit}+ABwbuH+|69X|xj<&U`OoW3&aY2l!0 zSk4=BugTmlSED@7ND%%dcGo5!I1#%FN53e+f-EO6KJEq`IcMQT%QgJbcOK(( znb@@BDRbDI#f}=T!$|clU~h7bIqTI>2tD^-gT~dwW$zaW(#BpO z-!(+Gy0i*zt7M~x%K>f^Tgz7usX@ucFzdp{J{W$Itq}8*l%V*%zB&>fLfMHF(au{VI9PtF^44`~;^8rF| z6`=Kr_1MbS%C_wAMW@42P!R70mc|#rc+x;{#2>tRnH!4x?W3DKQc(Iuk7;7dU>CP)G zFl84vw@qfYqF5-HI1!{RNjNbs6Ma>xP}t><3KN#VzuJ|kWv+|9<6WTOQ?g*{aId`S zVgi#}mz$LLjY8?$mBPy+z>kWnP-)nG-g$L06Wx#+*9S+l?ElwnHysola{e;uTLVF5 zPaqiBRS4_k=6K}7STxQAo?xtv5j++PCS{;)?^~?1+zIS;1)RT(cq^Km?(Z&WvC0Y` z>?1GW8!2n|e9okEx3Tf%l%M@(0{TIru)!@E~8?m6w9y7naWJX5h`7Wi|T(cj@ z91>thq5*1t^X78xFS0*M#ZYoL1KJ;6Vd-~sA$sKuEK`ofH)mteeq%S+))-6~z$H-N zrUsfdBS1~+%XKJoZ)xBTcN8wbp4(Lzt80Tjs|>-w{0BEus{kVp(nsHV!or{VI6HkK zik`gYZ3hsF%mSg-{xG&z#AE!yAncew2dtl`LzUm{m?wP_IM{&xijiy zROO-xYN$8601ujyZ(zMTto<|}MKhlnr>^rtu_%}o-2BPH#|*%%w;Rya^BGgCz67Zj zw7+{#ARfvEP_RyAYYH!;?`_W82G~OrF>CDx+~y7+J!R1kY3}|0EUQMtG^(a`gCT4%Zt(d*qOvCYu`0pwY`?xZI3<9s`H5x6L^)Avp&nzxD%`^Z}(u z30$F%6od@{IO=#l%|`<8=0ak=CR9O>#Z~qR$pgPSAC!JoW9zHAyysI4Zz`s|(cKYn ztKA4|uZ@MoZPhgQ*JI}E%GhViL=cUYvq38};Dxjt#W%0Wj($3V4*O!@r{Y@d&KUC@d2nNZDV9Ac zMb`_|C9GtL?{^Qwi8*KSA9*cyZ71Dw(K)b-E&xnPL0{4^yNO5RwB47u!eOZJO#`lN zvOu5Zn^CyG35{o8=aHSanCGeqaOJ{rT&q}#KT7h@a@rYGh$M!gv7cbIl$eMc@(h&=vS255k+7xgg$K$uF{%=sdoislq}u z>_zOzVpZ7Ic?Cbt3Paa>8`$l*8dU$lz{);{cm6XOL;(|dYh5Zxy0fv*PKKF&rO-Np zI9#v`=8c(!%k1dcay-bcuAD=?#e+bvuR3`AQ2|lBxXgUVNAB=;onU;JL(-G!D7Uzb z4(}t-pfw&H*Z1%gr(wAHSq$|Gw#t5v*Fd9yGZ159fp3e?LhH;9q3y^lY~GUuyB#Xf zRCxz87@y8HL%hIX{BOayjxt`a>zU0G8Sl(K3Kuqw!$0JQ>$2(%np-E)-%<@_4~PeO z{eP@8sgcF+?ch!wUQl*eolAmxgcCo5vB;tjo{pyuxmU|!e;+yKt9zjGuXA{Zz>S-K zZ$OXZ-ONgLJ9OxKv498O=xb2TN*=Ai7pf~Te3utW8YsgsLBw>$eL>5-7bN>rztnH? 
zZm25bn}lK%?>1!8vd669?p}DMVvW*M5paq!e?~M(UL&rO=8ItFJZ}w%b)&e+ z_++%PiV)fsg<$!FQqspSu`ic)&|Ll_mp)v~y`l4%CU^x-w>mM<7s~(oD91PKv zf3!YX0VjIT#HHaucOVlh;E(H?7lnrR*O!D=b-#yf(T zRxf)nI&mJlhiv6WLA18Tzh^zY=b`!=GcL*@#&uEv6e=abh(-kK_$^pw-OW4g)0qDC zP#hOjg66Zt)Q8iw~alV8C-D_-|b&2B;3)w~CpmcsKv!Z;*bFB*?*6S!=*ESM= z4Oxhr#V+__d;(6W&BgZ}XJK!3BK`@QLZ0_VuCn7IiW-J6!&?f77d){`KNB^6T>Xr^A*uH#UUv6j1kYvtY7PC%>uIbr`^6Fj;!8@of7vZC%2pt>QCONLen z(o@%&NfdF}x>oSk)3eyskO*`=xeA<(%<$BpNc6Ref$F_tDA1RK`SCI2V<*l{Xdf_4 zsKa)%hwKk!L!K@U0LlNlxJWU~q@gZ~hiqC6p7DzKYXND51L(aT8v!y+>VcZY!G3QW z|KnW(A2NJN3n~F^?=)h#r=!7=T&5a&M_7};0*!qyLHOAVV6GlcdQPltUH5#H|G2=v zkdIAqodFhnJxLzoh1~JOKRo+9c_qaU*?!_~b(+>OC#wX~ObU5QZ&m6(*CgI!6jOS0 zkN>O@<4Lt7YOxrmdMpZ;W5LtCoJsoUW7qox5I=b)D~Xh$%guC9sjK7mi_XLBh2+ayT8*jmj>2b! z)p*l28dq7;b2B|061F6wf%_NPq&6LFKavCu-4?_%xz3G?13~pxFK|f>gL=IKXgOg# zbNJTIGN$X`v|+(GlRWK6 zL}UC$x=%j1FATa%Yv<32VDR$;Gxk{tlJZ{Qd}11`h}}icS~i?Ibd`FxW~a-oG1PwD{PM;#z*c5P{`PagC}0bm&<9c zu`CEQ{}o|bXEV#|&_S1%XW5;sN^I%d#GBUDvZA8dpk)X$7a@b0@3{!-rjcBHbH2=< z_JE>|+CW|JJ3u7f|2H2j;4^0nX}^4^nUi znU@l}ZoLNGXB6STG945ZO)l%*k_48_8R~v6pxIC}ZyIp`bhn&iEjpZezDvS}jagh= zU&-da*@sRF75LmH76a=JQRg&bm)>LIVkI+}6Ulv&ywNP>C@~Vln6vpsl#V1lY-A@t zl{WxY4@;To)^lOdnhIQSpE_@z7eU!>@-|wZBClBm?-yDL^0>eJz~R}b`I6W&S1ZW_ zd;wHizy`_+;c!>47pNICtp72&U$zQou zL7p@YGJi?X_QZA8m?D$4)XT{~>BP)kqxq@Rz93@#@W3h?%&c)`J?n=;>%BFU{SIYb zkJ3>0k2|+t5d_0NSmAx@Dmc|$2DMirL9^)IA1v&ex}pCJ5T}P?_BI4&PGM)I<)#b7d!{kdCb#NkYAOQpJFMnn>?-Y^Y&9e zt3Bm5GSTvXmMnyLpL&ZAg6F66Kod#cFb%nfq88WbZ5^F2CJt2Ab!+O7JdH;s`yhEa@;d6R*;m}tM33M z)Q5d`R+OJ92H)XBse@31yg=k1nEi*vkf%-(y9Sm`$)YYT(n$6%k>y=20kba&AY2F} z_TEULBVsM|8&HZ-M#*TTLOPiHN+#b_%oF|TEJ^25l|y~7p+W<}zm&DV`G!8|8fu*EZo8zD@eaB4Kn^Pjbp9;GafW@9ZCl)V!PoLh?)0|ZMe4^_lL!R^V3-F z;SoXKvlZ;o%zz=$1sK!Om#M1xa=qQ#!1B&L*1e&MmB9=gwb=o?+pD4N$!#8@D1c|s zPG||d$83aY7&yNYI!@eS{*8-K+p7ST{9>8Pe1H6Vyd3{K>WE3cx#Sb=Vcj(v&|MMD zq@*{BP5%*M&Mm`quM5~>HUhdP%>+-YeIPPWF)>%a1dYpqwJFE2`6csFLz<07s}`dj z^g)|>0*_i(gz^2CLF@8vX1YV;jVbW!^gb+_-W$cAgGqm!%>sx1VTl$9wSKmcvCSIW8}qShHffzb zN__d+8k!Z(gzAxA(3V)oR8I8=)wDNkXlXWVD9J_ZYklxmxfXe5PC)ykBECpigW5+( zkM%#p@2rZUj^%1-8NC7@JX()|I=etYm-%yp9P=Rp9rS&;D1N>mn&t%U z&z8f`$_h~3Q$oF^MHqcbz%~qJMJf_dIYo?zGDHQ3+0b&8{MVM3WRB0*0O*`Xn;vuM zE|RiuUDY7%*$ZaBP4UQpNthY%lJ_+@hx?sn;IVNB@0{Jqv~2ufY{z*FO#H7bT z_`b&*9Pe(1zCFjVR(TgbNP{-HC5UTWVPGytsq+M|bC<%33)|4h z%?FO3s|EcuDb`o4f!ZUSOBWvJsgt9@K=MpzuZn>`b4$VXL?DaztY_k<3PP+Mbx{}} zL3zwknM0y0cTC$2u|-vwv#XSK@0-oaa(uvZbSPJUHVC?phC!jpWk?-$21Tce%a4pZ z1M*xwaG6gzzAxvv={1_4SzabRz!&yRzX-NZx_QZ5%6?RK@gKd1;itzOeljl9gv9sxtpgosE$1CdIDZa{k9nZ$Hm`~g> zCl-spT|>JE`?;yZ2NSd8TJlQ?;GoqDr*9|d{$7-qFl3EBPq=HIK3D&D6zd+;%v>h| zSML+c3$_;HsCj{CRc;Jzyf@4%5zu+uG4^xJ3{>)>Zpb88=GHu)vM;gB$ioK=JqDqC zxusBmYV0>VgL8)M!=aamvHWi_SQSy0ZTxa3UTnqfCjDj}2h>>VmL2ef?mRW#d1w$O zVe%<`F!1J4*5Vw=EEn}g;a3_y`j@z^gETSg=TS5mbBYh_mxv+VQQSAS1{7jxueqrM z&R5r<>Elw2Z`=o3pKZXh*&WKFuHnrQDbP6L5EmsE3u3?ZLd=&!G>pgtm1&W@z%53W zMB0?};Y>cbWfTfy_F=-g&3Kf!9PS??QDUS_d5{PwvTf$JgA`z{rV^+e`8p>>#AjGd21X>H6WKVrlK(6(ayG$xZyS_)5`@=_E zbZ#DMKU)b6<2B*n%Xla|HipHxxUi0o$H3&`2t>-@IE*#Kp4G%;JsAYo$fuk0!xjQ- z4zupQwann5KF>M)n5&+-DtLaO&-Wh90H61;#y*!p_U9B{{GE*b&w1lE%UEpQKwfbZ zSBM{x!J9*8K*N95Ko zre#R>6HLs9y8r@{ggy+R8X|Z1;gd|C|Gle#ET2 zv!E>I(izaOje~8Isc+hE6kF_>gBDHXA&(3|gM^)E_+vV{MH0VTAsSpux3H#|MKDD< z7@u!drF@ej*WA_zGz?S0<&Zbp?Cs$Tl4Ph$e7B~iV04aiW%Gwpuju(p*tzOBo3dgb z?lK#LyHd{KnPC$ApQIcI*j|C%2V@9l13SEu}qB{kP0^@H9w_2tbk0i<`!G@rLi~p`d9UZ!;bRF>WJy zgJKIe)$=o1VjhHL$^BUEUmwi5Z3d#9FSw{Cgo~O>sEg|n>)AnkzWNy1hc9Ya|C>CZ zUvx2EJ^`%d-Vif#C~L1!1a-CT{PLk)Xf!hmUa-X&IZzKQ3uAE7dt01vMHfv^q?L=V z1T!t8YGLp$jNWLra_J4cvGjvY-KZ?%9 
zA;z_f;)4*9I3Y+(Tx4pj_zOJQm8^w=i9;gQ!}k zg55ifutS5qc!NJM47ksXE1jV`^eP|T&z!PDGHg!C!iLGiu#I;5>>F`Bd*WZb>ApW& zSN#;$w7H;yva3Nae{)OoGPLSA2GbYkQ8sB2R4$T(`Qu~Y=uLZ_njcr*;m4(HAFSAP z5?fw(vU4ej?WE-tPOO2x{m){olMJ>0(Z{SE8JKeSBKlrUMCId~P`bif82vh#e2&E7 z?KO!R{$+_Kv+U4QvWll9FA^Gh+Ch{7-F6g5_NUhegvreo5Q;=NWtj&3}%~Ij)kv-&|(Bf=fF@9N!ltrzcWy2c*2fM z-G$jbhJr>V=h-nId2PCYH>_pE%4;`G3Uo!)8v{{V)CV^|55blwsTiHk^C6$(QBRL_ z0ryL6=Vj^^ODGL9nPW3Kz|-pU;y$(k9ddUOR*$1PpnAIW1HV?!@bjoVYf4aL8Tlf7Wq@Q zA(cF=AX&O=dKCYt8GV!DhY^K$PAbeR!S-1!@&!|)g zq-R*ZRw-!x^NM?ztAnT75>(h*u-D!BAb04)=Drz+5&dKF(v)D#)yaa{RmtSh{U{`P zRsrGB;p1-wwp5RR{(Ux}WvM^7t_cR&lnAl(VgV=@GR!dV3o^M(tUI+3zSo9hX4N^A z+rAf+CYgfL#AMvO&ID!F$^6uOE!0`&ES@xcB+A^j+=9nc?&6{(8{~1A82HH6lMmVhU?M(y!_=_ zbbT=qU(kYJ7&r*7)2&2mR09o%L#dOk60a6h_h#)_Zg7UYSu3)|*UvA2w!LBC`F@IxH3~E2H4~ z@j-|a)S%f}3~4&y5cNF{j0Xktr1k*J9!_kPp{5Yh`v7sh1e(M5LQu{gl=+ND!@sK7 z>D~*lk8>nk514_i+HzblbRl*uc+YJdE<%f+mGDq&BPwp67tFuKbHAqtagbLDDi-8c z;O@k@( zO|T)s4D153FkEXC<(F20bI(<*{mm8b`6rq=Hj{@`R4UfEdYt!_mgBVPN3f-452WNg zXKHbGxjaBF25~k&_$(5$^e$o6vQU)nn;@3Dj{&Efx0uNoAKdqpW{r_$EcUB8nrqs@ zyJKe{CsT%V=Bpy?SPs|sUu7WyD{$kpak#}Y9RE1x!IdG@WsrH$Ogghja0|U5wg|E0 z>QisB0ry4|GuaK~kExR+F9H-R0$74?A9S8*2G>u!p#Jpj+|7GF*Kv2|y+xO>rB8ym z{=UGxL)W9{CdxE_u4kV8zcS}!B{W{m$Y(lf!qIZ;a3E*(o5@g)mUe<8=}A3M-xY!-eScoyp{62a}=HDPr_ z77oZO#pVq&K)G!LUwUk_^+dkC!I0}lPg1O$1qT<_#ZL;?SIoidF~#sRr4MQiUkV$a#Di^%Kga^p z#2zD0v6io6VT8ju&iQ`4`@MqIH}8jzDko;zw-k-GSo4<04#IVh{#cNakBvpStf$p1e0}Qz zY&a{&9n^(r{J5AkiB*}>Rq8I$DdI+RM)2N4FJYTYBD)iD2AW=P1Fb!C@QdeJ>W`Q# zZg_f{jefHiC1)Llpt5)T?7(!)u{emLxnIjQN)|%H_gT=`E0RaN_*TTp6-8|| z>+p18x;+Ycqf!wpUqHM0jiI39=PK;KmX2Gc0?IdU<_`+qoXYh|Ukx<~Jgv(bK;OCKs*k}1!ShYlf zz3nsUnW6*D1LtFo*K90s5>UuS+(JEO<%%O{B^KZDH0XKmNJ#8*Z2$7G6=Hh z3-%IsX7aR-cARNKfYV}Zdc1*UouK*dWfI>ypbo94JY$B6ov<;KIJS{zS?fvq{Z1j> zP|K_5`jZ9{D=n@)U*@qd2%Tej291BBNqJt~t z!{tHHXL32{^gl~J6cg$Z2m`&FGTa$KIk`UDIS`9M3gIYn$P`3p(#0x|=fJ$a>6n>$ z6jNTcvYYzk5wn|!!>nR)slFyQ<>#=EFVoSmXB2egN5RLn)Qy#G%-3y=gmXJ-h*Q9Nq_SjJZgx;{tTFD`p+58ie)g71(fO8+9Uxs)SsS5dmgh<#M1B_O5IyK4x+W)S3W3N8|}lo znO*!P+&|zbdQ*lisAB>s#+5@#D+;2{`ZBc(9R5mMk5`iwP$?z9(8fy;>)6Sb$ONo= zwFjG@yysNW3;r{!*+~=XM>jhPs1^mS=cU+C8YYaXh{MjkBcc5c?Lzw{jD9hEx!Nw= z7~2cq-x`Jfp`=APM}T>rHl$26BeDDZ#J<89(lMChb$(&NN2q8^ISFP z?1jF;Sb;UYFaq6T$OMnmb)WUyY5iu!BTbNv~ru(@sn+GqLl(?-)Vc8xuC+qMX? zHPcwe&NHa|-$`8ac|Y#7l7pk;F0s}Wf04AH3PJ0PWWHM(dI1petL zbc)CN#wWa9ITb@%irM@oKTzH@i5Dx!vBF2xA0Ffm&eQ&I3FTw%)@NhdgY#%qaZlW| zK}Ve%gTO9Q~kus4%pz%E!^540D3iU@eKimsO4|0W+=eA&MliMW-2@%_za8n<+nW|9C~LaW8OqaGE>``g~(!0miK)f3S};MDalTnc7BW6*!n0cca4=jK1C+rj@R zy-Ur-xArjnWUfs&a&JC=brH@~NkPY-`a)dUVzSW@hrIO#vAq*P`L8)#B9wvsv>N`A z?k_Fb!|=vuE0o8}Snt~@DE|^8{Fp^u;je?l4o}3e<8lc~#_Zwa2WNuGodDuq>oA#B zk=S5w0oDihpnUSmik4Ns#q~Shu4 zGOZzccy&`Tm{gY1JESLTShxbrb5?`&My!x6|G?W`xN|M7TrO%%H`BW@0b6fJjqe*03C%(=!CRbybJnK>#tl~Hhr0?~^Q!DDm~m#wG%S#J|q;IJ4w z0t(?>`bHFO9m_g~WQqqF(Qfj&R;d5B6u$h92FH>*;me^E^p@G;P2WoN-Z2^+9OL1J z_X7Itg>retW3kq+4Cc@o!lZ69D(WUqM8hQ zthPHFLY&vZ$L4Y}uEy~E!+yNu%zD~4RnW-s4YPShKcj7mxMTfsxTlj$H}X(uF(=OZ zpaN!WHUoN@A&#H310~W>R{5?ByAv;fe6&B79^;_wK|c6Pqruyz6lX0Y@A&q;-0_FA z;I&c*i(lq5r@`I4UxhzzY>h*cyYXQCd^}v&Itgl8tH33bLF%hK42n5Mj7d##aR=S6 zZnX-Y?KPnM*%t5aO-4z;9l_(}G4^dwC5j@><7=8JTp}&Nu>WB!_S1wmGiR_`xeF{_ zUf?Z*!iDCcU$}exG+dW?3hR3B!=piq=smNNDQ`32Lr$H6ZFf&%M*r>P*U;kDTJ~UX zS&WL+iDolahNDio5iteNi({?b;o40FO13Qr>4UjuO9F;q^YKGy-dh5tb}86zu^axW zDh7pklQ`wfZx-{TlC;NX%z9WVR1a`Mcf(lnMsHw_zb1jgD;-Qz7VtF}>Cf1oPgzOg z#w?cNx4wR;824Dn-$e|XW;ery0bCTa=;eFq7q*IV>Wv8-Hbn*v(v`+=? 
ztNVgHwMyLlaTVNduEJI|1vDP6f!PL?*sppNHuhZt<(g4E>KWaL;jvJ2?6RR)f; zLs(abHWvb@H@RB_w-Q6h$&^^r7G9JGpU4JOtU)E|PRQI84DE4s%qfxh$RjJDezpcT zI@z1K?YzU=)B!XsBY2C(4hSKQH8$m67Bcb~H))!IF*j;4_rQJ}AE<)6-^8HF`U+5+ zFdCgRLYWA#ti>deXEf{4&s7SI_K{59+RY?q$G~+@^4qPTtWBtzSoB<7+!B*2JnHs9 zjjjZKC1^KVNfx2ejUp`HP|h5Pw{I1sf{Wd4(EHDC{`ze=bge5#^_%BWH0HQqMR)39 z=gMH&!vNfrb^z-ay5MR};-kCgXsmXNZ)r%*;N@EW?vSTYz;!3 zRDeo`$BALwAKEuGvT|018lx64$?0E0#`KNgf7=)wR3y-NARc5xh%>o-5I3Enh5jr0 zf%TeeLRfMsIAv#niSbN)*RKd{kEBE0QTnc86;79x(F`UbzUFvPENm0*e%D1?6FrzR zCJ(Kykss&zKvpiD0-d6>V3GfeTaA>mkoOjlFqAa=fFDdcWC^I(pJdp(i7BSPHA`7k zCRhuJ++QsX54|tO((h-$oe{rX_Wy^HKkhdVZ34TFxmE4fN|4fnm9 zfzFlT@NL{_${KZX`+#+PdWSOBjH8ThjUBvwJ_@5^a=~1p1LJ2VfRbSWIIhkT-}xsL z#%|h%2HP*9ypg;KqmGO7iRoYA9gGU|lW^WR1~Z-oqEYNRrlPqJYuOvhm^p&ZqIJRv z(|8NAfrvi!rzL4@KA2P0o^06f$C_Qk*ib|HmXD8-|?b0OZbDuZ^tJTU)D1PV(JaRkl#+dr;EnaF@UuTy~9woV~s zO|dXCQ^1im0eCQNDtfPa#Vs^kx%JC8ynXIpv}4W^x*jd&{z0UhH?4!|2E`}=c>^OzR>q|kA(gzCv^2Lyb=iD~TAE9R{bzj(_^i!bN zXbg(2%;rJ;Tm^snPX_wy9f$VlNg!MBKxq3zoglxmKu3QlOx}=>mgeawr)QOH;u=2c zK`>f<*aXIJEy45R1y;9jDfXW=44uD~VAoz_UR#?3Q!_sL*^X zl;3V)@(G(E=+;br;ZQP$dlHkj*GZ;d9*G@a;&~nAmO4m}Ec_7(Ndp;nR^@|e(_=yL zaS&Izdkp-~>O$k12(E0i2xK4oFsqH*sO#&kQ16_@q^Y}MvXKIvQk+3Q*aULM?P^!yx7OUtHu+SCP9r0e*ft ziWANrL`Q2CJas}r+=4@_$*dn+Kgf*w{HB7?bPN)H$e4cVC6v@o;}*@sz+-(DDz7|& z63uY5?%A7%W|Ho9rxr!7X=ZYr5}~`J5S@R1(0QJ+ zr%|x^Cj!`Ci!vJMBh`t%6y0t`hs3m6-rJb47O3=uz3?vr{{06L#8^a>^KYNy{te*=QzIQ z#Jpc#z=G@|xf<#B{uSBq<9H-Wn&t}9e&@hGVLI4;PX*_)yKKKlHRu-`;r+Q;XsG86 z-*#@q8EXK`sWZXY&Kl~kFzPdipxJd1PiQCwxy2JivL$ zFR`DWW};%v5<|Y~LsZiT4lb2^{x=i6kQ72U)rqiOI}wXbPC#2j7A$Vc zB4)>Go;SS^W&cjZ9dhC$Z#yA0s@8y!#b@T+s{jW5L;Iyo$aQ>yrbpD+R?frLw&gcMf_k|3eA&+F4Pirtl zrHcRe@(Akxx1HD&gT+A;-8p?$Fm;JDHCm-m_gxkSHT}mtQ-*Sxe*o+H@Hh20edRsN z$wR-jO}w#u3YMHcjQOuG!V%btPIr#sygP{gAG_A0$@oObUOxp~7x*Hd5saQ27qFoQ9IZul;`Z5fOm1a| zD&r=CBi;AZn)@R8d`NSn=iBc!bW868k2OxB|48DUR1X2V)I#gUYNi!(3Ti)GlOW3nX43p7Bbc=A4BmRwrS8?-pX( z%oChC`$FTCIdJ_QvCl6?kQeVC-m)lCFg0st4d(~rqv#-X8{92+46f%E7kaXnB_ zaKJq3N*;Tt8be0j=aaYP;#+IqdP_L(yLRtdqb9A9R?Z3L(ftKpLyPoeFp4}5wTu~BB< z=LzBVp!(DwHlN#%8gVC>O41xC-lhbTKE=`Jrp~M-GKD#;dd(YjQ^kEgoCcjAPndF9 z7V|voz`rC;qx@AIs~<83>`zkeY2{`(ub+>jW(T3;hYf7@tUCny=Pn`kGv&#Ab1*^u zII|7B&%&|=R1*h5%WiLIiCQR}yLSv7m+of_U zd^2YR#^$=QAsY)(+W%LD=Vv|0m=nu2`k8{=yUFO;^cU^m$UU|$#S_!ZaB*%JdM>-l zTl;9C!MTgj#*M+~i8fbGCcn(uzL2)f1YJy@@XountbP7ibSPMeio&lf@D>Rp`Wof$2>z zetpFP(9aRL;zcyv=}Y?4i6 z^BB*csYfgr>esRy0K@7-KvCJjw8#sX@7V_)y^%r)>6OmKKF}rn#itmif#uIj*s!~a zH*LMk+kV|(gX7&%$HoKPm!C(!j7sn%{i!*23^Y0Ku+PbbZ?~(^e3g`U-P;45 zuT7z4QY9WXrEKY-!Pw8!2%oLDM4ja6+_PT_Hr}p=&h9i^?XwrH=$73y+L1C5VP;s485KL1Si< ze~jL>UQy%xP9n(ZaO3l+cZW%{+DBH-O*5pxDkPl zE^malgJu%{QidH$&sg{Nqd2gS4N9iJ_sl_r@0Ag&$0*MzSuFiQXE3Fg#&3AYM<^hw`T$3kNqhl zH(@n6O?}GS=ZyyaQ|oz(#7fZr+|3O7#bW+iXK2ulA(qh#=9M1|#?GtZXlF9&Ui8IP zvyE~0b}0r7%cXuCN0_rF1Eo)su))`o#cn^r9W(C;A=hhpoPiZ4=tuDW)IryBZ!T*& zF&X^RPvHE$a!8=J`1Na($Rm_FQ~AL{$o7~GO-_eUny*yhDVRW1ngi`F2^H7l)v533 zhLC$L2coDOsNvdkv7_Dq;m7S$7?j_}Tz1ExR?ELU=L*d^mS;ebK{MKe0IV%o4^gj- zK=z_ZkXU>YBF`Ma;y?+woKS=No~OunQOz0#GTK+n~{Mi(n>W; zc-RN-Ijlj4-l^y|Cj<4q#p6ixa_nzVMI8>l&^EP`_bfV!QLD6|LAeg&XM3P~UOaUE zs}I&&GQ31+0GQO00?7Mzy>TgmSH8 z;B%80YL~@O=a&kK_K|{POn)JE{7r5-MF8W0SDD9|%RK!dF%Pa+pks1ATs^NrIlnnz zI%hh|K5NSzdj_+*iUjOfW`NSoVkmw~{G6iMtodg$44;>XmhZ#KcM^@s7H3@FxWC}31*t6;^VSh^oVo@|7D(_t}_X`$?s)-$rMKPutS&CPnflN zmRRk*1Y$?eVtZ#q;O>}g^j;hXwMkOq9m~=9V=Zcq+=c$*W@5v_U978h1Jk~@6z^KA z;g@y9Ue^rbyTau7b#*1${=Uod56~TK*i2M6&gDiC6ZnyW(Rg`<0&6S7+5HhwG`r6? 
zYZ=y`xhYYv^ix~b&|@2PbhNP`x_c*#@J83EF{tQYE_~Y@4*OltL(|9r$goZ0*3mL- z$zkHS(+I2AFU9V4gW$}cJnX7G!gLSFLE<`;sgBJ7vHEJ1o0JM(S0nKA&oXw`^;=1=~Zl#oJqX@R-94h5}9f?|1%gz_qL-nEf9xE~_XL23c&uVzE+7i<0q z{UKJ#gMU9c4E6UFGRsvLz)If>q{PBhx_cH{?%PB9pW`U6`Hvgl*a6nDFND#g;pCey z0`v8L5ZqmY@&Whh{!;@R-KxMMf_Atv;;ekBW|>DXqDYu*_WD&G*vYL>VsF9*nPs8l z*d1ci^WALwM0<2<--I}aJo9o}fbzbeThhrj|4xQEn`0q1JA!yk#2I?Gh4Lv!AmR^c zT#F8KUC$UGL>5e#JQ8%g%Jg(K*_%pN*f}{o9k=Vt)zz;!*+q> z!)>OivmZmIogv@oYG~Cv#nSADp~k@^kS*&?ogy@I+8l=s`)Ti8 zhWbnb5x6Cx=Qdle7n_7W&FA4|;vCf0sD&5 zE{zANbrsWE-v=U8l<;==5;R;J!F21sbFBwzAe)>hlot47lm~euPNWLTV<{ut82~cB zN91A=tg~L#|I1>LlkcN71C!^8O zC`?lgB>vzfv10555ZQlVG7HN2>Z$XV&nF-wWEM!4t`vNR(2eqTASkwPg7z`hZ1}@V z*mHCMntRq@z1|w;pl1$B^j$FOn<%z_^@8)+wpi5|g-*A!$jf=0x(NGW_5|`5SqBRm zU2ADaP=)UOXUH#dftzhMLl5(O7FA~iv6C<0^sY=WNTT1!RtsSm*Z3oD`O?=tGPeZ~CpqTuL3Z^XT)sf(rhn@>}+!ZeR*^pEbBG0e-!!x!og*no*pisIfjG}o< z>zFQNxV`1Y`2t*2 zli|mtqJ7=Ko1T6K)I9d`%2437Gl^8uDEg6jj^r~_vS z#6Fo0UL(ptl#*Q5m|o93LLRfZ8LC*anRrjZ%lR*pDd_fi2h+LVpS>TakJcIYSqSq3 z@BRWz@r;0|A;f?UK8bA|8<_FidY-(|2X&%9iN7C6M(1W1>PS!H7E)K%A)M!3+cq=t zsYU4j_5e1MFQ8%WKqy~g2v^S}L8ne{%!oP0?)IQgtWg)B&!lP?KaTieZN%j=oy%^P zlh?1Rg-`ucNgY^`*ke-&nhtx)S~h#|meS{9?|!>Lq1Q@Y#_9O4emSbOh2tnovwoDxXO1=bIRF{(;JEp?<^Bl0gtj`@?c0tFIF!o7E#ICFPkX_o$*%3l<08sCp!l!L+i`V>$tIfl&q6!e`Ih>9WI%)>C3HF=DNovUV{k#smWEp_LiGj^fw z^;ytodJ<|aP;iqzbmQ2Q&PV!A#QL{1-^>^erVr2Zg45C1PM`nsj`5&H9Q$UG0jPTA zQvTK+d)daLmi7aN!D*nUSb)o4Av&%#6)U^;g%G8S*kq;&iWi5)2D0UN^AmYLj;#Ub zP4?_`>Mjhn(Z~9+8(_$^7%;>IkW*HOA3vS}|CR#CeN_hz9tm(|+Zg(8kRM)q6ttO) zf`6y%LwmL5eE;hj$PS4@{jPZEpIwETnrEmFCqz)`f1iJDNyX*EbI_+Sgm}^GfwpyG zxz54+TPt9YN)l?n6BD;)iP>?o8+c~k<+SCBU1QI(F?rd5K!P)lBgUAVbx9V+zm#UXii#95xr z+#D%C@&ci~G8@9r&`t5_BL+JEa*6XvvzjMmXs3~njxR?FIg`d>kaB;HBYNVUzXsr< zUykVZWidC2kH^mfdDR zto*BjM)`VZG+P2F)&!4)0cf(}IO!EnnW)ABn${Fy%0Uqi6MCTYu?Vob?GM&Q+hE+7 z85os7*{7U27#2>mUf2VsW3^lGtR;SOaURpYrG&NvByi07EdCr<3eM)Eu;AGx=onlj zc9^yT>|08ib4&ux9AJvZ!$VF zu`ie+tAzGRA2UJt8?#21p_c9~W_l)=dnJ2x_Z_5T9TV5TCq5T>uKs_^hz(x{PGisDnHjY>eQqB4 zAs3*A{RMEaDQD$-Buu+sJ{I~GQTOi|P&@ga#bpMf>UFyNU)hPZ3T-gHc>oNp!?<&p z7#iABvE!duL8|?qS=8(}2-@leMHSZg|GR#h(P=O~=L+phXoo8q2G;Y;@V_3p7&pla zv%B8&cG7}%qWcQ!I}U;4UPJ!v-!h!LAq#c>&J=gWWMPw6&$R5%Fs}{K_|qpPvW<>#Ujzpxy$GC&~e|FW`W;KCnr?gl%0)|hmXbbCDwd>7kR6OoWX%- z^0E44Jo-gsppr=l$}|&k_H9+_;hc%PeTJi_T_`{7mxz~7p28jP&Y@U+DEeD0!j9dc zVp|r(r2Q(yl07mARXv9qk1QeM@Oc>7WQaQthl7W!7L4+mfX*{tbLm4hE=@Q^x2?a$ zO?q3%&*R1d@^Y{+sS+F(k^jjc91BBEqna-HFb4#4$C4N*Fuep-mPb+8OFfk5FNzg$ z7tNYIZDIS06~sU;#Ei+t|L2YIrUP}na=>!bN+y2K%PQ=TgQAn5I54NPo5QjTmT)b{J%-_ER zQw~26oW@<@qBUOPY^U)M6nYeOwE zakL$y9&QJRTorJ&xqzBALHOgi8d}R|fr?8gq>Sxk@}*;p=cD$GeyDamg@px%fNpO)@IPG)&cY%#ET-Y1X8FgMGKBzaEYV?4{KcWxp3II|3u z_enuI=`Yfq)S!M@4;a&3jo%){LwPVgZuqhBm68B4|f97yNs24 z9mJN1P<8}QW7n!C=D+haWs;)NBBc~WjX%xwKPb5Fgq~12o5OYEMc{mXA*gir#+1$* zTxaKVvC)}m9GN{9i-pdjb#n zL|vxhe|Xy>S2*%!H%84(fkoX4(7TPa_XT44vJP%Aqze3fEg{y*1$uhSLn$+3?FSpV zd|@0&UucW%C;72J@^u$y4~8!N4mN#RAf~NPBhKdlel>%9OB3SIDCq)-56Qrf&B1x$ zo_IL&Z%ir567P!&#*W{qV$1zQQR8PkM-B25*~W6)$FxJMuYwm3q@=SY!^>(3DwfR^ zOe@sDNv)KdUVX(Jtxbd{-z(8W^DB27Kw7;t8f=%>GtIfvuxM8S-E_vH4Thqz>KxLn zN?_L+>U4g$R){VoZFuJkt`T+)ulH3a-MA05=()1;Y5lNY%|x_f-tewI6++Z*^ZGw} zOp*V_thkl>H`Z)~{ON)GQpRr7B)^{WwK}eEN4}`D$nx1F>?~aZh7XlMCv%**yZ0Zi zBHsuuk_MKOn~ZLk)mVxn@uf5a;o$6AY&yFby0%d+bM1NXP_}_v4~xE% zQi`Gm_cD9R(~Bk>RIGVeh^B|5&}FJATEzHsjYp%v;L2+3=$Zx}p6$e?+9z?WS2SLA zOed!9u36=aC2(C_j9xER;gyHU_;b=^lqo6T+^r*CPR>_I;>q`kIO9#aeBifJiK&14lkICEKQ8~D-?`gEAf`=LBitb3JiL>2Gq+B z;JORMfBvS==U9|MN=Xz%(K}J6RU~$MyMk*!Ifa>eML2HN7;FmlVSj2upngn0u6$!J zx8HmeLb7c^gLg$a%-Av&b*IWvW4<{HZoY_y%}b!O>?wEBsRr$Dr_el?^t1b?d0m<> 
zKK+WAw_Fcf57j~Y$KLRdz9AmnIvP9b7lG=Hi;(ds9edVfLjR4$C=H!ZAzCyW+9xc; z*f*Wb=}J8PtQVQoXSLYA%MH4`%NZU`h0dMCISRDp-jp@yxPOYbrgU?&;lxJnIw4p; zUj+?cD}-(#hvf&H=6QDyqsPsO;ILvY)T~SbT~7{fIiBp)J@Q)Wn3*(3(d-pCfLnhm zg`XKGpvx@-zI{CpeJlm2$ce_CXTw0{K_qK)ECT0#Iy_|uy}LUfRyamP@Xy6gSQ~Yo zW@hSzq&=yri+oEFwmd;6g_~AeKxg7PILj(fB3dZ64oMK}Pgmm_Lys`iRY$pZZXyrb zt_vZD_e0x1UOay27;L-P$V#^*LQ`mO-sQOyyptoLdGbV<6%dco_p0DFqAy4{%&Smb z5(HJ(FqlDE9+|fld{nK#h^*e|Q0D?lx+m~eMk&fn8d=4QQ<&10gthwuz+*}svmN<_ z%i3JPbj*HqnUVxP?zy101%<}>k9gPX5wy$hXKM3`x#`ya{M0;-O=A4eNCW;k8I zAs?x4et9u8b${dQ^DbfBL|LM+i=~~epvh64_ZWO__{6<)SelE&ZpB{ zdgDQb(gs&($kn3mf;)oSkl#$YI7j$s7Y<&!J5gmG@$`D=LHF^U5T$JgF`7NdvppO% zW|Y7%uM$X!i4>;=97Bn6Tg8V0SB#p3Xmq*&G(zL~hgMDWuK&Z`9>u}dWjn|Nsfmv5 z7Oc#{4u_haNB51o=sfoztFe^eb=6E%rHs7!e_~Jxh-Cf_8K9eV7(S3z>uK);BQG0b z)YWyw441*v%kk(qX0hNa-GhDat-{(B;mq1`4k)<5#+3S_EP5C-diHRwtp_I3=iCg^=9fU4z8)rQBA?)hhZQR}S>hkPRD_-P z`584g)X5!%Ru@ZP$KML#nvA4Q+j#C>+`-#Rb6CfjXrXg`D|d@N0uD8WsAKtt4M>`S z+0nh2k;zA{G3_Atu6;xq+8=C6X$X8-?})J(lbBn-oshrtGk2Oe6>9ITLs6F{>yy2k zc!~NT&9-C8J#X6--$-IdbCD(NTmBGhYc6eW&f4`h{&YuXL zSz2(XAQ-YAPX^ihCeQLsWs5?u}sLgOup{vnu1Hq60m-o^goWyJs`&IZNnoWl_ar42uUb~q-H%4 zLa2n0gd_-g#GTAmYO#4jml!EnzjIYAGH(BAs6X;Q5m?_g`-Pv;0;}~z(3&tK`Im}r#T>IYb;C6Ka^`8)p zo^|oqHB6uDihV$Tu=6BDjRxDGr{pcGYL#AX#KK=w*?sRO$)4DNjbDah;-B%*KCT81 zM54GCM->2o~b{ac(#N{Pw(b-j!wcy`}d%R+hf+3;Nz>!u`qCHJZL+{qVlj3 zy!6`%Ct95cGQJpx?6AX>oSiV*s}4P0grU=k7#x0$u?xLDKxI}c1TSQai)W|Nr8nbp92Eq3SI z!}eaD7pY-QDVlW8q;4kRSiB<#x>T>D-tlPo$9pwqeNAEf9yz|ox-;n77zZ7d6yhv1 znD-zRTf9>k({L8HI4k1z*`+Ab^(6^D>lh1qHt=$iOb=YlMuF=lNJ^4|hwma8 z6199E3(|$-7~3cc?29V%tzKWM2{aQ;eV{k))p)1T{a5;7|+It)$15P zON;8RQh_YpnQ)&bgX5_gpfP?7OwfwO0hYsjsAy7xwV(T8^ND(D-0ufg zyE(RNj>J>_j7KyxkXz}y3omG;qQ~SCqT9Ng#`!eB9NmLBv)z{M!WM#Xndl;mxOHwx8ry5SJ#P)G3nctI+weId{?mmGtShfJq z2PeU)+EB)Ax`NVwt4swM$<+hVdPJSMj^$5h;lQ70#<~wAlPCU#^Or1SdH?U^{jWR_ z-S_57542m3hTF!EPv+Ilbc9w=`QF#CBPAnozU|^A*k1EN6`Zt zFfn>e1iU^|-V1Mb#;%0-SM4$OLm|s;{h=Ka<^{^S4ubbXtR4)W!+sbd`QtZGd>%r=myB|FiF$J`hh*+klLnLG2qmECdbs2ndDKkjNo z&!)@RyR4s{x>$x?u2*T-g)96@xlB-?}{k@E3(( zU}uP-Rbepup9$z4u8o$7RkYB3Dg2#og&sqY>+)nhjsD1LU+0-%b<`gt4cDXIpDtp% z<`ZAx%5W0aGaEz4vkU?6GbJ*>X$Y*y4a*KkeUD z1-kC~V6o>6n3rksW%Zdb+^Pf<(wDG~mnlq4iv&-`>Uj8l0%Cat(fptS7H2hJ;_wXU zGFw7b&CEbSMqq|i06r5ALe}0L@Leq#U56xr;iOBP&&X>;$R9?8o@0Qw zaRuMQcN62UTanhpd!%dZIx;iR1XbnBI6bR2(o_&fWqx9M|H@QcD-8wFij}n2&=xWd zT!T4}Ss!rJV>)HE66SwV#a0qQUsf?!+HeW3^1cqvIf)?qBL)kF3)E`p4*206fOb`h zjNNgJEG=ZLD)VYn?Ir7>CHx8o%w=* zMOX4b!61{k4J##GmTUN;BoC_Fa~5wIv%K|!5wPWeA843{pg04;sI>$HbNs3;L+q)M zKFix2W?ia&2^}B57k&LrVP8QJ!nq$5ccejJwg|@i$AgB$UfMCBPI|Q061R_&>Gf+@ zL34ZwcG+Cw!%!JWJ{_W`FWX>Ze=X6OH5x}LoX0~BPN=(zc!yY3=mG3+xnlS$B|&)4$_khsr3#r?76_7jbKOPsXhbs z9t{VaD=}q6?wxO;rUC}n^k0czCB4W=s5utq!X!7b_iOV!a(Hh06P~m z2F8sotmoNg`u+7f%v!h>EUfFWH=OwwA2xz5^(1y_@faB$NqD305{tz*iOH%%EFXH8 zCXStjatE`~#s4n-{e{h`Z&wpVmfujAJ_+VDb112%q-jVTWQ-_>cK1b~{YMjoy_9o| zUI-qaC(s3}2nJ5LO5K|c(8I2rdoqsVi!3vglm}2jx*j-A4<>y|owTq(2}W3GqyOj& z`0DKrRK52X?eLAH7TTkrg1!5tf$^9!cMKTLHNmPH4_=9m*ni|bJv%!ZHl61% zzc&pp5D}XTcMz-TvDoACfo1q?u{bIk>~BY)N!u~#*|q~PA(eEcPbFSEjG&@%0esuO z0af;HM3cA55XE@-6Mm&(sm4(nTX>1_q%6Q;e*wM;*2j!Dx|meX?&CV!*g5SMsW8sL ztW%f4zp5NJr2K^<%?+H7buJtk-GHy^)?!C;ATd0<6f9De(0=Vva+}-A9P*8vW#40} zYuQOFqlB>fTLE^uMNs!IF?d}2GFBQ~g?Ysn7_%~ye%%}brByqjbNCi`d?N-s{6jEu z3+w*`8Ii2fc_h*%7Yz0sL7vN_D(&5wWZ?XHVqW)*Q+U3bouOXPcDw(m$LmhMQ&JeU zw^>Ac7X@Lj(f^mzLiF(K;|sbsLZ5+{ijK9=>17-=(J|C2dI~7~Hx}Hd8eron z0c^V}!`Y#gXsPW1_C0fm(W6Y1|9KF@Mpxs2IEJWp-sSpe24}VRF&VFthGvV3aQL_B zm@t1G!noBSjnCy&*9lSL!7^gK^J&K>2Pm3ffbG>GaJ|6~6iz*(+louj?U5Gv`m+2? 
ziz3WPm5aeIhuL?n42V6zlsQ(`$Wj`*FA!`AFmc$~f0T^e^_(-Y?H3EK*yWzNtk%7XTo z2r%5e4x&=`APkq0Q(-JavZof@N4SA_@l+aTeT}gwbopBhST;PZi1zL|1k1ZhaQ33* zXgKNxX%DJElMz2iZ`wPW>7$1Bn*Px8w<4}`8iU6tF-PCH#aKro;nur!sOT9DSt0Xa z-aFQJtr!pX`}3i&Y!|-Olz@2p7~-~cfMkv+z|B#c&~71P@W~&`eL1Ouy21PZ-<2i!_=_|8*-tY^6k?D1 zb>h1w9CWW~(3R(E;I+OxBYE-ce`!=ph( zXxj-y$ukB^&+ny$k8`17!#oVtwME&^OW=}yowQEqB~BG%$Q2WR)XZ@OcZFIaUMYd` zo04#Ru?D)Uus)>%Ql9Z_;vmY!H+BfIULo6p=d`IL6&GD z?EO3$-M84FThMV7=Ig-U@_QKb>Ij+Qz_QS>j?~^M;a5h5qO>U+nmju~{d(a~)o;irKXyZy5nCG%Qg4`C; zwO~2vTB$%DlecvoLVgIT-6)j#_)v(R{^Pz9c4`W_cSy zO3^MbHQ0f5N26hCx)?W>20)Bw1fF>)Mq3Eue`7nQKHe$_CaGvH9||+xuxD`C61pvZ zBRchOCHE~Oz++Q9XBVu1XH)#qP2(`|mhXm6rFXOi52AEuv1#STIT*9d4P`yCpq7^m z!(U#3wyP5C@X91U6R*H!uV_er;32w4G%emxQI8H^ zb5b%4q@>X<>1a?lj;Efj^$_T*z}WNhe6g7I>W}9zU!8!sREnTYdmjoe8gUlW?7+}v z2sIo@@XYLFwzG}If}t01+RJLZ&+
ijSuFBO$_lAw86G4wRlVbl=Bx;qy@SEn8| zJA$D(`ZyRpt06K!Kjsmc1><`%pmAs^^nPm~x16t|`vMv5tqCHMEn*@)x|fLBmJy2u zGofohm7bWs99w;p=+RB4_{)Sig^rD$Zt=7W~8;AMkUqSHt%Ng7ULz6<*kh}x%bk-=qk)V^De zU{V^j-O6T5&xLp!a?xi5%N!)0W*jnqC=Ox%!94~r>3%Mj{xg+yMjU|YV#edD+Q}E* zYM{CaQ|J?=MrdT+HHk$${dS9w!)_g?~AqXNc?oNrp#(1`8b4Itg42DYcl ziI$=vr+RP{4yPjU2@s(9jdB#eUJP2ep9++=a1)n!p}Aur(Y*JNI;9k#%aQZYXjX}; zE`yv&vVhG#w4gC26wTiif&GCuFAGv@jf=4@>~Nlb1klggR?5IW}qf|Va`Sa1bXTvtFx zt^o|tr7-Z`5fV2EiBZ5QXsZtZ%XLLym^Fi{3%o&F^&)w`O$wXFTcbplOnP73A{zGd zNa(`^(zI{~$%Gq zUn7KQFVul~))MrTX2Zq}{y6(9+Y8#LKwxS$yc#10!xR1_%j*i&`uc`0*I}8C+(5~Q zqydL+DiMOxAYG64p`1Os1D5cafm9^7eb&)2t4?3 z9<9Vz$%G+ksQ0>)s%u`SzDhMXGo=3W_uiLV4~U z4AZK?tZ5Xa;lGJb-Do=gILmvKCQz*>=lJ4rV~Ox2%Zy$-h_|P)`J(CwVxn^iG*+>h z0#A+2y8oimNtZ!$n>=$^7|_dOMzSty1cf#K^EgQHqv0f+QlO2kyTzDeR1Y5yv)X$>@jV}ik&hYw zAwmidTY^xT<)g*AhaqPSb0oHYr}D``SRv1WNbWPS`XL4zt47e6xshlGjfT3jDa^+* z4sJS}#m=koL={`8wowWy?>Pqc{VQn0NN;@GmBTzDlplGNIl={EqCWKmwz64s<97jY z8!ga?@#78q3ox)J3+~s)g8K`9OxbZ725i>T#7}D=P`!oPYqK5P^EQ@Gae!WbWoXY5 zkvX4PSKYdS;~n1uj{Ta<31dNY=WipcVpt9^To)AIv1gBs26NXYf&1ieB7B_yrPHFg zI0N?mv_1)knD5D!=+lmfljKy=S=3(JPE>1p_{H&o0NvB!{@q~cNny|81Z!;lxsG(c zxl0XuglJ?l3`fm(Lh<~eprtmF-Yhd?&syf^VQh8ruW+=VjfD zy0OG}+5fmUO5hNi1meT$;Cnoo@KtNTgWa1-W8<;O-5>fwwW#K<77{v@bvop2xUP~W z&V9Ftc(9q%@3livwjiH0Z`4FdbR_L@J3|L87ed=KKYTEx3>UkU5{+sxTM7eXqjO@2~mN#L;v*>(}&zDbl|VtzbS*BkXPoB3h41pxN>QG2fC1U9TST zE$@hl!}@X*te8p6!w30>bNb1EoFnutRmZ;PN?>lbjJ9qnM`^hy zL=4&4p2nOI{e1TmjKM~fso$j?xVcIln=kmm@cKj)E0&_K-!u?Uk-{IfnfPOc7+hzZ z#J2}iK;fJ!hynt!wc`Zr`5{5C#ArGsHykx)rxAGV2^#~aq4mpf)C*u)!1Fc4@T8de z3Y+OPuYEYPkL_S5R#MS?_Iq~bExrFU8M0jq@cOz4JpUvWpIwSTkFqcPev=S*Z$Ad# z`mvqUK?gS9wE>7)l^7Ow<%obN-mL!Wv z_vM&|>a)-6^M8qO_W?TF-<`cXkmS_wMm5drV3au(^rRf}e(vW)o0Ez1%n2-?!}^Mn zlQ6lJorzu*l0|Ln(V>&MGxTSWUd;s9vMw7XJI9l*=?&C&-3uWY>fzOf9K7$7-TIX@g$z-r-(S}>= zB%q+T0CW#*pzb&4fo?$q^DOS7;$nSBK2OnRY!T>X z?K@Zu85Y&>{#6lbHQndm-h=Q3hbj!Z%^>MhQB7F z{u^B6vMI>>9bsa-iw6p$v*1az1WUbc@XBT zW9RU2H$47h1qyZ94y)FHE@0e-uE8tZTTOX1|K$k5?r9L`B_)x(XvomB#Z%weY)&Ns zEbfiKtU*c~|Ct1aHabN9a08U(1Yy^-i-gk*2TO~!)X=Y$ZzmiEZPl@Oc&!pXJ-dN9 zuxjX*JTbb|FrLN1i?oYn=RYk^L&t((qL;A&$50H#+{6W zGPejY!DM*<_8Qz8&)lycE>)MlnGE@Xj2Ad|90*>G#GWBBxO{RV>o%%i?- zJ}KD4a!U8#5MM_DsytQ3JO>%3)nwqA|9wsm)p28cx1sv!YSJP1jdm%k@%!3TVPa$g z+tIC{=4O^;iGDH2Um6YnT1BF^eHbn83&WYQLUen^@(W#jY912CoMbEmr1RL+KQ4s% z=nYLL7BfDBmk31(ZG44i`@wZxJx-O+LrdFsqF=cekNZYrmi;mC`C1R@WEwVC&4Kn= zBDVLQN44U-q3`27_+P$+M~9kfR9{E)DY_Ig-_yIzIvDz<3fzOAg7mg3Wr!sbE3*)OSu>yJPixf^wLG|bak}}x~e4bcQ+i$a& zn{_wbpHs%Tc-N`6c^$gm%)?HfEwDpVjBdtvh{LjL=pPXeg@%DRT}=M z63*m)k4eCtKN~=!VK-nb+kF)$fvV{wdfqG&`^P*YTDOXc@Ye_sJtiPOwHOZW%tP~l zjRc=sfOOXy&iC#zaQTO2x=qTU$>ISK?UT}bWsR8gJ`p6dRf*QxeCly|IbhmUxNkKP zd*WvjCwmpH_v;eq8Tf}hd>O>{Bz0WlV_lq4w-t@v+29Ve!wZsY&^7J>UuT*w%KgW3 zB1=TrZ4^kI`b2ne2*DjcQm~7eJCwEa=&?CcT(hSRrQ@!`oI2)hnwP`ebW+Ti5e99G zFM>4gyQ$^L4>YTBCX^j#&hk|__<|lgACCc=>oD@53@kQ=Ks*(Ih09IqG`g1Zo_7%sZF6us)W{cHilfFO zYruWM2kP~pgl7HfAV2pQ;W*s_=Gg3{XGB{t<9Qt%(gw7s=Y!a~1Rs`M1E(r8{?`xX zEQefz76uhWn9rEhGo-M))*4$n>ToMB9xWLQxqhY^+S=#h(S?qOj8Jf+IM6Wg%{xqK6+oWTEn>?dUc>oN(V*{&l@0>~6n=o`&oUxiE&PClAG5 z6UJx`3WxsSjVSHd2If06$#n+&&$$-~GJST(G1^Bm9%NuaQXUF-v9Civ3Y=U`xZ~d39tNA+SmCQlYdwDo_I&(aC-loCa1hjmXNnlq49eBEu3SPSM8%AHp z{?*F_}~b?kX+KzUmxl2vu=4js6UuFxdt+?D}~`s^Ovqyl<- zjbQ$$MBI4u3RoBj!01#kjIp|mYnl7d{>UgOoKg#C49+s2rIZR5k0Po=n}sOwDB{Z)yUaB{2)c3#;q>3v(BdOUvtFP0pZ6F` zBOlS_TLpOQSsG-OF{et%9g^adPunN3dzFm>N_;DDv{nHc#VkhAj!M4If9-^KHq4~! 
zmm8gOIDp;T()kR1n=R+j%!tjhO-GY=Hxe1oYy@8M zTZ=yYo!B@x9!z2ba1P;MUCkN1@kxNCuU=EjjgzqOb~T;0;R22u9f!QEVZ=Jg06%?F z#eoxYX4^q7G)bDazr5O^hM!pc^S@=+l`7pD&Z4jxW#E6gVwVMZtPCi#`3NM zlP$q#{|Zz-7eEKJK2wdy_HdcKr;(28bboaqJFb~qeE?=1uMC$DMyZ^rUD*h91h z4V34hwAkb2DH7a13O$QsQL3qehP<^@ed2)T_|_mNmlArI5((0~T*1 zP;2V~u1(7aS1&a~$4!o?w>cF0H_c=2={ZEO#1*LhJV`Z-I;hJ@9=O;FX~&mRniBAs-CK1C?_mz)|C<4;YxA%>)f#U1v+n5UJ*a#` zhAkUppt`LdI-U%L{ab^vUqKF5-%h{`Pj~$JG7LLk@=#WpLp`1zrh8YtlSQ80_^U9&TA`M-*Y0S_4gSaYXg43FC%ClZz+7~~a2Ku;y zo@*e6C?3SPx5cQnd@Ltb{==0W4#K_fFJsdm4LZI>0-;}d(D&U$oYk|Pv5P54=3>b4AhSufdz>kx(tHG0RxcD{kotO-D+rv=s`aGFo$#Q5T z7{~8yCBJ=H2}m9W z0HK}}=?@_XmZgu0OU4-BIZP#%bxVn4L?qZN=Yjad5?GrhhxT)ZK;MT*@_VBYbFDN` zw3E5oj~c?oR5dI$sN;OU+e7!eS$J1&4GvyoT!T>=sJX`-1WoTqZBK zd@!`F;VazULlmsP5t+^v(#Fo(_qQy>ws?OW?p}`hQcIk2zZy*jOR=@#Uw+(&Ae?Zz z5;Zsz@JQ1}-gCw_y0en&D=MQN-)BK!BMC^5CmYMui2D;vCS0*tdPPbQ}n|VEGFJ+t^ zA#*r-XPFvZY@`(jlIB$&-I|IQrf3H{2G^#ooQ{Ch5 zhc8+q#{+fbXF?fctXV_g6xadJww* z>ZX$Pji3?oj(HV@s2Y5Y&$C$psR2{j*~-uK!irolPqbq^lo+sO9En@?K4{)5Pvne~ zp!EG4vN!M=`rZs9-DeYt*FY<6U6)73D+l2Bd|edl7lBby1<7Z;S&hSN&iI_Y&*UlO z+Z;r}r99}=o(nuPt!i`jb8>pOl8A!^7&to{+*UUdJ z6R_jVX=1WyDz^N?@+cXvBwD#c;Oc8{yg%O%>1z#)AQw^Swwh`@-3b#%<>k|m?v$<)gmIi(6goItD&i-|<*7m-doMh~;jG@G#(LWA>;Q2)X5jr3 z6*ylOhl!nhQi|E6a1^_{nG|5@+F`IRq7a)RtT4{*H0$_1rjiAD(Dd&Y;^~%!t*(Fb zeb(pGw7Cu_=z46bK1YbR4#nbPz4a(KbGllA@zXR}&+hBq0u-!H14Wk2_&2c;`)8DZ z$6bmlW24YwQYLYF^o2Uz3j~kRZ}=WcW2s>IeuCd4QS@~t_bMX=JK_jQ`9@Kw5=q3@ zV&VR=>G z(trdicru4SvYk1kgjPiR;sjLgu7Km^jC(hfaZJXzL&vyD_}iH=iO+h#;FwvU{7I3q zw%nmrv6E}wQw+S~AEtfpPeWlzDOiezurrP^^B8=i?>(YmWd0_UzLMjl-yZRKTnOtI zuV!we1z=;%0k6#p^bA?vWAaqS8MlB3XX9}&Mh(P@eT3iXhbi`opcrxyWu_;<>T)A! z9dsaV*R!E;HQP%Bj|Ictu^32&Bye02lnw#v+v>@>ZzZ5T+a0Y|JR-^yBI$o?ISlNN zA#VTrgZ+dDv?stBc+>Bg&QZt%Eiubuu4MU}7n#r%p2=DM^A{Kndq|+}0<^4A!BUm~ zh|{Y}_}YT`q26Y z(Ls=ejRWs^J=6>QmuP$Vg72wRmW5$GfC5zvo%EIn4ro@J7HFaIJR!tXoJF6xLts#i z!}Hr&H}b$-OzS8_bN#1$Vc-WcaY_yAsy^fdKclK|FP?!S^B^2J`I!Pa$>uVPK~T;3 zdc9An_CN#LPIycjlJ(JI?l|bWJQ8gAb}+bh9{74s1~=nVV6|y7OpBO)WT{=}S1K9X98127g!m`9`%$t?U z7wfqY(bT8p!A(H53kUIQYbgYD&cRdpF|a>565|u1q3;|!ugcaEJEi+%-osjK?Jp!h zhS#HN+e6NA{2}OMJ&nT`VzIAs7b(*)!MTNX=+%)#wI)!`XL%SIeX|kG4?m~IQz^>7 z4xsY?RAJ)+mJjn(ppvr_z$COB6n@TvISvAFIvfn$ml@}->MTwC7KD0vBf|(%u-Z4n&|A#T0(g5pcqWMxG%XnV_t+VnVd2S0m${h6Z#}cP27eKY;GQIO_ zGWJIghtMyV(6;s^y>;L)zQaP~eYN2GvUltIy%m_3T!SjK5Hu2AQyKeOe{WOA*3$u0 z`Rx!;ZCwL~d)|{(%9X6o-9&l8CfK|z7*d|b5c4l7RQmZ4;HMvS#LZ;v+s<6U=_#yu zmqa8{8=!UPexmzzG3jcrp@C}@z|rIaRd5*&^2^xV@7qi;b7tL@CU0o3Xdjyhj>n3ajYvzeRpxEhM@I78(N37mdu!G zmIh!bW6(Ms{lfK|$e1_9g^TwWz@qLH^qORVUDLe@Z&eYl-x!A$?B8+pPKKsvW8hz| z14p_KpNzkTt+~(mpT|3+k3kSr7@cN+s*n;rK&QwQYV2POz86Y~y$nkLjPoVO4~U@t^K~qnJPW72TaAha^^oYljjq2Jh^p(dIc?_x zxWx+vV>8B9`Ro7<>%#F=rT`2-1`xM7RV4jW7UQ{%0?i;1Bz6Zf){-wL{4s=h{FlPc z$Cak7L$|}mt|(BqW}Nl)%%$F*MBWYSI&fhEyy!ysxE zrVWF_aO@CYrGe*e65G!;d>=VKlBI2lUoV*Aq-&;VqT)*|cT9j#Wc~fNX#A(m2l)rC zK&Wdz`7t6MJQ+`$7nV+v9$rE|n|+8ZoViuo@UdiDjVt!4M#|9ug zq<7q=;MnPl&|_#azI0fFzC$L$wJ(TvgU3K4>k=-W5{+7!^N5FG37NH`3a2bWHnI@|tGpUu|fJ>xZi*dGcL^st>{&*Ujj=%0eoIQr)Sb|%{hrH9@UjjS{{D|5!r zYp3Gh{eT*C90=ZtMAbeQ>N9yHnE$D%Zn6(V0h`Ob)YnEwg>|U4rkDsOmJlbFXHe+& zh1S5|W5%CVFL#^}f#84y$_WDKa z?|jWyT@goC9NNLYmp%Me)mg-sDr{NNQB zuva9_yVbzhU>Zu|rxBBvZ;8342CP*rfJ_^fcleWuW$p9OzWqJvu8SeC#_)Oqy~4ouUW%6^ipf`jNd`s>(Rk$zO=A^#v;3|A|Qd)<=)Iei$)n zG{_^_s5F!8pcU zUfOGpqCd%0RbhZyF`krpu??~Ky^{Sr{UG>11ypwr0r5Uf>Qs0F8xH1UlwmyM7#`;? 
z-@CvZ^KPcM+5NBm^?Rx*-~+F38DDyP5$XC>hdwrebbEvi9%*Hc*^kK>yN<&dHCkBO z7>S`R!>A|seD+?rtXB&P)@HON*$KnJ2)VawcC=eZ{C8kG7O5bc68Q1vh&GscX@jJzEJ>>THE;R@Kwslu6A%u#<` zkz~0XLZNC7PX1Gak0->T&xK-)%l8J2l@`=-YYpux_{Q1JoQk>&_Y;pfPl(TJ6B22- zlkG@1qnPd0?T-;SzNj3XPQN8$^;2}r2}kVsnFzP$Mqo-q32tOv&JSO&W2sFA6`OAY z<9U&AioM?z*3ZDD+vlNEl#K5?H313dM{~b zer1T_s2s4g3aBYzg)`!4pbZAheoL(5G?S-<)2j}Nfk&nW2n zy_-Kg%#8h?SJ7sVB&s0qPtVoNK(TBjd>kBuS(^`m`Qkhpw@JpFoaN9bxWx?|7Q+Ig zWVTz@2gknxux)uIIMGI0>T{l-_<9^X+@AnnXETmWz#%Aku@Ku$vcM$Vl%&67?t}wu zjm(9k5y!gCS($9lDo^b!%0c#19wJxyqetsJ5MAox9JRe6@s%z)Ws<`9;-k>q(v9RM z8R6aC#>}6QM{cI1piFfwwD$2xr%fZ|xa7djuk2YHE~3ldhQs|8tmF9ODHphIGMf(# z1%(p_saC{wIOih5M3X(R_0&4_+NJ{)ZqMk;zJ+K}@|a|;J5L-jnBwIUENs^RTmMfa zu+$$8UR;G5HIZyyQqFfew1saw;w3e-w;-3lRD$6hC(>bi2_{j0)c)iQHXYeG;m9nO zZ!YC4JSqlJ#3gWE!JNbPMUcB~GWPVyQ}08r_}r%ybA}dxN^%1FdY*vRCpOR@mWlJ0 zvRuOE3HY`$1$?%zhrVJVbsffbZKkK`s;C;&s^Sst%@;t_D}~`EVJw$vLDU#$(r98C z9W#0YTFuxGZM9J-_!&oeimcCab`)nQTnTMMWzf9wAC@<+rLL7hqin6knWdZq*aLzvND!4Iro#G=}#&-GL*DVnh*W2J6KjM z($rjj5!k&6gfYR>urS3Ii`e^<3)jVt*2mN>#eynKK1lzBp2julaj4ZihrdHG1=Bwt z!&G+H7MjITt(KPi<;XyMQ|bf4x?JRcFu`j(?eZ+8GF-)o`qvrF)6 z9&^FT3aQtAYbc9kd8w~(A3uM1?S^!16nNi2*%IIi?bQX%RS3=h4&H3 zD?b!2DkGm?#$#LDMYvn-j24Oyh{=;eVouk<(V;BQx*`e_->xA4J0GG~9rcNQMLgEm zLeG0OP&hIc)bKi3#J{3DEc$b2z!|tBIhdePJE{#E_`*oa$K?T3};w4Zm z>LXV3=R&iM6U6vj#U9aaqP{Q=t|%_Y&3TpBedZ3aq;pVmXpqQ;i6F1T83k#7S8p5| z%kGJcv-v9+>-x@v`nXsuo%WPFp0^x5ru@wxG@HPlp`H*L8w(z{82@32H{%CZfVn^j z*~eF)Yg;6)T@VYlp^Q_L#lAhc8pr?8iY8=m?Rxbx0 z8jlFazXqyHdia{LSJ-`R6!7#~P2wJ0g)7;isP)H;-=l5~wu^kpU&pQS&fBBd-S?SR zoEQSnscI+fic>wU9W4&fIU$lRH35i+VNS4=fdjRoDXd@6ro0T>3coXYY) zjC+DkVAkSv*p-wf#DxnruRZORLCfl7C>&Q! z`s;_FMvoWRx?V*4uc1WN{C^akdt8j`*M|oogph<}kR(Zx)Z7cR6GD=Nkc1GD5JDK~ ze4KQe&Lin89aVELHB!kr=McXTLI|O4hj+dI?~nbVr{}rvwZ7l$s=>RHVahGMQ3c2fr9w=`%W8$edc~E&eD?XG4h2P(>C-h!<)2{-)u8v2O(ShKiq7Fgg zG?u9s0jj6wf!D=6I9x@~ax=McQ={=8Jclp8mqE;G+QB}p!c`k;z)r^mPJP;fo_)4b zZ>9*%R;|GMZuNZc|6=g133=9%_M(?pPsm?i%NxgJLBLf_%+qng#$WY(JMl9sqC?=5 zg7PHY!7zL88cf`h1bKbxa9vp`_Mhtq-MRl@@hTl`Uwe}^o*BZ6|IEX#{RiRYc*JJg zGtjyH3eOZzM8`a0fxe4oTgz#8aBd6O%s2+kt2(&h_nn|}=>ROx+=w0HvJ_T(yigF< zuJG=26a;Thvc+cuQK;*K!c`*BPaDZ?3oD6_RAjNZ*byTJhrun63)ns>9ncv-B|8f{ z&g_B+JL)cldJ-Qh0A$mCF=wYNFrs{i?EX&XtC|3+eaUtA?jC=9+y?cWj-cV@YfQhD zSOvcxi^K+$%d?yUb{9`UQ?JYD(zlja42QYaCQCHAeFmgOZxn6`)0wd{Ph@TJ4L`>&D@BV0iRfWZcdVaZ6~a}$xM0^F(Zdy&A*^x~Y8B7n zsZ*<=vvLbOA4@Lt06SJXkp9f0#G6VCWlBPdY8JR-K5?SF?W5U7%_Q{J=n2<$m%;t% zS*&ApK6N+ftoXnTsxx*W{1b(CM`ujCb?_gFZEI|EVun|4$0u zr!BabUk?!diNs+7Z#2$L;+g6DP%_2`&a6lx)(iCwivPzVuFk{Jt-1L0_;Tvbk7Xi{ zbZl{?zS-4nAW4;@%Z+jP?C}_MvZOxHFClvE+z4GJOW5EmmRS3-0_{{BnKVC=p4)>k zq;e`YUi-x}{M|sdVHpTkc~vyd)Z%LuC&2xF0B?^=pnS*<-g18ed@Vdr?we7Ne&{4N zs8#XBpYvhjzj9R9Ndbq60Zv`e||4`_v=OOGCg8H^V z>@->eZR-ax-6KidX^1s8Exf~ak|WP__AMSnJk6#N8Q80E6P^k#gT^mm{66(Y$3IR+ z!Q{&rR7!k!v!SSJK8~^(Hasi17~2a^6GLhRpLx3+#cLfE;-lGUX(&hS`_|BK*Ot|Z z6=?0MV4nM_XAu98Ki^aaT27NdWy=@tD3iji`U@zwS|aKid4!ig^+fNlL7=wSpJPEX z>s&P%t$XjJ9)1RhkDp-bm9$&vS;ED;=Wy-o3qWJxMo`Ph2iZ%1-2D0?4mfI#E@rww z;%F{86p7jUMc~gY%$G$A|5m2#1d$3?+ecN&wyv!T^7(w0)=u%h)l^swJ(dgO?yXTrc_O;Lfy}6Kzfqc1eDFU+UWruv;E-Lp+s2xV-n57 zS7GDv5ZwR2xu~{fHVB`kqO>iZeK~fJoIr!XuD1nPz5ByGSG?kqy}o$1w+k*P3I(gX zLnw2=!Ebm5IW;2Di_RwU&mVZuQkr2u2xgtH7vuib07BW9qCAj7wULU%LFz zYIq2jeCw@vR%M8cC2KwH%Y(0VkQ z39XNc92#rb)u(|dD_sH^PPWixsLtI+d%(U~l>7Ew51osm*uSWa0Y~Dn{$ns+d^ZBc zYo@WpXu4a@*aONB8lsp-H(8){G-fZK3V2fs#J#RnF3OmLE*WC#c?_vEe9_1y>ou@@ zK`{DerDBdP-4$$!G3@cgLd;_nj_>lAjo}(-wJG3T>y^AMd@X$XkOUJN?9p&=4A=Z` z5>sAL7l6OXCQe%4CZ@0;XN0opgPTo+>aq|*tmu# z*XzRLb57`9Q->`{Rf?92iR@NzESk%vfwABkmppkYvI@I|63JB7sdtXGteqxuxL^hC 
z_B|na^f~OZDB&#+$m8{81{}Lm1%=~lp|>I)8>B*hCT1*tjJSkRC7Ql^o8l3n|B8k$m9vkJ>D=6*E*X=U?$UG4ZkDMU(h7GIJ)5D8p zIjGq$5*%Knv1{v+L2kKE3 zVIrr?b79ke6=1&DmM1*(#HM31?s{)EEMAm`@~OIt&TY$~bdQw%?+ZQedv7S*mONl> zFFKiAeKuI_J;@rk9D=rqK`deX4oq1+8pVI!D@t3|fU$b9!b?99Z}=PlC0s-g&jPTW zTnK%yyJGB)wP8*F~jIe#d5b6yksaosFv=~Ob=&-2 z{QDVfcr}}ux&;vjfOrNkCWD%-3G9nWM`N3MMRs8d+UgL;z>)S8R!=yX5S#965nFIG z4on6F!Go6p)cKbwbO(}%G3_7gxQ5_g?tN(t z+Z56ZS9YDlS26?a_#y%r+@DxS`J(%KY0vDr2EETpp!H4ySEWwGs@4)PvO5RYTFHU_ zAb|2%jiR;R4Ny?(UFkR@pL@e`kZ&}DX1%K{ERp&#y%xb<(QuUOBCB0jhu!0&q0>8_ z8#+*bxu+i_Y@LWYK2b1#Vj=ywOkUrPt=r<5AFdHNbY~0 zW;G+QJO6*&+sJ|$CDU0b`Uj7(iQt#Zhv3;y^N1r_&icb`Xbb{7lU>h^$q{-L-k)jMe@dASiOa^9>ju^ZW3_3bB3G;4p361PI*U1%ju^HbOutuff5 zYQ}BVByj8YIfyS;;==#NpLKYf?PRz6 zq3VdQ%rII9^G9W2g~SIuE0+=nkfX`)9CUmc13i}(;>PJy@oRbrc-0W+VG%hqs7vCU zTS?4|NSInT2OF0D%L>O#1W8dYQ~fXmvR64n_FzY7PH}`b`_&NV9*T)(1JJHoF7n=d zhxuYETGt^HZ?+W~9ghRSMEcaCSzTSOUZeNff-L%GzU3XRd}5SW#Z?%y{<_b7sqXW^J63|rhx_#p9e%y#wzofUC7 zzNZYj@-{J~oS@WwJRAv_h{?MPncb8Dydi)%QtN}*rl0`$u_lV<3)7(GnjdCQwqx?r z%lwK}G-_)!@Vjlv;PyF_nKt&oL03yaT_sy2jQJxH7~icFc10^()0(KWphbM>QB1gX zCDU9;9^tyb-1hw?NQzvE(BTg1J56Z^*b^FFo@OvJh%2*I;6{8O@`v}Jb8`ha?e>6? z#J`Gm7=Z`6RLp_ieZtVC@E};(k}uch7}|Rur|x?sN}JB|hJp!9VE3gw`RP1# z|69OiTZ5QrcLb)M2?Z7UU7OPjK|^`~v^sZj7s~=Bt62ka6V7ADtxv3_eI|HjFNQVo zLvZm+Vzgz~vCL2nw5q(u0!HOQW{n-X&OQK#P3oa}i7J#I-iWpNAy~d}IX1QpWKCZ# zvhMtFp1r9zGd>u{YVu^**h9e8zZ2`y<}_0|ZU=_ssB~cdt3otKd7@E$?Me*|U3S~=9rJd-Gs4j;b&zkSJbW)b6 z!Ei4W=3k{2O%QbWYl2Tz5bGK>6PmUi14%%-NINza1ZuA4dunH+onHkOELn%y{ob)c zjUpHyS&0?e`n=)Y1l;qBlVi~dH&h(Lds^k_a`*uc?Mg+*{Nvy{UjsbPWpQ(pD(+_Q z0<}&-@NPvBW@iuN$7!~ynl+8riB0kClU$r_7lL}zdZN+JL#Wif&RTmK(adl%KABVq z8;PCbxXY49$TP{6q6t>I&Up9&ebXodb3}6%@lY>=X;TtUF^R)j-U@8-_^5dPrVa)c zOhOf>c!sJLkUBCLE~qWXuR|38UWXtqI|)`KoIzP~8p~Jj;3{giVD>(jy2ATW??)JB zgsD(wKNA8xdSi{6Hr5xPhSrc+6hDi`7`6@^$nB9Zo90rJi3|EpByuy;M~BG)OwHRB zq!Vl~Lv9Z($Jc;lfdRCru7dvELlLHWfjRNOD{iHN(0c>8+}Xjzeex7-<0iqJ_mv>Y zJOSE5f98@Ng(Lo4Z-3Yv7nJ_MBkVXOe^jc zbK12RqL!3n^Rsv^b#vvph8%1nzw)ku^C0GW4E7&ogf=}-qN6;RcSpAHjc?s>(YuLQ zWlKGSqD3J1BNPRVPUn7;M&VwKRA@hZ1lV719IwYw;{H-`uF3_yf7^oO&Sb^~$#^l1 z;m<5f@*-1)O*k4GebW@{w^yRO&2bP0bLRTW1$_)IGs(K%Ah5Hoc=CXHpPd7s>{tl4 zWCp>nmIADpIUSmi1kI8!-VO{S)-(^N9je34mqJjoU=@3L#23{|e~Z)y>9TGk^6TkeQ6H$yggVL~S?C@U&6MTuCXkg6LhFxP-4s+4;=y)*I+ypHuOW;#e zEczVRf=+%4)8Ed(puQ{M^1Xv7Twex{{l)nFL^K?j6_2-wFJO{i30|*8gLh0aPJFl+ z!v`i{`|%Ls?sl;u{t4Kf8_%+*9OK40C&2p5AULpa3Yvz^1ffr|=x6v`)Gn^TLEYp! 
[GIT binary patch: base85-encoded literal payload for .../deploy_image_job/model/model_parms_from_mlops (Bin 0 -> 32188 bytes); not human-readable, omitted here]
zhBfM7Gm}{9D?%`-d^om`Gl8l*^Kq?t1nPUlurJAHLHXqw7^chcbZjbyjL*k28X zYhagChK2LvP~EGT-T6-o%@6vP8GfPe{GJjheX$vIj|8%UmoxFhKNCEul=!eyz)QCIxV!_Nev(u!GJr>t9?XI=oMY$Q)L z%Yk;AD|l)14vg;(!tAM)#8J&+zR}fqBsK_TD;I#LRuPP>A=$~) zH}t_jYtnFP1??w#EChYk-#j+if*&*1K)soP5U`~W^>4Cp_1sMK8F>{m)>ZR2Zpi>g z)X7a!!wvBgOSFB)`tm*KzS3Wm-slVU-&TNO{c~}}>~iem6@=40iqY894kcs8LyY}& zH2JN}olbogeN_g5m&raT{dkPuX+4AXT7Iymjb;JDJ(dv{07I`+|6%(Jj=8jlqu=Z7 zuZ!^3ohsBIeyZLO>SR9|%{(vZqQ=1KYpj*Tdo=G zR3^UovG+o^$4p4wXNFqGHeq3|AL}B=vCMc2BtAXPtZ^y5!=Ey*rk!X~H=bqEXUk=< zC$~_T1;GR55cbaw%wBhjuN{_viKSIwo_GP2uF@>5z8qlWU8cAH8mKy!!>8PJczoVC zj2)~2Mq_(|{I(bVmzjf3tCR%S9mMSWUlh1BZ-iNTdr_^%6e8%e-gnC?jNDhwyOt7H zb6FVgzkEA}{d)zYqOwuu(a3Jko&=$`Y3TCnAOt(RvqkFaXz_X^cr?_2O5Z{z$$uy| z&AQF%f1MOE(l^02>VV0=Ob{&dRLBwcg(*F$z`}2F`0o2Y)J{2%3sz1-mzV26XWbOk zTR0W$A06Vz$P=fK#*|~y(5d#IV4|Z8n4iZ!K2*Wxv`DzQ=mf0KD5cNTHgMQ}hgoYT zVD&;ReD-z_p0YiMdTU2>jjb*GLgF>Z&OL*AxB9_13sdy#n1qjKW#gv5i?QHM2{CdC zAa$b$WZP^8jfIw6da6b+%)Tl}?u+&C_ z(s7lmZJEY&rKh<=EoCtl1Y^hT^Pk!O%5sOV9AG5+w`$0`d zirtI*KxVxU3X{%)?8O(>Vv#1A1b$}0mBgPi>xV%DC*dFCzIec~6gA=&az#h#B!8F< zFV56N0vcQclkYt2WEQX~0k}B;~>D1Aa9Bj%GIA zQ<;3gZWb&HA{J5#q&Z(jO=4*~e9qz*cs1lTWug8vZ8+Rn1r9%n(Gndj#6G_SFLY8c z^_2vTo~H2pG`cf7o)wzx=JTrDC|GUYlg?HBKxNhmFt}cYCS#VO=)50m7p9@y++I|y z>H`tN0ao940nBvKMXAXa_V8c?ZmYP4Ci6bA=D$^dzC*zLf`K5vktZ5!ZAR(;Mu>B5 z1|h_r=MCv`V7Y}@)#Fcdo2-WnMvnpwdQNFv*M{u1fs~QThFu-hVK=k_#Z3coo!tdg z$-V%0;|sB|Ed>tAC*!!ttLPQ4!Ay25b8qPbF4Yn!yFLx`b)r#cvpNg@GX_e-mHF4D z)9}?dEojWt!K(ShGdU8#-^DLNlf|B}xb*_+4p<2-bF;X0Q34e=$H5n!PI~B}N#h2Ne_;4@FgJ3i&7~P1^YOtb`*p2PXec~#xw92BK z>2a{hnhM`^!_h_U8|zYLOzY-m496h+_pl9Cgk^(L$48EP{^jzsCW8FdMsV-`!89$s z!REqTcK5?+a8j7avX6OE2Vxrd2Ilck)@M+`DjBcNoP;(v-g5i8|MA4NIT+2tVM}*8 z>RjlF-6|sd=#z`n?eb}lSR};8oMJ}RHqcH^-#=}+(D2p;UaM83uP-slAAc2Ws`?YN z@dbCic^Z~4o{Z2$t^&B8g*_(NW?M-2y93645fg$oT&La5I^aCzs-OkSwKo6HP2 zH7t$p4Kc#d--*yZ;Q@EQxfONmCqv{6PE4`s@Wvn&9-i<9MW|jc% zx97tju^93`x?#w?SvdciKAMNU;~8VWG8dz{Oro|%TsXrE)z2zGO+yBk-P7kS9?R)& z=MMIH6%b12WSQeVb|IT)s?oi84xJsKPYTl&dV;U5U0HA=3U znVA~Z|I74#+QZJ#aX2X~7^JU$G4)=N(3up3X3>%0)Ho0tSC)a1RwcT>Uk>8}M&j|t z6ZmR(B)G0ig0|*p_)Gk)wCm}Z)=1fweYHHeu#Qn!EI5bK&=lK)hCJkJdM4V{H3ep8g>Ob!IQ%-IDuEwo}5o2CKtua#h4{B6gBz zrT{a0f$jf1h>dHHhTk^8pKXcIY2vU**_XcOKgN9Ru}Vd!ePiO7uNu3hwxr ztzB7#jXRG+)8=NT|JWXyy8~Iv>oN>zi^ryE1G%s62K?-5iNpTN$?LAjn{S4*%~yBe z?!8&){DJnkISa7k`zNs_Z6!IN1~N;_ZuS%t(aB*DefBP~P_W`mZS4+k%V*;7`C~C+^dFXe!w;NV=Wz4i$60tz4mQsX0eQDS zm)8tu&S6)X+xN35IeCCZ6w_RLmkjQV)JEC5tzc2Q3Fa&kQ2KW{bj-2_V7mvi!^Iu0aKlX%e?z~2O#R<^Tu^&2GhO_Xs)ZHx4A^*v7Vb3&8?ER()vkNbC zlXnsPk#!v2Ssa0_DScVJ&j)b?Iaw97vmivf5<*<6pmSFlYnV3v@>~y4dp(i0+b;o!uvnUDxRv@Y+`v8Z62R-* zb3RqY0Ie2WgR>U~qVKS=;Gtc}5?5Dq)ej!%ypr}IG#{*8H4gLJ<*4F`-0=Q>HofaK zxR02@e)c#+p1%=-dzvUn&RUuV+|)yPz7b0D-U@2^S;C;h^z1uO%{r4RG1#n$-PTdZ zjheOSH2yrQ-&+P}n`6Ob(QDps$u3NcHU-@Q*3gks$qL=5kGhOJgAD0ljeQPK_ z58j6vzLYh;e^@xQ(gsy2Kyaw)8aCWr2~MFMT*4f<&GS{bW#Kkd%lCrURT~jr^}|ru z1B%HwqK>5Mb%*$vQm^k)<#JV_4 z%8$Uys zuFV*viqP9R8!bPNh3*$y!2PEK^zJwZ;W6g`k4Lbfo5S#h7I9R$E6NNiMBRE0`FcqZ zGVn4u^_mTO3l+F?Z3Z+{kAU$s15kT$QZTTO!~J74Fu&gnw0JmMCG%_=iF&aWT%=Z9u1gK}?$#0kIqIv%v7>mRTk}rnin+3U9T;^dMPC@8RhtFgErc%NnSJi8FwiI>oF zUlhP^IqI#pV=ku~nYD)tm>8{R@0X9o(vyCmn|+o?w%lb3(MhP2y$R-~6=V2nEu1^U z9@SQe_*y8(#5BsN?F|R}w-fof9j@qmg66E}Lt*5I3uvr97QI%PGBvgc^xpe`_kSVa z^u|~0-JA_JoBpzyCKl-SPzq>X$CO4NVVfd%WAM8~xO+Yd?i&ZvzC|Ri`4t%RGz@$v z)2?UzP&9h*hkFfm;@WFPaxx^a>=o5u?iq)cZp2SE84sylKJZ?fVWL_w*q7;nxBn?P zpGq$8Sv5Uj#_WN03*H`GIJ$s)-co%)Gs!3p`i$Lx;^>q6Du)sL<*Oz z63fgRBZMVgRp_!@2EhT#nR(P8_?)JNiKEv-eYQ75o^%22mO`jo^NMZWB%^%ATr`ZQ zx$NB{;!xMZfygQadYbZ!f#)3g9ISdzA!|F2ypkcTGyU2MP(fcazotH_^k5KFkBhJ`= 
zQ~0n^Zs->k4Uv^aY;%+x-0s#O4s`?TMHkV2dl-7WT1{u=CZ;zg2TVT8dA=kN7SFH7 zV?md3@gpsa=o>DMIhaBnsbQ3bzQYm^yMVRp6COF{F|*lUgVy&Dtj)HAqi+$K+xv@C zE?J_xRv<>!oCotWzeSw{Gdd$E(R?ZtUEV)n@`J^u=GKb5VgGQDwR(c?>4^}1hdQn= zeTiFri>XIO;GkPUg6LvCX><-?JMha=XZtDjYv0NrrXeYoQ zn%$^0Co-SGa!5Q@!Pc&dL~H%|@XA078(bf<-$j*ZxrlQ7il@1H?G5HsA~NmLGoY>- z%cApwz%nC?&pf#d(~1;OP2#`?oxTDsiQC|hDdprEY`EIUxBSn=B6KaS;a*D}A!ETk z=Az$^IqI*)SNhla)wynH8M+-bX*Q?yLz(*YNtk%(0W+fBbhAbjb5B-;6-Zt6xb1M( zpb`vS9Khz^4sx~+ff4N|u)gRxND@7TU^#IU?NWHF)-h<^+!M^*=77Dj7Nl*VIkeFX zkW5i8>!kUn#u#h1uJ{T%sB8!QJ%cfIa}AoGJX2sln#XD)$1PcFmZ>w)09fcV?F2;(;q+w5LCF@^6lNm2-ytlR)!zfm7F z>oCN6O@(&qSLqHhBc6pR*qtc?jpMNIuF`&LyE-tTMHTbA(b0b5NJ(;NtibXxL7?y3wZ8qlo41Hlv>hKq6YMxm2> zF|m${@vvDD*bf=Z>qq5_(n|xxV9)8;opBhp@0X#zBnUEsT%eBjqO&{);ul#UWQO!Z zdEpG^aNz;J^ywVRXQ@!e>?~7EGY0+XLqJKxlZS0iLz`!Tf_i%qJb9#vKz9-At`rD1 z*a8WaOVRR5G4JX{`LPj`nKya2*8FcB8qA-G5vgNf`mZ>;cO)=Z^Q+v4_|?iWeNj5j zO^m&|7tTIjgFUx-qG8@e<~dazyKRji;_4qZw0Ard8~78)dmG6ARfeu{#G1eHO?1lK zBPyL6M){asbPn~Peey-_B2#Cr-fsNn=8F&sftY({GhQ55ff+%CtSWU8>df6EO6Sz` z3+B7A>B@Mhr}uch-W@SqAqnokr99-Sc_8~S43yma18Sx5*q+S1f)9hy^TEV7 zRf2{;K_FeVmFbUu#y7_m;+IiHaBLTKiL-yR>jm4eL(Kx(%}+x|b*$(LaaeyloLe@B zL5JyR@R%gyde?VD?DB6+ii1#+zgYbEei1%@n}Dx=4}{Fvu^zq@8kD7QFS?ZSqx%>& zDVcBOMJ!$F3eFCtpd^|=uPue>9(;_)UUh)NUPatX?*OE3?c^iK2ctW(H~ts11h>vz zkLK#0Y+H^_exu38J$lar!WWy_~cjohC@3?wtaP A_{(~v%_Q#A~ znfT9D8P@12gDfjaP%_TLQb{PLdVc01_l>b{{dR1>8VsklW|Oln8pa=`&iv!`#2tOa z^?+PS>Z#M+H|5B&CZQ&4|B3O4p#g_qAbqrcMB^~t=C8K z`_6Fqt3e&zK?||LC<5ilCrr)Fw-PULj;XTUIlPusg0Fgq!*{JKP^o{)g+lIqoFy_<01RDjwpSkjpg3Aa~O4T-FpoPTBJx1TbEUktqWD z?rvqHa%cu>ZV0w!smPnu{=cisXx{(W>gW^DvUDJ{mOW=ypDsiD3{&_qf>?Rk89?ql z_R_!#O*xQIZS?y*2cR%tvD)5fh5^>7mN|fE04I5t%!o_A|i1T?y&@9;i z?NJF}zT%+J^tA$yt{F}&;1D{;UxL*IzZT7 zqYM!8YX1Oie_73K{uF}jVLFStri0RsbY`4(1Uu_?gTu*5U~CbDD`=;vxxp9j zCu?K7!*K8#XTmyjDC_;Sf@P1W0`1Q!pb_4mxeV3fe;)Z0d-ocb{Nq+;FZ5#_ZF51l zj|J=rnvQmD7NEntK^}Hf@O`|MyY$g!MpMpUUF#Yqw^$^0{ocbLt*3LagPb2ed=i|7 z%Ruu=9H_p%OpLlSwD+6MHTEnA>ELysaL*qiI*Xv0GWB);n+jfWFPX!nS6t;EWWk

>$+4anpa=sB$cjJ>6Mrb=rVD&#rc*FWw{B<`O zf-hyE%c~mD5q4u&xdl0a7V=vsW}%caXd8YIC$xWWNSG!8Cl?=Xyg3uT3@#_;&^_im zQ5U>7q{C}X%7g~{vz(>5DEcxOmM%F%z5QDH`*|oUAs)Er5bEmo!ei|jpncd0NGAf# z)vwu!G21bDLo<8YZ9M8kYe3+@TvV%lz!X1oS?huAsCl#-+Kk+UUCGPg#ju;i`lda; z|4hg)^dN4_D3Hu1?vsZf)7IS0rIwGxxkz)@Z(YoCRtjsH7>+K3ui&Se5U6w5%D|(D z|8Zmkx={D4bB+;A>`1}X8_p=_Z#DQtEv*C=?si3& zIhCNFevf-*_re{DgV^9Ok#CC}jFSJZD)omcpkTfVVv1v-?NUE5U1|fCdMUhHXE8Y% z62U(;6)YdEU=~@Y;n)01c-S%nqRsl@?VGeyou3Q8p$t>c(y&_n37_9Y+@We+VnanU zhx%=J$HpBUr|!iH^GZmXy%YNtJEB2pca(ix#lIh?Y{Ra1ysK*peD(-Ix&M4-wt>zx zZ`VRf;X%}|)`0O_XwG%sL74rp7~8T{P}sZ(m%IxB_X9gXHNHt{++F|$8CCG;UL1x! zQ^IS<6pRgsq>PRy8?sJ}zWa&y-HS1&afQtOTRLc5@B}lRudGpa5G)s7;Y;@QKrjkm z&+h;>Bn`x5(JCk#O`oBk)QO564E40*zik`@6V!%a^TKwq{bOPzeAoo`bIsUnNFtAz zC*Q3)iOt`qu&sL*VEy)q;NKXCEu8^4skbM_n9DJ&=Rp2IJrklv>Qn!!KT<9e+aA=g z+=lbyoV9?Kp2Um3@rl`3QwF3x4S)3&p#EnHR1c?~G~Q*|LswCswTfK3j$HQZEHp0| zE{>VHmN$;Q3aT?@!n={Haca^IeA)j5hOs&>TdN_CqTcd{L#I%4Oe7Yrxx}5w>(jU* zhTgB0tavjq<*V0oO}!YHed|2Phh7)U>g>h(YXkT;ogftdosJqtiG2F;F(|6iSEv?V z0pIf>_@eAMD=(L$S>#2rj$JCWCQOC*Kk~sZBowu1A5}Q&wm941HJ4eR0JD+0Onb&F zaaCh6{yI|$j~k-#*u4!X>-Lho;=7@@Q#zWCTMzB~5b}&_F^k;MfkD-vy>>HdzU_ta z)N5}wjf2}IGPu@%0)`Fyz&`yE1L-?!P|^1iW~9)&{h*XP52%5g7ZoT?)&$)Thp=sN zuGlQ&AOxtC*eA{byKo?woqodGZHONu^8kl|Whk1nhS^-~M$EuG?qWl~3kMCRGdY3! zh)W?jT^|Z}Gz#`+VOWk!vCvHp4JU^}U^F=dW?Tl7kqeRVN&Ny8(|>g+NSr zlJMvjam&r^v89^h=+aya_fVmD(-rE|xUix5F*tH9F<}DoI0kRRs0Z2P>zR$?R}$OA zXC{=KnG5!Q?p)MwAxn-C!J{A5Xt6W~4D|xU2G6sYS;ujq^O`;W#x0eWOr8JXGoT%i?RdiSzBnl-5tHShr({#yQ0ppt%;qvqs}9 z=i%gEA}CYtS=8T?&+ldnfYK~J+9ei8sLDWVc`cW0-Ndy1(Sv*BPK_x+ct%}{_P+;# zNm~T&?WCD@ex2p6FE$v{ryn%h&x9e9bFpquDVUW#7uRL&;I|_Ruu3FHOQ8Jpq_u*| z(FQz-F*vDY2i6sh1{{qT zPFSGylO`+B^~B_J3(3(GD~vu`g!=llyWC{RHT?f%vdnved_#Yz-f)^r>?Vj;ojynY z%O*blrZ0MawSlg%SiUk~19`EIah(U55EIcP)~cs>UCA{T5x*4;Bjbp}sQ~*dYmlzZ z1PMuWZ9fG;#m{JTwW@;Z3eIDSEg_6@rTK3SV8t6p?E9byyE(dIi{?qNpjn$qTn3tC zrLolZ6L6z%DP)_kWf6nUq2uM{P&QGChKClx_QgpMQ#g-H8WJJ>Oa|t{MY?awpjF%* z-_^OHsM*WP=^imuJSvr5!wg}YM*&J!A;{aRAn#W->YFZrhS*-jD=*}QkuKnOFdfVS ztHJs8DYVQm0sHh?Vb1A|s5qYp_U@tL+uMkZ=o`texZ2>}+p%C6JC8MJSwVxz3?6&4 z53cCH8nHVA>kFC8_(>kf$tU}%+!8+Xl@hJy@NY9!nTL110@Lg5=j3V)>H$WS28oPVd3a4+(%7(lcltQU+mH z5}B=6Dm!;597I_^xnn{F{MhD;9|latmjiIxh}T9 z{>rnHqnM&`0Wm!)A;EhqxUI{@`{tRL_VWbJTQdm@C(MMU8}rcV!)0`pC&Gl|a*QdB z6l;t2!EOCGRPRax7p+*PSZoQaZBlXjo=}k4H7Zx;j6pLUnmy=g@_#KT8{cgQ+i;M> z&Z@bX>=3~<+rz0xH~@?$_r~~pTfu_3b6RslcvE#QY}Y-3hR=F%Re-rT0~SF1I}On7 zlY-6DIV%k6$6Mbn0BO-uai`j0CW2S2t!5@i${K37&*!qTCaV|gy+F%t5X6hlVdus@ zxXTdXZe$v~@(Vz(1!qx~x`C_K{v%%0GXWB;>PnO;Cb#4 zc05W$dpm7*Y&gvp{@7XG!Jj;@4b=yEX4O?`N5SprzU+XmmR>tpj=SD|u+2Rfd8$eWUWvX*giyh(9@ds1@n zYPB3CbC!eXW}47+NDap-m*V_^Cs6xVU&vUi0UnVDa8N-Q$}=vA2R|E+gEjvk7o9u$ z@0t%zm2>gbv;+*JGo-CfE$F=~#e?e#&^~_!dppkweXB>ZobWv;n)y&c1zc#lugAyN zhhp1DBTw=OgEEAP7|pvz2hU@ZjDwus~-CWnLN3wn+YE&-A1tk{<_2Zg_XGC$Mlh+7=NPjfFm zdzMPMES%}+m#0Pk5Zn8OSD{ylbrIUlp}{qK|bo^qF>wNX|H zK4)>}uz57oal{#i&tsi(At>g>qucXyEZJ=~xLivFc@6_tx@>G<}cQ? 
z(}I_{RpGlY&gj%z2hLxhUh0g4te7%Yk|7b|Cr7fe{nbE7z8Ce%#VI-HM0 zTsD3hu2v4i7ZWI-xg#0%e-C5LdZU?k)_91a3`WU`MR21z3G*+}^MU5+^u^;YPy|}@ z3#ofm&m=Z8gr;`7zZmG@a??%~`}o(vtG$si032g8@!g;mRvF#gp87GD#{|FWKj zGpM^HpBGEB(-++1b`a*DzQOto3dZFASHS5=1{1nb2CgOsz4Dae;GeeO7WAM);S?=?&92kk%6ut`JCt-c>Vp*~HQ*K+1zv40n9j5SroK6edP2j+jk%eqI`}VJeefb> zmxv)C4d9OboS`oG6^rU0hVivd;2Bo|@^Q<>osC{FNUDR!FReq{Kht?g%y7K%EeicQ z$@3x4g4nzx=wuTp>Jd zB1V5ycg%l7d-j1v;JeKmx<4Uy;h7L7EuPB_rs_gNND;Gl|IFj}t78d9!fiJd+F$4c zbyY%t3wu3c|D?W%{dP>x<>2gB$2XV9i!C3&jn@!y-*;ib@2bep&f z0uT4cLDE_Tz3<#pItFA}L#({`R2*7Iofvv#d@RZA0G*6C%&2R&IT$KQ{3UAAKJEiCb_#djn1GrV8DPD8HkvG24{qa1VUTMOnx$V8)OFO*b6N?ss?#1QHwz{lqr3AW zCD`~Yc)Z_oz~y=TVA?M1jPd0wWCF&&SMZi}N0#?x8QxuU8j|K5$G^0*5Yqm~!kYrk zs}Bn?8jEoH5*50CcIDYi9x@Z3@vK275sOw`Af9<8^Oop>?E4t59~XoV`#oc3y1lsQ zLb9^k7bi5*vO(EGO{<>9DjZr(yzcSoyl!D5&u$=(P|rx#H0lbSuR`E{l?@j3N(F`N z3pdF<#s8z3NSov~2CF|H zNIQtf6BLiq;#{%8n|xF}8NvG>orP%wV(|9UzBqo-ah(5;8EOwB$IA!_GyM0o(5YSq zPhzXE-I5#~63XqG-r^5l&qwKcUm^S38SLC}4ty->E1nRAo_&tNkQv_pdr z8W68{8rq+mg8zWQF!4J1D*jo3?Z4N;s!ApKOqQdY^eTEKRdM;JI->m z-t1GFM{a}q%;|uKlkkQp0z~zO(0oT-?6p{+^WJ$BEePbf>bmF{LY=T-<8WKhCISYd!%>7xpJ^ zR&O-@o8|~)UJRow|5*I3Yq9%BhaZ% zgwx(jP;0UwxERr1pemLazHv?0CHHatKi`#mRl?ofW^8+pm4aMZyL_ zrn8G&k)wcS0uX<%2n`o~6AUMWisco7V)HlSK>CJe3hM5VU7HL}Gp<54Gll;eDVN_G z3_7!)a4+J=+Rs@A_uk1-(@Q`f<81U_v=pr8p8$jKUa;)12s}o!kf@v!;5jspSsI<^ zW<9lG+VmoHdmD+>?ccbu?n>;_D-2UIj~a-{0H*i8A6nBH$@!mHFe&@ad;JK&+&?ctpy5Sy|Bq$^?-j6}a+PLR=WVJBVRJx5DYIv#>5dgw_49 z#5nf}XtX&8mWQ83QO-a?f3^;@Jr~Q4-loj(&_RNwWf(kK;E%~C%R#$xB0v0OGdir0 z;%3)P7(0S^iBGCQG;%T9J|z)k3wwjfksMq-GZU@mbNouVhPn#}zu{1cu{_gdbh8n9z3_oH#KLV`e`ThpJQ7^OFT;omz&fLoUowP=oyE zqseU$!`vN1U_vtS0REZ87Is~Q5sTx&>vB4{+)w7$E+?Skvj@|f5y#)V#zM*Xa`;sc ziW5eY!^AfZWm)&EJc5>?we3uNFy7;Zz2cuB-vU;TYKTh=Zn@6DH^2z9+V$TI*u0e|rUG zTOy!woE45x%Z1KCwp?qtjJb#I2G75YVei7r_~Atrmb@zACIb?9NgeH_XZb5bzY<5! 
zuaNntQ-|8U7jJsjo$2;lh57T=nW?aCl&s_y9low zT8WpmE>fm2jJvDT?%LrRH=LQx?T6@+ZruStU8e8xe>u>2(FKN9XTh!?6(GE%Zby_F zcYlA0Cmh*=YRk5RLt`ecjw1f7n?xxp)gboyC*I+B2~4+L=61Am+NpLCwP>d%yHG1u zS!c59Db)Aft3t)meQaS3<&IS9g3W{<3=V}tu2KcTOAq5-pBy-USq75(9)i~w5r5_v z3xn%7pxjYMynRg))c=o9ww@T(Bixx~$vwVN=Md&kBv;MNv!Jz`APMCqe9HhQJco%8 z95@hucxYp@HNc!Vl+`lKqrz;k_n_?NOlhIDVUjP7Lb+WMhUYAAMZUX1QV!&z95-^||Iku`tb z%5XQp={u?DV{rx_22aHP1v9bbQYUx+wt-om>&6SGwy^50ula`~w4>HDW7T8R;rE&} zH0e$Zha1PZyV*Wgy(0~y_UV$lXdLB}p0cE4!KmZ&h)*1F7zd7}9q?%4(9SCc>6pII zW}S!f)oSqSh$(7(yUhlUjsQ=isnmb&i_Mdd3k~ibC|MfJUj+An=D*L1*UUK=S()wv4fyY-l?qo;WJ#sbuPPAnHM z;#)7Q6`R?ghK8_qUbxW*%(q;`&YoJtg1n4#RRTtv(vE7PJ@laGj`Zqzu=pN`^6FvS zP$2`hoW8?NhoGg;el$^A#7ne_QQMNvPe+RBtR%(e&fmO#WdUo^C}-Ms6)-Cz8l572 zp*Fq%wYG+0-O#(l6za!K@@3eM&epQ1Vcg}ybWpbo!snVrIB-Z7%Vsks~IoYWir`+tp);WysNFw788izn(cL3Vov{{Ga9-K*iF)1c*-WRT zin@;4Lcb$tz~Y!2I@1oqDR(mP-ePa5T!0w zm>uchm>7U9iPqFtSq*(x3?MH@3hS2_g$*CPKsxPD-cmG{cZ6KREnXMs+3yJrp>%aQ zxdJ<_D_Pw0-RNi#1i|~25dYvNYx3)XBmODF9~;Q=YP|pqN@$*JEfYsPr5=^3H>mXd z1h3T}nc*p4rf0qe>rIPbLa%i2OC_F{<6NfGRf-73T+7G~6M7i3t0y*NU4%WW{TV=e z?JvBfaSjCEyun9}-hg{Yg~0HrXv{Y`4DtbHJiao76&{KMyZ5J|(0Vc$nO=k=Q?5e7 z;e6iYKs*+MM&9{8lE2?M924G`GwG*WLQyz*Tn5|`3OY8Ei`zt5-+UCpuZ_ojj|(v0 zYYCXQ%Wz@9aqQ8o#G^lIP;~MoH}|**_6>C5}-%j9mM+JsE$AapsSg`Lu ziWwUD@aDm(lwqftGxa_@OZAz5mIjV;%tjrZe|TG>9y6cl0TTOMW@xCx=efs$@4T5{ z@(4lDqW;_!iTGGY9~>&(jm96sA$7|I=(lwxv976?wwN5>2Dt#_1i9<31l57lu;@4a zpU%}(O7F%9PDd9oRry>P9-~50?~lUWbBSnPaTua*F9Yufhp~EaU!dIte7$CiqWq!C zrbb8T{3!z0na=oLrvmfGHZouTRoqQMOv|g}6bTLlU(+YNn!V<;(|hB?larwTh*@a* zlQK(}f?)EDXt3^eofYaVg|^rsEQ9!X?RT&7#R*j?P4$L4kujJ5T_Uy}DhHM6EJ3s_ zL;N^X9lyLM8{JNOmWVKH7xF8 zwvTqBUe#3y->?ovKUOG{$6f|+`2&_PjTj&|cA%T>43HGxXO7paxX7J$b`2fk3WFSQ zzwnYNGIF4Kbx*^2Tf}yGS#k&V9;8Q@{d=9*S}7n zXzMev|M@Jiztbet{?w84&CLTm0g0qkAARMWB#C7PLSCA+#gWV+ln%g z%izI{H*{tkNMy%PQYr{9`GvF8#`NE_{vel`ph0A*lkIHC_#bQ?%HQJ`~=G*zP*wf%&?E*GyCZg7} z)0k%&jlQm#Fyc%m`TtHa#W-?#B=mqxGbfCkau&B|I3SW}6~Fz$;5>i-qSY-Fn;`SqXNYsxoq z=@b|A>q-MB>r^(aNdZYV3NUb*1_^2ztX(3b^Ku1RRp(;D`16pxaxpY%o?$I_k?s0V z2{DK31^>6MU_WLwWH*iD@Q~r_2`6!veG2}npjqDIbIfF$E*!0iLesZ8uy|xGc1^zm z?o$|l@?ap^zC8n7(~t5hy98Kp_cE&eP4ipm5Z4;4!RoSb@<=X+{Mr+ke}6W-2rlQt zh@&gFoi1p()^opH#Ls%W(RLkWDsp_mrfvf?Y#Gn)83qBGk*B6Li}uTAU_|#H(O9LT zb>DZs=06Wy_d!615vQSJu@Vee8IM`84O+^_;+eRaxX_>&7k+WUifQ>M`%SAtk* z^FZ8mf#bv5dLY|=QEZoe5@k~-hImx&V=x12c#(5yPE5Cp2MB-A?Uq- z2H5{Ko5{+4VOhOlfnSZ0kH!%uMS0WrK~OBNR{XTp|5e|&a16IVPVzSZf= z&~@I6cxL6;`O}C&{8={E9?@Y;DMZC7nAW=90PUwiJ7W#D-{?=dX&ZX}Ayex&7^HVC z#HQK1VTn&QWNf4C|LS&DSChfRdMt;;TQQV*oCr?uJ;7-;!)86Y4>o6G)YHqLvY3Gx zq6y%WwhVQ;U4i&M%`=)=<3Ve3s5*k{!Gl<@69k_;EYbDfOw9gh z0nx{D&~VpavBl_O80fFUu!vOFbjt}|WbH=ZxRX3)XA(-{y#=j?7n~wXT=A+89C%WQ z^BRk=UD3kbzLqgB$2PW<@==}NFF^d)eejg#Md{>V5^Wm7w>qW5c7p_PoH88oH1T7* zZxxI7gH@~SASx;}S)EY`c6A*F-{|AmaY{)U?hGdBb6zO?v>eQ9%GkJRu{iw89F%?D zYn8An93=S%K#`zJ9-7^7q^>(s}RXL{`@{41>GI zg5^~!-u!)t*yZrXEBKqZxo$m#O(Bk{y&s(VSOX(2Uq-X1`#>gM4_>D} z@M$51pt&^^8dojGLl>O!`+S#!px^a zUmzw%>qu~ad4!j|BHm7rF)X@MMV=%pZ2Ekhj}FLzjHC6;J>xLI{_C_q^9GHAO{{Z< zoc!l>CmVGc6NItc;As}~2|NuGI|DKM#XHu#_lQ_gm;*6w61FLOBl)LZ@a(^rGL!Z$ z_T;~D*i0Sq>g54YSo)PUJlO`)z4L_8l2mZocb!juUrm{2V-h@P#)Y_=`7Q=>xKe5G%{A+xdFc4vh4R#8wA6-=&!j+FPx7#;GFK*k*yH zw{$Qf;3^cFIP<&jqaoGuB-~a~XD$3b*Vs9lzZnq%71!xm*nZz0sIrR$$(-xLD{>pWp=Xf&fi+;*dpzqd%Y=Ut5i|6Q zAm)-aINDMUsdhCKj?55`zz}#gN{$*UO+deJEPwc(_)OPw$VXVmI;YHoZ(cTde&2C$ zbFPH2qi4upu!tv=jesq!BWS0+g~h(sM4boGEcv92HKrOs_TzZ!*2*B?tr86*KrHL~ zggn#kV0gV)To$g2&8OYPqKc1}(#HC6nnaZs5sdG9y`f&;*bUwNWsy~zQ^_Fb3KQtE1ESy>QPjc`ju3lr_ZC+qdjtwK?S=hvOcISQ; zs&;B%%@b!dEI!EJ|13w1`?>7RD)MJq{wr>-A?~&MKJL+PBAOqqgeDt-OEm51-8v3z 
z$fqr9xx^X-@=``Pg7-!}=(*ksZx_U(^EGwa;rgIFUCc9PIfH0(CDcc&k!OdzA-{<4 zca}0|M|Oh#5+|lVYAtKt;x9HZ(T3v8vDk}C(881QF#E?bO+gOX>rOJKKbL@_H4w~l zX%4et9`BYLfEvPEHtypIH2NzWT_%KJiN3oi~jr5ePo)xk5yhp@@ppLT`Y(fpGge9FrJq3IZu=!W3_ z@mKMMojUv+Vui^QqL_RtFx!;j&~$kx1ggcssRv1LC+#YB&5OnM#!AqcNVDv`>1TC^wjtxec(xp! zjnmL!S8w8?^khR~7okqZdmcud$!u1L{nUdYx+((wb)2AXp$W*AgHRY7$>dKDve&dn zxa~xDU11lPr4D3;s|N!87C>|`-)fFhj^;kG#Ff3qjFbVe;YoMm369{mixIPvYI(vw zEn=Hf&%DwGn6b;#K8J7G!$L(e{<7VOVIYX$+joQL6@OFWQ;Jx zr!8mjR9!5}=*}6~>k6th5}Vp06m$;K&YkX#n$HSQ z1E)VnaMc1+Xnl5r*hHzQ|8Nf56icq81;@A(1aUUG0^hGcLOkgqT>E-AR{!lP>$kTO zOyX%G{Z2qnV>j5UU5l$7g0PkLsiU7bQgV^&@hYV)aYk(6T9^=>&{ zeq@C4H{)6Jvj{=roB@qXGNAXQa%`MA3q*eogXM9qxV{%LNvY%O@;wK17W~6%iKo$b z$wib;T+8)qE<>38ZT{apU&`EWL@kFvrg}OF+ME5M>fK4Gj{nSSAEd!f?KBjnu47GS=fks* zzNlR^0&Hi=aF#lqdG!;xeDD{sr`3JFFKY+-=>_BD!Bf#)e290R+K48LkFpm11Xg{y z7EelZiN`XWHFr9IMx!gunIot({fOm-?!mTLx;J{aiAzlt5O&fZns+=FtfP-X*O|Lq zWBg{k9W@Wn^*aS&FXk|{L;rHOH?h2V%rT*T^<=P3DaOtZ(U@II4#m;Lk}4+;_ug`p z9UekHn*(4t(p8XsPE*R1LzHte`r)!Qbg$6fflKZ!LAUB;E>rKxJM(6+K~rjRsqYF@ z)a1dEff6)wrk&lV3N+|(0_+!f@SGGIlnfx|&Uzhje$^Ey%$dZ6_FR--Qx_Do9`kvB z?8jhlGwi-B8H4RtZNkOUxd!4FBZ4sdTo0!4q%VXW zxW>LjQ4Y_T_HU7;On;Fj$e!Hbl7cT*5_=0l+G8u6nkK@5mrB8}=S94ldy;m2K2*-Y`73_%gZ~|Kb@rH<<04OKAS_Ec3aOhmN!lZZbOpEyY*(vK{5< zw7M5-SiTSBFAt-=@nG;Wt^w_%k#Iaw7n7gpW4-GVkmx#t`P?QpFDC?A$424nHil|- zDyTi5jh)w&^x3?r(0(!yEW=Cro9hYaLjQK1umxJIM`CfaF$U`b8tkPVva+4KYlL#q zl=Xsbco@^#tB%L0KUJibc2iuE)Tw46196`nsK zF8u2cs1izfA^r_t)J{guR#7Fvj|mIX5t}gK`6g50=o9a!ozFpFpPGh zvVoi6g-ahW{9rG3$}h$p^gaJ)T^W}CpghB_t$g9i!T42UI(FWT#>HieusYTda_BvE zb7(v|%(;MKWgZxoZe(FU61c{piQskZwRpOx3`Nr{#V0FNxM=$_EIECJ^0uvFok=S8 z-Z~kwKP&hV`$FvM?*Wh=1JVWLxg{Uh3-KEsrP>V^@mIh!whuHvUL)N0BQ8NoEshOO z$2yx%u313;w*zNj+yiek(9Pt%TXtb#$1{GnD;$$&KW7Qu1%4y$vI~$0bYO4uzXl68MnF;5nhq>|?|!oi6Z`wOMFZw;LoTGX+)3A;Hgl9i5}MK%wVvaqj+TkUoDRo;}PQt=|m+ z{{@*~squmT8kL6je;xsDegYpv#bS)=BbSV@1k=9Js4?*f^O`w@e~>Rijn%t(bHP7? 
z!*ue9W_RaX|2&0Z66&l^^}~$)f3l9j7Xe?IgZ8?Q;_PlZtnv6BsQ1Wcp3Ch(Wcq>m z)>yJ)Ne)<<5rJd;P1td37XZR+7PNZ!<64z8)`p zqH|JR4On-M1K;X(&}8evgz_z@U3r5yw${MYorTzL@sOJYcE{Pybbq?I4phfCvZ|XG zP&I#*(Abp>KNbx_C+SPJU!I3W1FFg6mx>Kj=P+5nb}RS9!|=sr0$T1m1ZIa{iVN35Nuhx*`CbPX1uleJF4J>@#cmumFNx z1Gt+>2{S*W#K>)OjBhvxeEB3y9(Nw59VtVv>-mEF#Bbd2g%;QUFrT$-Sr2QbQ^xS` z3^+$jAG5Xj7#!1!BAy=5`q~v-=B7ZERv0QKTp;EAJG;RY(VZ`Y^EPgm81asp3ajOG| zx-FE49rA`l4;>KMCo{K<$#86DB;M+uhNEygb@St(dk;CpyrArOSr*ivf5$C5?BT%@ z6__lq;BFOCa5A0?QtdPzH6av4juk@lCkYRGn-31#11Pt29ySm+p!wTU@a_JQxl&e6 z5p4w7Pj*B9TjSAYh6Rg#YD>=BQdAYiSSf^)FthCvX7(S23du#@(0etM_)P+n+EHMn zlLk`aOUs?ynfGyX=&b$C9o`)$rerkDCS{;c8I;E2B;sp~ByM#C_!{P7X|rRGHWswEPhuwj1~Z2xW#Bt{2b*V4cgw|wVzXse;hcX4j+~x|l9}q@Wmmxt zzoI*4@CUKZ?nvwkj%H#|$9$IwFgfikNbkGBh;?)E?GkI$Q>4L>LSmKuz5?-)sciUy zP3U}d2=@CEpfIF=mGtDsXa1x@WOoMv-@5WFrYL}Hw zM=;Hm_pt2wr}0ivDN1bDh_{~If_onQfr{fn@b^~oC=I&=?!gCnhKmZ#H%sWA`<@Fm zhA53*0GiIRIKy%*HrziBD|A;7UpAZe%M+m?XB2Bz_2CM?B+Q<&0PP<-L;R|IZvSIE zuh**M@*h)p^Irw<$xNGNK-s(OQ-fx|QhT~$mzfl?_ z+h2tI8Q*xvo&szJSFl}k6lE-u8_t*U#`S)fzvLo0(NCiz@gS|sV^P1m1In_uD7CX< znWXuMtxIRei&sF6cZZ5+5zrF5mfe4zhi3=PMcc_|;n{Ts zx+&Mdw{91yOM4Iu$Ms=HpAoNY5X}v|8;EatiJ$R^!G2exVSmg;h2O5NWL3J|mTPH4n zjQP*mi8p_s+0#b zJJ*9rZ*^8uQOEx~Q3M7C4q!gJh(Df}g!=2F0q!Q?jXy5Koa5=}QW1!H^G?Dmmnt0j z(g{WEj+N~qDvV64gopFO!0W?j@iF^iFnJZl3=RQQzl>rH6_16x(i-$NoyvR{96;lT zo5)j>iyP96Fh4vP%pUGwD|#j3v5a7pWRxk*>Q!8Od>%1!6rlA}0>RD&@Ou9&^qrRm zL3%sTGAIT_$M%VZsv2}06a+^r^suByH#B)Z2s(+B@1*_S+l z6$_YY)hu#G5>x7)3M7^TnJ{_={=0{m4djaKjMHV&&z#X!w*(hy%s`d;N zw)drZPu>;KPG1YM%R9ODgm?U5MgX_dPXUYEK*}doLf`Er=<#q3=HJ!8p0>qwPPDU< zZL$-n^}rm@+LL!A2b}KsvwTy^SkN3g(a#Bg2>~EP7C?4GFo+_u(J81q_nNs1y}~~T z!9K;@a@b~86-<5eFU?}L;d`KcY%#laG)xUanu2ikwVneQ=Wb z`Bgw^_DpUVRrxtoeOIZ0v=UDE=KbzyieIT9A!7>u_29`hHIj9|uAMBPVz zDEimWN@GMaoLE4+&b3i+E;)pdzFp)+s{-pDMyOH#50AN00Dkdjp*6Y>N}sif6PCo$ zynPtAe}97w_!NgC?V-YuKdsRI=^ieb|I^C7p8@{cIujqB$YcKQ6N%OGoVSLlVYb~` zc(;poyXC2T|I1u#)40XMHf5m8bSrS5;0EqZL0mX`2;FzqU|qaAvXpStpW49Pdj~ke ztM#(*Ttx(oAU>4g(!M-lO%%M!q7PgU)(PSvk?{C z`9=v0i19)Hz!qLtcZ_MTmWh9DM6C2lN12+bQWp9~DcNSW(==gG$1L=8KZA{+ z4Ne)8n77+qE)a*!%xffeH%|bk9?{hQ8;L`wT!I1<^4@3bLCh3AG|In#8MZat#D58w ztcVAj#WYhgT?Ue~I%3h^0hZF9#b6OlY%IMD?DN7JrLTUAC%njn+K*vi_FubjlJX0x z=d{CpZD)wo^I?SR~WBb#daKC3ilnk9ANC#B2sNbA-1kf|?(pM(8H5VuA42G+{ z%g}Xv3_{xuXt?)Sd`uOD%binEPLN^)t(jQRNV7B_9p02cyrrcRm|=Vj^rCy%d&5$& zq&aNDDSfcFiWjmyiLq%ZPb?&j9!TW!*ZrXR_9$^TgGAh`TST6wIcT;u zgeUnfKu6_e2-Yt^Ctq!vQ8jbz4(jEmU&Tv^^DW51+8Xt|}!RpUC8`X)P=N+jWyOQ^?Hwi?=sMz9z)XO#M!L-Tv& zT6%Q^5Bt z1&fu9-DYCrm#_RqrxbiOMsk1Ojj-;m5Bhx8zyZ-VsQ6k1FD_i?%Pc(6j#yqI^&zwe zO-1R>*|Z0`#qaJR#&Pd$aQhIQu}juLmvI`~k|Ut?t_UVoR}-W`H2=*_bnH_(7(h+cYWd5C2}u48cd&yAjl!^^No!(Qwf;^`E(9a z%?(9=k7N*gs>lm(!-r&ELX&^@gZ98hkXd|{c)VHs+^LH+=hg-Yzrh+FpBMg*qH_<6 zvFqaSpa>yKiew~7LMUqXia`=W5<-$BAxTmp8J(n3Bb82SI#1~=HO*dXbP_@cp*JDk z5auO>~V?1%hp5$tHH#N*u6 zSTsi&?UmVHvepV@IhQfF$Oe1uHh_dVm1GvXAQtk#T74GuEK0_NePML^6*f-}*CqV7 z7s2MkRXp&4`TM&zO506Gz@5Bn_~yJ|c@CKu+lT_aOQ`g` z0Qg_N6N`Z=;Qn07vlpe4kSi>!=O&??fGVkSR|Cr4yAkn9AoeTUN$;^0VCV6Sy6fqI z-^yjwzvBh9ZAfLa$g$|Sx(@CZGA{p58BsD^MvIo|V))Mxz~DR_@$DSEYA8glH*GXQ z!-36YR!H5dMo~d`6v-JK0W)%ZkXtktIwnX!^7}OHJnt;^n;lHVH>=@^U@vwa>ZO9d zV&33cH7NhcIB@Mt=y6mC62`U1riti`|4Ps_B8P5zcpZf*Tj=qA3SZUS(c3N<^lMnh z`PVqI+nMoh2ft9aoAKEBxQpl|UW78Acnnp_M)}oc;Js@(lk{7yV)#QKKU1I94r7Yg({d> zR*J)}sv#$kbwn;}qS~88;5fRGb$zPA;jwOb{ZtKe$7q7sGy<(GHi65kyJY_wF&LZ} z!@Rm-cy&=V&Qdst!zNmy_!|$pcMe7wDWv0St0BLq8q#k~N58MhY+gQs*fjhjRi2U! 
zO8-{U3wOkL?{xx}-j-p>_;woR`hx`Dip9AHm?QesJ}SGK!+KW^kd49`?60$hC*zmm zHO_v0gR|zDFhC*o-46znMhQL-5}V^4z&=S2n&z#7YkworAWaOX!jka&$Z}|_I6?ip;-J|h7e&WpaJRY~#P4?i zvpQ0hfFY#w?KI*yW+7}Jyd0afUQykejUbwQ3fkA-prLs=Fn3fr?rN%IJH;k5?gL|< z^l3eI*EoRZ+WAm&^&<~8$3luT^Ah#5euITmiAHk-gdI5q<*VkgjOP;C`Am7@+r)*kpfjjH=rOHZUyKeO z*V+A_?e6w$AnHvAv3pex^>3~R@ili)i91KFk|j|1R~xTuG-9L2RkTzXkKJ0#-HsL2`TA8^s(afkazrGkk# z=%0OzX~+fs-5a#+QwpwO`R-N)#!Y$JW zysJ@gntEYno(v7zby#mQ%Xg@6C;kb{fnZe!*Fqz3W>gK@S+W_`FNEV95!~{bfSkp1 zrEB+{#97JeIQX6cieD^&j^?H4b(^`59+|-ADG0ICFGDZ4gO+c|0sZ3lRO3?=_CD>S zf{CSOg3`M@!>d))Wz~OFtjOFjSGc6@{VjSwn{}febOZSb4q8MN08=V}(#u@bu8u_A zC(P}1ubz~IOoHa#C$!}84pQDzhq+!2=(gqtsdtzQ%gxz)I&Kr|1Q-UFlEN_Y@>tY( z+eHs2$%^*nl=mp+460lic z0kaCzSbsG|b;kl!=zLE@&qsihSrh3_m`D9W*Auz=X*jXG0^wa5G^J%jSfL`yY=m^8 zatUgB%kY?QAg0{k1oc1CK+Q8A^}l~(K1k+YyZ)DQ_EoSQDRT)vy2KpFm+4`HQgq56 zLM2l#!rN36$%!2#j78sYLi;dg>jI|lYW)hhs%j*@ zR6$CVE}{k{R5>kyIJE&-xgMk%&aVjfXOh&#EF1?t-geHMN8mT{#1#1!lOrzZ5TB(nhoogj&5CG?dOmd-{#&H|`{& z17msLYuGIB6<3Y~A~q*t$l7^3nS=Z|G)>(}=~$VslvVs~+3VwbEtw zhNvaKgsbmoV&|V|p6uZm=*erMPtK}f*wxEKD`zX5yFU|iI5ntz%nS7chk^GvZ;+!zJ5~g6jUzIp35Vz*C&u7M5;>!pSFLw1sawXRc-NsL&fpd;x znQ0qPk_uPZl2k;8kb?u~e2w>5_rBq8bIEeFv@0lDs5hb4VN zddTD;sww)TL|jZdmx)Q3q6p5u%SE-P!$7}nA?bFWOLZ_h`Fa#cCW9n<46)E)J!}&KNbCL6bdp~_J}_fFi%$}Ho*ROseNkhf!G8lf zmGH=y_o>(%tqlcdk|AvEBqH}Pz=|XR7!6rO4B682p=LCS`TE$z&eC$`n(8YVL-Yr- z5Of$1Hrx`{MO}qUy;10%kk010!PpMKRAQZ{YawbbdQQ@ELc>ZCN9(N6Te48v81B}SB9{R{jZZ?SA7_)+*7Gh z_FeiQHV8DBFIDhkFDXCa4fDCvQ8wTWHsjw)6+D(e3GWQ{zOsNpFALF-F(%`J0%16B z6Sgd5I}J%DtX3$7slMyb^U8TJ9r2M?%&!GbBg@7d%$0DW%Q32}0HY_SW5j{=xcuxG z=Bp2*UqYf_H7-L=({jQcyi}T!(??4ti_k7r4Wd@mpn6X}crP@8Fo6|(PNggZZy^=8 zdx1}GIsCtJui1lf%k3Y@z;EW5?0v_}-js_@!%d+8w-> zam<6kv~v+ew!Np-PorUUcNolHmk*p-ro_$B2*N`4Q5T~;RQ|03PR2{Iim^bGP6#p3 zm2ojj7l7M09-KZT5r+pQ1m3WGYsEQC7(Nbn%s-3LXTwqDS2i@Xmf)F*jK2$B$n*SG zO5ZRRE8(*$v0;os-wNia(w|1)^J>`4^1=4UOj!ShGq@~>0J|&ttp9BR1{aG_^m#aM zdUpZ5RqYSdL|xC0HC?3B6B`VfMrVbo_5Px-HTm z+ft)|K{@zNScIZ0*)Yc=6$kd7f$rYH)M5%lq<0ljyE68Us|+WROU3ZlXbpO~M#20C zT>8PT*f;v1y@+m$J>;g_Rui6G_e?S^!-n@dtegtCg z2`$izIR_a>XQHxhG->fjVvuJgyjxZl30G`NzO7&y5Ons>}kG zR%1!BGX$>nLc!)ln3uxE#l?9j(5W=@W|BD1`7)LjX77hws-DpR82OVPYm1N(ocAn#@>d>IxAf{O2EooVZ+ivCRO@h||{Fhl4JRG0d|99Z6CoHA&fT@2pd?Cgm9o5YDr_j5i_YSFj2w{+7E?|@SnEz|$rvWlj|%Gfi=FrM zPry*ylh}D!PU@9C!Fe8q=8kC6c0f)n%$y=utE5zmU< zMCf~ys4(|n%A1c=QPmf}hKFEl>^-9JHW;!wVb~+%p+xqM{CT?{A24^zes=F$$G%^> zxph!1uET@`Teg2p0eSpnDpz{WD~;u%U&U+)SX2WAtw|_z2nPGNdqK9lpOh>4LwY^y zJYoD4r$flQ)xkxL&syZg^yS$2X9&8lW#`hQ(=et@y0e|v$?z&S z9Xk+Q2sR5IQxAe0q1D>T-Q{w>_mh{@|Hgjo%%2V3@1n8%V?0q&KMBGYhNK&&fiU4X?RZoU zqJOHTk_j4U{gs^=&oVyL^^w%5E0v@z*^a_Tqlv3YB|EDpz?X~Tv9qXC%He*O4*Qq+ za(3ON6Srl6WnBuu;R~ofED^rHIE_s!*3y>2D$u<-0o!}Ch(5hTKkFaCoZejUd4S06 z(*mo)4sz|i3Tkamr`3B;V5^Edc&Z_J7SRZy6<5(`?Fdv1uSK<^$@EgK4Bv-lV$Ap) z5Hb`-|3L-qybw#c>E#e2+lC3IEC(t{B)l6}Fsil+#@SYZnnViwmsg@j?h=ss{it%< zzK<$Z@mc5kZ=&Kg0Tf?aVfYlpa?v={%`YaxogRD0kG?M;k9IGl+S zKSY2$+74`6N-*|B9V(yWfU?tm`10u*@(uqbS*sIh%M7-oO+{#VXh8eiufQ2I#wlu^ zVmpt`G-Zg4oM_pFL+uNhGvF?jM2k`TwlaFUv%USYbmWhdfQX%YOjTo`to$k%>-
SFifLqu@(2i-g^77F&KlehPZVB*&E zU>~jlE*`bpS-KOX!=cRfzHb(q>yb6jaxk^*?(}11ob_F1wG^A5J7? zy)l?QgO9B{uENN%m#}_*0`agCVrV4Rns%t6+Hkh>ox0sj{%s-nxd)?Z?Rb!k z%_EYeSaj`i#FW7sLE!dVI(?M{CAx{ARW%EY%+5*m0;i(}>nwIOOaSYp7jOjQ#N9Th zBHzYMTDQIyzf3&?Zp!sU_WS{9%bP$r|Af%2j~8hF;|S1cIflW4Kh)u>78-hN-+JAG{$>7&i-+h-Rm!)jrmq6c#(}|H7PL9JsXuSD+4z$4A!qajr}R_ zX^7fB6jjfl-|t3Y_}OGod|HA2bEksg9Uq9bV|Vy}_mCB~e%P2*2~J8=q)vMI5H+6$QvvIM3%G9O#jR4CEzlD5=$lQ~~5gUx@e z*FIguI`=5;7Ak>!cNv5&*p5nUPFcH64Z9gz+SynDZhCH{=~*+aPYP#V)BE(P%|_HH z4hP|bVi4J|yp8i5=pio9bdq^Wcd(4^5N4LxyB7w2CxS&67mQZ8Fvl75wf3LGycgP7 z?G=K3ec`BNrH$LdnD={)5mDC^;4>o;bYGl-lAZHN&&xRK>^ux58}y*xJfHNfVC;Lm zJ_`TQ1%6imF~08#V{V?poV|rmzx5bw_w~e8?oQYjQ3C=tGn2Orkfuq&5LqQ4x0Gs8 zT*}Ux-yhO8zY*{e^U%gsgSKxv4kCknC~kO1I`##Dg3ukZjs&v4$XL>B5)18_d8og7 z6WQ)L38(CCKygPhNuGQeTjn5b7Rm6BfhF=;=ij0cE@)qrj?PD>F$ei0Y03O;q=oyI z^oie*&bilkDT`7eR3R9udc?SKwJN5Zbb_I$x1!yT4Y1hcIt+hm!?L9suz$mDOxUy; z{IzP)&~_)hJ#qoO*RKV8mObI~I_TN4i`nx(9&D6WQ4w?Cb(aaSeBySzd^iq+?P@9a zoEJ=AV~4%#*}Z+-DxBRCgkv*YmXycYe ztiSJK5rkzqklnxZQDSXR6~fe+tNsc$w`qXUfF_J)dt1Siq0};_272!pK<7+HqC87N zYFr9%4ttIb`5nV%A-bS1yhg*40*Ii(+Dt9P9&T+C!Y#`%+(dn`w?++(PKbG4;eM#f z6`)N*9(mlE3!FDe5~s&fNR3NFAsJ4c<}M=HA@CxW;h zo}&6L40l<&1IiiYbjhcCrcgBsxTy%Eo_EYu4j2x?~V zy1TzjaZ^R;+w z|DA-Tm?e|=GbdgrJJVO;!)p342#bu`rRJD;%*4nYoT+}V448;Y16wv z((#w^KTToyxFQA@EjW(fkDZ4p<&B8*XP|}&0c-!YSm7{_?L{1=9NQgI{-6$?(z0l3 z9Iz0^j`zV9=VEXw;4uHyG7=ZM4eO`uMAHRYVDoD!V_g1`s)bXk_1YX=`cx4vD5BB5 zg_PSmm{(G72s& zrIvG$bBb$bgPVw-;SwTqerxvXC!1#-`B$ps7XnTRRXqMDozJ9Z7ZswYT+>^$i-asn>* zHf_BdNgcAc!;0)^6xrs{6z3AKLM3W9EdrX(*g@TgD0FFfMgAD)v%NzUW~rOAK8`EW z5j!1O2Q=$78he5ER_B0SW*Cb(f5!`( z90Q{FI#R(9e`yqNHEJ19lJs*CcGeA}1Nlo(m$5uy9)85)DX)3wn{m!4 z_bZXwcoc%S(M)ODU{b7f}<;Bcp=6Y1uRqBtT7Ar4mpc=CuJa~XO*c= zJM&MZn}J}ih&K4_MFKp!jkc4otC;UF)t`rcXbGG0$jdkvJ3DC`18)9#62RY+w zMhB;%`TkY-WZ6^r_xc1 zbJ5{q4a=47qNY2B(_1?$;emM!`tMYLyRXu*;O;HTxf?FEf{D<#?;7jz)ss=koxu3c4&LV2wRkghIyO@i2re29 zFO^eqRi+Aa+j~)NxdUwqiKYE_f`QAj%z`(2QE5~HStv6=S^H%i{O~Bg7^r4HD?g}l z7xNG~MT4oPHQ<~iQ2L$)DG?T+XCKbanYU^DcoFD67E!y+<>(!^1#aDEj+CfV&}EPY zre_R+|KcZ!^FPaa@$$%`(Okrz?7s75F4$l$imeAo{lp+T?amhL?7B z7~AIYQ7XjMb9!3{V)jft@X8b)pNnNqtqYYi1$ z?BRXNX+RZ|2-?kdF`s5=p=R$=T=EPtsjwVeS1e(3Um>F&*P$x67InkQXw%H) z@P6k6Jaz0cy6wxSFe8BR8{;6;p5|AgNW2d-+ z@(45R8g0t5Q-xGk?N7Rtk74iUSn_s7EQEQ7lSd_DmIZl166R==wxTMK>$B&ze*i8# z5d^bR{-0;dCF#K`IHs`*MNiGKKWY&iI&ut8d)DBgr3tuR;)^yN91zW)0pv|Fs;oOi zI=e^k24!5vW12Z=3dJNTc zhgN}Yb}@vd`mr;LBK!YuSmfiNg&KBjcHfu2$R$z_e*bmAT_aS{#wLjuIpQFQ4rh?C zN26ff%?ftLJqer^yG09!>_ERNHF7+m9Ce395N;`RO^?1n6-y&<#yBDN&pnRTCI-Np zRDibGLe%pAP2DEVTgX&+5B1*A)X^-A@=sZ6hx~*O5 zk!g--ct8buJ|0Gx_LsVMy`lEDZ0AtWy~$NCI@bf4#B|JXHl5> z4=K@OcjtGt;JdsU+doHB=d0z=X~eoGKMtp3gc(?-5sa?0E`fv3RxmVjfzIudiG0Bm zy3Du|w!ACC@+ae=|78ei(;QE5>_O<$jKd9XKDhr%Do(X8#?Y(5pwk)&k-xQ|^JW~x zdt3&c)QiySKLdn?%v<*193Hza!^+onSo>-p>kCc-Z^MU_lfIL7s%wDq*VPa`BsMNSaBKEOoW5@|c7EQ<RYShv6&j2Z1I)-(w@8IeG z;X&`B{ituQ#kwK~5q|3tsgeuZV;ZDFmo$aCI4=N~_EPw+&$_0bn!(P~%==@-{H7gk zbX=q(KDK9>oYl6ZJ0KXk*06WRxnaDp?HR<#tsV@9RIBUKvsK2uieWTt;cN(g;>z9)S}EY^>TV7oDvHGXD6u3H)*K4Siljvb`)VlvM(DIJVX z1)}bo5~BSl8HbMN;6bMgI4L>`|L@3A_Or)bKG$&mA0_0B+QeI@Qw*kWYw-Mqt9Uyw z4G;cJ!fNxg5cWX~^3~4R);A6mPMLs2mWw@62S7cv1XT`kKt$1Rz~3V zc?c0tLfP(6p1m<2gxDh$SwAIQi315d!#d?1>p*2zCGXz+S;$#qM@`S2r&bvkF(tKv z%Jj~G@swgvbG^=bT+*oM`507U+}3>GOVF1X2liU*KDha()JnV7%e>BbZjLCH19s z%pn$ODyWK}<&`5a?R7fte;*4x*O{o!dK+r@ML}$$0_ID1vYB)SO3z*a<@S7VIQkzI zE#3hGDIVx>v<`mj6{6{lZZbHH`NJTMF;901cXI)6(k&B|Ji7op?oDLgvj(Y@5n%>F z5h!z753{rUQG<5|lHWPvYE24#rfCqoKtlS9qN!-^f26a20_9qVQnTmGdz-ff44n_a zaCI)Wy}CwRmPSEkbp!UC%>tz--{`*Go|wIuWwGa61CMeQymprD!xY&JXmSunF4cza 
zu_uUCEVIV;lt(#I`A9DaTcSmC7iMCCM<_c_I0(*|^^*2v;6=L%$QN$jYVlD1TE1I};-?ka2R&>)D-`n~kD6 zMV{{N93tN}A5uDwf~eb+{CXb&Jy?l06HigWf$wHx;}2oa))%C&)PR_}|DnBJFX_SA z`Is593AH6DY(6NY`Yu|O)AC!gjqM@0ZW1^?nC;rLyGdVV0_!=qg6WypSeJl0`S)%i z7VOvqdgG#@dH+f1^cw?3qBwBkSrT3ICuCvzajf6!%~)kkG#G-wS0hB_P6Tzo7K=@5 zH&LS%ZnUU@<>rq)lqP>Zh@9oRRi01dm78{m!rshl=M+yII4~7q#S+% z^*0t$jf7k(8b6(So1X;T2@W{7fca%?|MK{gb->SiJmagBX@hzIy4ioH^25#24=dMT z^S@!x!g>HkGRLV(@jIT2-V5qi&OFO2|56JZ4QRcVM=UPogMDQjN3^e7lXr!W}xX1Au+91q~htmXyNPu z{q+|hMSCJRR4zlO?_z0*&jX&%jAS%ckAf`^mou+vKF@nKC8E2RLFw~;Qjl>5^wZqn zI}Kv9&?W5sI2sD9jKGlH=Ud$05Sx7|q{l6k^cBZ|(vLKv@$@W|W=qgU*%alrRN6Ml zf$+nMt3>~@=gZqn@LI!mKzgO1x-9`Og$PmjvlRG?2Jyx{D}vr*j3>$K;Nio|1Rlh) ze6j(xJLU*YzsgW*dK#-uVqkG$1k0mmnORTt2RV5q{X~-R_c&L4TOW;);}T)r;}A5= z%0~Nu7bNWS392#r6V3nA00mn2NL%?@Fua`!PX3GmUA&WOJTxTAoieIC$r%iMXJKA) z7A81zU{op>!xnxZR&%b9rcmbB{Tv0QlRp9_M(EX{QEbPJ4OM@~o@W(z^)0 zIEvxboOp=b+d`lH3P8?{1BB-r3DbTgpw;CW!2hq3s`Py1#Tv>;){5yMbll3AI?5aQ z(-m>L3W@|;V7bHst&={}uGl>GUOI>elcRCy{&;9V!E#amt~E1F4gj|mMl>hK6vHCN zL%^AQ95+k=t~o_`gK#O6@&@sO2k!IaafAXVesQ zv#zJd?2BuI%7w(P*E4m5lZcb)A*iAMY$3fGHmFQM<5`P9jO;zCFb}edzfr%Ep&%SyNMuJcfFti;D7Tq|rlS~xa4r=) zP6{y9E*}M1n$$t#0y#7`AKSN=V?#v)Hm`LCqb+rGjwPb>W-z-mjU+ZN2a%4~ArL#` zA(brO4q9*HP$=F+Zfw4SroLk!;H>}@RFXht&}QU#=1SVDXF%{P<`rLZiS#Pmq@pBa z;&Qc&^5f%qoaJwL1y9{kw`&CBMh4?%-xTf)#1i8{nsdyXX4;>oFh$RvTJ|PA1f>7Xu zK9b0Ty?~qJA(e0c!Q-jtqQ&rSU?thd&eK;>Ye@xlQ=0@Ij^<(#J1Q$B~>p9 zL#NN@r9-b9VCo71gkcQHiuMQHz2{-^XIG4#yACh=3ov%oOg6Wl2?gelsn#NYaQpd& zPS6pcI4%PMi?3jeFCR5bub@?~0p&J^;5b9 zSocy6Nm^D5kGr#=>?re#*Lr})@@`_!IRq+dEVQmCCRPy(Kzf|HnDLl(Iulw>go0>I2Crqu0MA})0vJ_C zK<$nx*yiLya>rLz!&*Cud>oF|QDPXSL$uMUcW5Z%yVe!Hsuoxdil}1gcYMQ1{z?d|( zRTfb4N|6*E%EnNy3fOC2fWqJ+;2Xw6)Z=OlxE+L?m?>5ASyHKR1|`DERa9bIiuJLK z-|&dVh;fV2-?<9=TCJ&qkt?+Ld?MVYJ)q?311*cvu_bma@MoGbuD?OrvMosZ>+fcK zLD_Ti&kEpf3+3q^9t@oQLwW88UXia0B60MKJvi7!4HfS*58Uh?s$unwy4cKz7?s_a z%FeZXd0Ex$70jnWuY%6xM6^>MjW2Fn;6Z5r4`9Ri~!KKa`hgL@B-M&7C<;B@hMO0(jy zEwhLY>bJ$kL&BjY+7?RqCD8M46kVuz2}>YYy20TX8f{w!e7!n)|a%bejo=UZ{$#&cr$9E3nP)B5YYY9W&!i@$Kt6u(!`Z ztt5B2?NWw+uZ%&xRkP6Z>MzEpyrcGUUew4b6NSSSA?oLJELd3w0v}J{&W+-UZ)wor zFkfg&F9XlZn_&CJ&6s%M7}__Mfy4M3!r8Kp^{MX$L*|gOaUS3uVcm#6p;^#<%8fKm zF9MCUl{E6idD8qgi{+bS=&;TVQtn*@>woI7_)8dSIIg3=SJt5mD^2G=@&%g_moeZ* z1=ywLQMc@)5WXn}H1wn3!u@(o8F!b|3@Je`%?nW05ea&mtI%s&85qubLp#IQOCw** zL3h7ttXr|Lia|&ycu{9odLSD$BsN58Yzwtv^EvbWwK(kLYSg(93BjRBtm{r2?5o)v zwb+A1ChEZX*-ohRW)77-j*z-9I1Kvt*)wooSykYG2%Q|RQ~wGn>(ZRh^QJOT%?!XM ziz!57axJLM)rG%J=UHy#Bk@Z_7<(fLU4NxR&;2R*^2ajNw{`-vJ$h)mMF>62=`B$U z!f`F-z}Fc{x^qr|!sI6M%ruMT`Rj4;sTz=_e2^wYslcFbGtnj{06eGv;7ML?XH~%0 ziQC>}BCr2Va&9wczWp5Po+}`R_myCAQ31=#R1rg?$*f;EUE0xeiB_dXCml=Z_IVjrT~}hlUddx8Sm(Az~;9N#6o2y+??f#8a_8^*EV<14?IESnuln2 zTNM~hQepmEBWVeH&Kaw$1{;>?GQYe8f3Z2xnRJ#}&H6x0U4y{NTL5um*<5o#1Gy!> zr1g3{)%d-LWOhzsd*Vpw``||UYMf~Q{Z7hX`t(dsI~@WJ?`3-u z#s+vvc%NPf+1_vntsSh1C871S<*y3M9u6X(c@%NlJ@V%F6!x4O3g*A!aF!99bIn(P z?)*BIo$Ri96ULm1Woj@XuKZ8 zhK5Lusvk(Lq-WT(OB03vR)ALTTxdPN7f#!qLXU_;D5%w@lJL!hn>C7vx9lb!dJ1S# zW6U}?i>2!tVj=uZF7!AsM^?>KnzCRQXuRzsX(hJEuli1GjLl){izvL6!@TSDjMMy9 zMjs4A;BGCF2CusWF0aO-S`~9}bx#D9!$!2{{yNyWeHQ+%K7?6c-C@Dv^C%10DqWor z2g+Bbf(1LLYK=tF_xA!w7qd>=FN?r0bORX9V|^qoUx_@hQp#~LkSxngfm-1O5PvbF zdxH<*MB<6P?0q30P{cTUiZKHhL1cavoTj%E=Nc27yFwjHYOWKfIUjj8+KXvPv^ywI z*}^;`|B!%=b5L2t*o;gGeEFz>PDa@*+j|}?@-GvzEDY7^Iw+%`P=BNb^j)u}D$n&v zY}0G1VlLzjzwUZu$-6*LpiZS)@yy zw0 zCQmgJi863gHn$UdCc9!5J6Ic)P>Ek_)YSvyHip@5E5AB!@`gZw6$meiPR80 z4P!BO#Z>64McR5Q9Ylu9;ZSicHfg4Sm!3a1Sw_$&>kne5@?y01U>!uuhtN6CH1OT& zQ|SBf3fA9P0K$etV3lRfvh+jY!iGrXUs3`WlX~Ef%QDN#@dx!87f__Pj~8n{0%YCI 
zQgKZVy<1wwI<1a?)gx7Mf<4c8X|dQl*j48Jx(rT^Jb&d z(M>#kpNCX9Jx<#9Kn?ClClBzvm zd_0z#zWYrC!g^v~JrQkw_3$=tWGL=aUSk#cpliD&!*=$|?r>IVRfLjNVLcIRM3l|AbTVlMM04Nyt@B%NHq z^0q%Eq&9`R9MPeDw=PJJr9>MmZXG6 z4qT_hN3kBpzYQ?)kuoZc-oWmc?`cyy^8w#vd%+rW;9Jt#C&Vj_KsjmDIgvDs;QwAr67L*tFy!<>m~dPQ7bD`>od=%DV(!-bw-T zuRP(^{)|NR^-0*Vx&(ALpMhb!#8~NIhvQZSGH**XgiXn&Re}gqaa4wK)nMoe9|DWa zDzUx0oES|!4Xz_&A%XQ#s|`fp_{U4pIJz2&-fYH%s|uiBZUS~ei$HC89=T@GfWkK$ zz}+VoBiFwohDD}eCATMsHOf%n@5@|~COp3Cb&yY4PmNTgczxg5jQI2#oVIm3j#OI9 zX5h^2^{*IO?C$Z-PMd-Ided<42y^6*Vvc}61!Ut(*4LyH3a_3N0H~Z8`@Jw|@o1F&iNd~POTj)l2qvy!_u^UKsr}ZeBtO6dhjZB(+MM(Q% zG8gx@t*j?69h!(UmEKCgz<*e;hX=dU|4C&#o*d@An?#HmAI~{H6AZuBfsNfAUZ3|8 zh?yG=qJ7KYTy`>cPYQ!=`eN|hG#Yw8S+P5n7>%Tfyv}2rc`cT~ym$`lDIHu(KdIMa zd(>%EymENWu-c(Aq<5SuTkvtxJ`spqj;kI zTctwzcwD?j1QR+;u>Ss0XdjwKU&$*$RASBxQ({g|8Gu>>b8IZPA>y)c#Mq(;+y}8e z4JStWpqqo72Rz_5dGfaFUPO=imoQ;q2h^n1W7R%YY`heVVLRr5^^?sYFt|x97ZjlG z@qDcwlAz*MBI~T(!Q;d-f5lZF$oEbF1;+rmv0j9dGHa@&^dGsVKL!0Iml=!a zz?1yEKpf)|;Lj~R%(Cah;+8-(tZV>|n#`hJ-W5aR|o2x!6RpV$U=7?YU zhsw>TfkgWz?bmH&#b8k&oNq_v#(#*>R^|+OP)=2L8B^};Agt&*4_^e4psTG3oo_9r zC7Z%{+~aamv1L2933q`__hwR}A1)o>mVrP&8hYDjpxA8|8aDgDm)211G<;6?-)aPD zeF#J`50m)jRAMvo8n&J*qX#>gd#mCUIu9O+pS^Zq13iy#I*uV{fdj$zDbW2R1bT}S z@RSyt1-$)3nm#3Aea(7kbjroUHqq$CxeoqUB3MTE0Cm~KdJ5`Hh;Z_CXxf-eVi+Id zrm2cQ$6rEvB?!C3EGM;RBP!_)LuowgUV8t56g)mhQwmvc&Hie7RxJPve%gZ5!wpi= zi*h35#L=6|)_AA292MRh<4-Z`g7(-6v3YN3Qk)~!h<%Z}Y&Wznw}GtRgP`#o>*;J% zhD8bo@L8w`#Y_D_dOZsM(_4gR*R4mJlLo{jQ4{Sy<-m-$YfvMgo~(-sgt^TDs4_!< zElaKt?-dt7u;v_ApU6UoIZ;%8#scqcvcTC^0@l}?Np&n6z~jUMY|PAnva!JscrKDL z!M~|lV>U`QCBUtfy4b?)Bop){-l)V>Y9n-$=>?poxV zC-7X}&0!o9>pyiXrvkM@q-)y~a=>mM3Jh#O=SLK1-7Nsn&;^Yi2co&k61yA&K_UDD zRrHQPw`u!{if8NpC_4AB7}xKO50WGaAr%H8Bq7v1O9mk%A%rkUl8}TXOb4CMCDTDV z>s%@w-e<{3lEWskX>TEfwqxi^PWi3hU#`o=(9ApUyPkF5_ve&MRxSL-Utr zR;9C&Zy28iT?*2o1B;mL^~=J7U8dN*gnEsN9|#H;5vuvb!|jAR^v=s0BF--C`7z}C8P%^rUHC+pUhyTT*bA2FNwfH2> zZfhYkI1mNZHMlpc8rHhcz<1rJ*-3XVOl|ih&KUJbo_WacKc5Q*OZ!01;G( zq0Co^gzr#`@uO66x--ozT};x`%^x1#^uc%0s`xI6e7`IF1YNHhn6g^HTm56Pm+z8`tp`JI&eIWG}`wekglkLlqWR*E_uNMzN(?i*j!9m5(m;N!zJUBHe+Ux7VP~m z2^!XgfNp*?Narx_c9lA0>wN%MlkZ@OvL(5?V?&T6_5M$mxV*cqlbOC*A!y9%13}P(mp{#f7Y3Bu3_ppR`mIFe;$4F6>nXfj zw-Wp&T*l-%{h?9wf?%^d8f`7F@OQ=zcwuG|dWTPkt;)stAVChw@bggB%%NN{7er^0 ztL&E+@=3YG1=8Z?j(>>X{x}A@FS%gOkijhVPrJ~(d@NT!b_Mw`UH-vr3bDwNpu^&h zguNuDLze}{j+=uA(+6YP{2ID7U*uW)uHf{%c^G37h?<8Iz}ayvJpMcbn~X?Td7{MH zo$6@VJdqh(yw9SxSMV1bgHg+}OBhpL4}&!1c&%vwnjKruje3e;?9ZLpbN^ZSXD#(v@f{~ZniCw z|JG3Ub2{lPw?Dwn|AGGIyxmga%`|M&> z@mt_g{}9wr(9c)z$0BA>=aI2J*I8%575kzgV;C_ddIX{!c?aJgkHt5~d*kM$=@@4j zh4Zs(aNtZUlxePFa*O^@a3z6Xn-Pfm{x?~d@(_4_BQ9<2S@KOC!>B{)ysj~r$uns; z_&y&hj+}%oLmaW>r30*elZHoBW@798JA!=XTZwqp8ZNpsU2>q$T71{3U^xeSq4G&? zb(@hR==3G);NN~QHgF3zu6ZWB-dv0pC+lF;W{v~=CZP5SZ+NIi!_ilz_m zce@6Is!sD?6>Y=<^klY*ldN^8rbGw`LCa+vmgOJ7sNX7(OqqJ)(MPeX=X>^)GzJap zHfAz*HxrG%Sna>x4%}wT`N))+Xi#?{v!q>o#l0o%?6R41}j*7 zlUthH;@{UrVEZj%Hy`u`yZa|#WWPKZnIi}Lt)%<)JBgwji_8nR9H;N@6Z1ifA}ODE z1>CyFvi5;LnYU#%N?Y$U+1rcoZ|DYW)!odqkNv;qIRf;@1wixiiA*z@gX>f;@(J|8 zO@?Es4=Rhho;ZO{%jmVYHkr!`(_up&Z}iYgfdY*K(57Jk9lQr%&mj)VKPZ`(;s9`0PSfo#})#XB45qf#W#gY!;@SFGKN@-WZpY$o}PL zK}q+~wW^7f2eibcbl+;IKgeHE1}Rl%2=kt>3qI&)l0VNL?q99u+A3AJ^Xwq%aB;-~ z-%ys)=0hIoG#DCEjkZ6J!d~4#h;cN>uGX^<77<0Cc|B|W?ISeh`(k%~BshN<1IrVx zVz3Lb^0he+|!!*Yo218Kla0(iXd30 zor8_l19_M6UhG&;y@b?ND($WTHMeQ-qnmgELv)$w&WGwoy>k*rFFD@#+l9JXa@?g? 
zh$RWRY)$wEls;RHV~Uoc$y%;X`dM zgZquMsMFgF?4J~Jm9Y!4r7)j~^^VZJMAzK*SATH&D+Cp@D|vZMDt1>~0*BH$uzp<% z$`k4ZQKogZO5{OE*R@0ky|XaE-wo}8k}>8KF%E2$XzOT$9{LZMt8o%)_9=ze@d0?W zcPN%hlF%`=6hHi+=ke5DsX9DFt$%~ z;-YI~s|yyB7I}3qIM2O|YZr~j_Nk{ZVo4xoZd-vhBgr3@wG`d1-IYA`+K+L>I(1v1 zi>{dgpd(J@Dnd{4Zk02QBz^Fnx)9zK=cC(#t31l-g2ckq02}Ui^X8p}%<8Bs9G@x1 z*&}RGSv!n1{82FBZ{qDvp2gkY)}XX8K~k6bjF&_M&#{;esVSqlv)e}|?fJdB;eH+8 zKw5AWd0aGBuL61hM3Bz+7yg}>3y&8hqfbFBD%I1?)&6Aj9ruS}L)8&j`MCh!ebK?g zXN=IqTx_Z=1^?NWVBNPC zYa-U}t3xlA1hExvS3^x_86a;8!gOPc0QcK@Cq2;>#g z{_Ml&eYk-3A*Z46TMEV(kvC}w@LxI^{PVROh!}PX#Ge2hA09+mMi8^Ta+tNgTqO}t z=*eR?p9NvDg1mvbFrPHSuzsoFzv>KYC|knEZVtla$sB?X1);+06SK{_2nyMD@NB%! z?e3Mr_J{pZ=@`1o&lZzQ@-a$J73zMdgXNB|yv5}xNC%p;;(94sTp)&#S`An0LT*;{ff==B;;*dp zJnGmp!B`~|>=x7Otx#ca6GPA|v>M$`tz}Q?y?WUz9*uIgz{xLp*gV;idro#lyBE*- z_WqVw^~?z*d)-m`CXMB-tA-60m%;OQ8{g@ijR)MTP;L5Yw6`4!k}@@POd!tQoLC;V zirAJfyQ}*4+Kp;g+u5YzBv|#IA4-261GURqFi&xr^oGOcBSW&V(ef{e=w@zJOjaP= z-b>fdrqK|TMLq?O>k`k5Os4im7g_?>RKr-yU-tT=Q}$cOF+r?9P#{C>wbL5Zgg zgb$$S+^d5i9QT2{l>!RsDWI7}ew#6r9hsQ}t;qsH5dohuApv~OkJoT7=AiZ^LZ^;t1c+|=w z2n%%(jQzF+U+S;MtD#eH%2|CJl~IMtfISi|&1{L;m?)4sJdw!TQqX*22%Y;bnwCStdbK9)R7fvLkRQF`f?AgyRK7u@@y&SPR{ z3F$l{Ar_>=dVyME9&0eAjzq~}(zSYl0~e!AFAA4@ScSq2+C>yc!?fTPNNz94+n){h zBQ}9Dc02@ijm9IYS5WPrT_8Ui3t#uGMC~;zh@ZS0Ivpt&rsB<;iqC_a(<7nrQn%#$ z`#^NlYGiGmwQSby%b?b}ANOCN?Ej~6yeid&k1*aw{cgVmwKU4xZLWd|>#ecG`x;Zu z`6={19fUQ1U&Ps?uj2Nd@hII`MLY63FwV+_kQL#Ox-CUWz59XfJH+uvN;GwS^@j9| zfoL`U3R9lkEpggd464NntZ6!enX2o+SMNNE)HhVMw=8Gs*TX>T&}5!ju@kD&Z*Xy; zKk9#qp0&l}}Z8Rhdb zxH8|F1<{-y)7}gHyJQ3h+6zxk#=xaK%DR4=$z%`hXvXuQp3<{Sao>m^TBnBUMwQ^< z6i=S8V_>_=16(7C>k)d4i)Y76oIUqo&l&09pe&@BxEhR>UH}}5u-iHWQ;G_)!)F;E z+m(+ckMy}RY!jGG)#U9-Ke${gPGZ{M7uNKRMm41mdDGuWireaNT_1nEcES$NAF9UM zh6KE>XNOU}UNe7*3q-{Z=O0QAyyQB*Vu+ZI{^ydI2-@mkP5u?BM7 z`$4KzKJBCivm2Co6>pd(G(Pd=A6^U8KX{F`&O8crJ5^!)U&)xj>CaWi*{t`*L{S(2YyN&&&z5z=H z`=h~{dhVH?0ZKIkXb^P3dC6rst4aNfMe|W~@r#*q*KmnUMNjf5jo6hNh-tiqc0n_9 zLDt&J3qnqD<>)Dr#*7G_DOCZbrHPZvx1=@ErYEYMJ&XRg)m(i0k3FqYpt1m9=uz=r?z0v(5) zT&KX8w`TlDJ8n0=d3Xi%_*sURN7kX+y3r-Le{`O6f|a*)@vZ*hm^ed#&3cqS8BJct z&?;{Ck#<7|Ord#g6Vum7MY={ZYyF&=FPt6});<0wl% zDoKru;rc!tFeC(|qb%S@->cZ&5Dea5jL@;6kVO>Bv8lft+ZRq?txv7MWbst~zgT;m z5xWO1m->>Ikn(987lK>)d9F2TCL8M050yLoAk#vQEib3R`qvZDbJYd#Y%quR-$QuU zR%>{dHxugr7X>P+#G<@w3v0IRM(q%q`!6qr{>M(E>_`R7GefMQoz;-fmvP4<(vFKA zK#M#|%5-8c_M^Vt-Cp2*rJ0+IyDicEUIDG2IR4rj#yz$r@mmp^&|3MOOI4H1@2`%+ z28}EjG&>#64=~31xtZwvO_QEuzRbce3^%20$9I;rH;kv;V?iWa9Or=5^QNNyDe_&c zQ-zk^)}Zc{1vyLRu+|yrY|5*r(YyuQ5or?Y$9<(ns0(nhowIX#L)2It!8{&53XGJc!JXgVX6K(Ff znV9zvlDPcCqH6i}fs&^xi_u`Gg6l0Agu@>=p`zbeHpeCg3rdT?!Gji%uc&8$PlwGI zgVL;1SmK$<-cJ}v9?4iNuUwAvW6!|Ic`0D~^ncuRIQ2(H%wCS%*q7E_*I? 
z-_HlbU=JK*-jn?J0EgPBd*SJGp~|&@%U{H^u0cugNSEfr<%a}OYrDCzsyiybxJpbn zQD(0`7vx$0Nes$zdB?Xq{NfoiR91(uUKXS^jxFG)7DeIeQJSd#b}H=kj)0K^65wtb z?L^el;FL-M+UEG6srFi^@^)am^-|Hk%nTeeR;sV;&H}t*0l$TT`8uKn=cKv1Wc)K4W^#wHR zTZd*>>Uh`2Hyp1;g8kdG5GURXhIbP94udExdUh4V;+8{KTrrnT&F59k#DSP3=jIy^ zpp*Af?5G>dSM|$*uoZ#qYriPU)w=Mhf#oPtc2+3vS+LXfJBfV~AT%t!$4oq_OL^2@ zLD}albS;~}GEbkNOm^vPosWH?b=xLxHkY0U4ntwr_X((S(GXr;2q5ilk#NW^6FPjH zK>xuBFe$hov{v+Io?ou9TV1h~l^@HsrUVPG&O|_VWCS$yc){fRKUM2or;KQ#5~WXx z4;un|SS|0{Atw9JC9lpP6=^Chj_*;vCE(ju%6dLpi*lF2YqWDxk_q3HJxm zwfj&_@@>|6w7l3GZQECa(tU;Gv5y66EukGx&oQvXDhK^jDKB<+5(f(j)VZJLo`Y1G z?uZCXtuhnbz6yfZ!4&Xx7r54TJ!nt-hYcFI4egeAQ_o%%ij!9hij)#uvN{%7yF+Mj1*ppHim%?sInkK@{XpFu|PCd2lYx92dXbkA~36TKp8~=rb2w zGAILOJPy@@7J>c90B${}2LBqKfYrxlpj|t0LD!rHY0WN4Yo-QEE`Q84u#-DTsRMgP z9M~%2aG%W{Y%F&KJMHJ(*@sx@S7z|K{~5vy>Vv3!9*X}pjYN675&v~^8)Sal3E65P z==R4KMf)!EJ`2n7^V(?`$;)x|`z5_VD35`5n_*-XUQ9Q zD_Kzddx3ZoG*2lfqwOp`sOoVX&NX?_{ndpHbvpxJlMT?*vZvgRP-O$wvI)-CcRUiJL zM*$d!nwZ9gI_^Bi4WIu^2K~eo$P=PrsERAfevSm&Lnpz7rDJkG;+|&J@CKDZ5OqCI zVw01A?Y}CyJog*Btf1%2ljBVNCG{7mE@Jxk)7jh!O6)o`oxAKg4{1BgaFfG9>eH~r z{!GBnKLep`Qz7+(O{-SS?qphrjd{!0EaH$vWZs2mf4a%iGNxq%jO#!*jf*g4L8NHjUuGbq1lVS;Y)hyThR_s*cNpB+S<5x6rudABm`b zF!dD}2_KJIV$Q|G5bqKUV&gPWET`VR0}_z=6yTzQ0_wVcQ{D18kFQ=#9)#;(c)8DE zI6j~ZW|6nh$Ug?gjGu)Lnw6l;oDHTo)0nnvF#9nx54dC&=K69oT6URn{3kCKIrUwoy)QgNo@CK2qm4jn4EHrUw&m{-p^Qw`qL$x9k1UDa0S<*Jjk3> z&69)Z%z8{Ji7NtP z#`c|E%y#`-roOTQX5|*6XmF{yN#j;ne4RAmHL4KPD*;QE?Sd}LPdv(4$t~^5SmJa! zW!VnnQ)9~Aw-77f`56@5>dlg#I%4OtQuLhuo#kcC!p7%M*wI;ouygz@@V?;)^JV#z zHK{^>*#S`VKa2+YlflB1VJC4KJM=2K@}n5U3xmKYQ^{`6La?7`&5jQYhpg?|*le|k zRb8g*a`GJLIB-GoA%8RS@K|hJyq<^UMS?8jHMhTbo>?ho@*B;ml;;n`muGh3jjk-{ zzIqA5w6C-NMtgA62nAlfv<8jl)uS}xv1H!nL8#Cf0|nPr(AiHHLe2%lEF&*Gm>rDP zCzrsxTSH*dgM4T<{lsK$o|4-OvWUY$*HsnmZWI};>|_v{9-2@4krY9;>J2OT8Gzqb zyQ1x8KNb|Q0K7XAc&Z-?QE~&w`PRam_irL!-ZGw3bQ%|FM}qRpH)eK{u20DoCeQs^ zeR0x8>S)*quH)!=AEya!GuMKSj|7^PMcgxXDepLQU8w6xI?mv$xXRiCn_ai@Ii-h* zIe!^Ub5HSxOLO7N-L<&xaTI!5{b8|mF3)k%;En&Blavi9N8{S7P(Q8?ZV@BOxg?62 z=7qz8&D6u|HJ&@(zQ*hV7sC9R12Fk(85=yj8XcSeVcx3#C~KqtFO)$Lap%=8Rzu;w zKrFd;oBep?4};A5VV#8r>Yf%bFDewGI=p!vb>MF9>xv?qa*5LGzA$EFD5x9cK*MVn zESWPAG!E49sJ<1fv8M(sFA-zUSSe~19f4N06Oz`Q`a=K68dUDeXC=%2aFtmjLEJMM zrEyaw9dDO#joh=~yoUUMrxh^ka1!l*d!YQauDNtsAaj&#!JuAuS^ZE2j6Yrpa_w*C z+1^({o;=K4r`m;gHEKg^)+C9%>#})jpFYHSeb3}|p@QkS!*tg42QA&n;D0lWbv0~- z8v7#XdbN|oxq1+btayp>aHhIxILeNi!We@LXihKVPk-1@7IYCO@=ZylVYxVH-Q5kwWsKcT=AuvdDHFaqH%S0K^1rTdczl$Bw5Z#>J-P>VGd8i@L%!IuNd&cfMJO#O1Gh7| zlFn}>80FU}tk~j+Ci4&Cq9g)+5_RPZ#>f?rDl@Iy`>bQjfw zZSqoZGmI13deCnpxBrcm(Q>Tfo|&JUIMmJvRR@ z7;7!dsgpKUi1(C(1I<y1I0(^0+K>k7U86E2s%U?m%YH!ikjX0ao=tIA<$BR`7PIX}tMq7wAXG=lb< zbNR-g2vp8q#S~tta7bPWb#Ym!qIv=>^=|8M`VrvBt~Vuz5=`Y||!o<)A>QHIAZl0CffUFChQ+85pvBFUmE0 zRI3=N!u3MhkKEV<{;p}<=z|g5DW8F_Xn&lV?82{1h(~1%$GYX?DYteBe$&iaHC+li zHWrxKk;{Iqp9jjG_s!+un}o-cW3Vk(3zX|lNRHB3*0z@}xBjvX{g?d14lZUiL&h?% z-FvWL%0Ltqax5>n#M?Utfm_F0HnNWsN~4olbNy`IYP1Wa$&r$Zh5-22D+km*9p*Vr zjM#}wd1~GT*06dde=M)S9%rs#PKpDPg&mw5yjl7(UDSF#5N_G3gS}R7R^_#bt=c^s zt^Oo4<&9}z`sXTGmG;8-9|F*0l`rocMEgcjpGs}FL4cHz^@u7%gR5yElD1deI9w0% zi!&rGejm7abtL~VGY1V~A24}Gym@BAFzS~$z+KC3aZ#U%)!QeSVU^qy+MZoxHw*Mo z`_3U~bD9Si_xa$N#WPSVSd}I3AWxvM72vN~Fj=n_?|+VGR`w-$_SG4@_3jIxUy zRO*@O>Hd%q>5LXBNoe=aOi=oMml%3)r0;B!WW|93>@>T8Hc1&!a_=qMFed|^G&`eJ zlfL(s$xPwo03C07^MjiX-!njI%vgzQLKp;P z%;ZyFuEck@wLubC4Eo0_xxHFG=-jNK_iiQgm>6m=*HiL-40F$m_ouUk+YBzaQSQDE$@et}iEWmQO!?t-Bp!CFJUb0dyaSKp|-2Z6b z^Fj{~xRl_J!$DY|6pIGBV?k6j+|0u{0ctixQx|y*@%g4O>rJ5`%@4y5dqjAIxY@r0 z&9U5%gGQYpST7F&(U?AFL5sg~qs6|Uzho!;Z`XNLJJ63g?)Hax{}c?$+Xa2y2BVfi 
zDTw2@NHm)AsGH4##eEJ0zX6DwFM87ZaT(gi^@NJbcu1+Y#fC={q4DJhL8LR=+}gi@ z%NqSK`C=rhU9V(q@jjsQ`YQNSUM6U)2^RdxhK?Q%Fmi7SXzwbAhK~7Qw!0ku&A$k( zmM2IGVFj)k1u0QtLvbu zEDyWtU7_G*HgAk>6~11S;L>k-IN;-Y6lsc~=8GC??JvSB^^_$E3qlj)QnvHsP}IGd z3#%(9QSM_RD#wKguN{UFZ;3i2*WM?K4 zg;hDGkylDPirXg_gW<$%v@?FppE@%fDL#N2p2;ler9SignvN;kIlPL9g#mvw(F$g8 zx%@qAT_s?Iz6a_IbOz)7H6Y)u3pu9)_^-*7e;Hej_LnrMdww%9+!Sc&bC&HLlL1xY zpRDvr4YvMkE-5MVXPJpjY-PA5np~y!jex1|1x!rzgWZe5 zQC?ueq<7x&uJE_a-gymsyhYcA@vXe#9+U04D!SgI}&Q zpmW|O-0|K9t@l*3PuqggV|*>lx|0Idicz4Gr3qG!$9VS;B~<u*) z>!S^a zAE6FfHUdmakHJRy3P=~k=tbP9uj{hFAUBVx-m^xX8x?HTxmXO_;Q`81KID@a2lZhJ zFgcme-qe@EjU_2q$cN!`^<>&RlnTv}&zSLnllXGtY*c(5$_E`Ke`|0t3hp`>I>G{j zOh@wPcYM)xfeSnzNgkj6rfhmob!KO4y_~S=8-$7E9LzVnk9Mh}K$32JP%mylHoHovTk+7iE+*PV6S1pbHx&R-jmP z1|oW1f#y&1nRVE|ta-eQb%a!~OM28HXO<6EGk>zvqbB0OWpeywyc9$G&BZV}7x~G< zu}!5Ex}7TlC)e`Kwe*^or_eq&lo(?EnA6%9+@I~jrdM@*8_?{s4 z{K-NRsq5{tg~YbQ7r5dI%FK&#^zU@s+angUBPQW;uZ{TkZaZ`{C0_oj6b$*KhRuaT zDX*7mZZ=;5(-)NB45wo36?z139*IQD@Qn~QNF6;^R)b;LHgGu>4%M9jIKFTI%8zU^ zA2{L|hV&nfMwa(UPacI@JdU(s1%EVrE#3<9!Ilg*C0M*=l9d>?d}2SSGNc? zJ1H9|slj)uq14&qjj0ogA?oXHZcm*)O4DVM%R!X!jEREfPKVGo_aHNPR1EEXI(Xf* z^-#J-gg-TsP`T`^WNp7FxJ#ania|wS{38@Hy+YA`_!`PIJ!j58yx6Ah*_17`1jDJo z-igM7XXak0F-(BxhvUHRVl@}da}w-R48df|S!US07|h0O0JGvVOlKE)Tue7`#ZWce zqjW@>mnpQbKg!jf`9jy&fB5~>67;qx1<%*iZ#Zrb^&2(ulKIo&_O?<8dg99dOq+=9 zQ)mxg$05_W2%8@_bDd%KpuV#JwO>6?m?M_LGV1!tZ2!Gj=zMcX$ac+IrYAp-52l zbYwNDmuRNgF3j&BukU~m)(khfski_pYggmo<;D0k*qrnL;>BmrgXZ-%tW#0S(K8pHlmrpGxFr+J{MCtBqs=#! z&qn9|m*9o#Bpm&ic91V}z|wXA$ezXs?aQ;lAuGq7ci484J_$YHpbP(L$gK^82;Ab zM!rels+)xJjQ6bhS|rz8lZ1^TPv*5K4_bb>K*d>N2pB~3yaCg2ZO$&V7)SmK(>vU= z(~oyf8U@|I&VaLg0TWB3sZiztuN}1mJtzFl|0WK$>5w@fUgRTbjo-@M{D`yYAcI9n zpGVGD^HyVb$)!JuD1MtSZ0;RNUE|aF(C|>uS=NWw6`f%LiCeJwlah79K!BDVAll?4 zn7*Aze$(MNtKApe#uVU&o(edzPl?)^+u)LPGrCt4fwSu;;3hc)#;3n)To; z`FjQ!Z>KZ8K@Jye&U`y9R=7E9Gni(t9={zYr0>Xv@+|gV)U*F!}7_g;QCJzmK{z&&lwZo-Ga?v=rs}? zA3tVNCxy`K9|>CQJB}s+Vc=^N2z8wG>=)nS67x+K@Rw z1(o}If_?B1Ze-F6i<)E6-BpguyvL$C`Al24=}SDX*+IqKTu|R`iyHpKqob}Q_PiR) zBdVd||IcDE=b`;8We-PE|F2y#>S@lxuy=-F7Gy&maKIwZq@i;}5?4tIX3e`^a+!j5 zta_WEEtazf(VNj=XbdDy_s6;o*LhKmA!@j^@+VuvjwKB^F@UM| zEkm_gvCug4xnQM7og+KK!7?-uEgwz{V9Ps>)Jc}z9N@{e@;aBx)*gJnKn%P}tqEkc6 z4Ndm5=5xbg>5q62|8c_RgV&j1=Rx>I9D6rUGp=TK3hjo3gIKx(vt^W(RL^GKO{w6P zTM4#d-e61}h6kxn_};s#_`J;*J=cn0!SX6J8@+>TbW~!T9dWY${>)2se@c>7buj8{ z7x#`6xJu@HNS#bO4SVTU3g(z9vCc%zYN~JPmC$>`7~(&MCWj*s5s=6O!|> z;lL(lJjD;Sbni1AjWN*Tw-@u=y(w3z4;AZ9qw-imwWT7Rr#_h~$?ST;*Z!ug*pzhg z7HEMe-h-D%rm;ic^HG0H1FP#1%rt}|h#K;NnYLR2?w<&owx!@&-(2{(F#{i^mSNZ8 zBBnd|0_Z!dqxtw`Tsfl{#qOpcYDO+QkuKCVR5Q_xS7t3PrMzTQJ@1b8L9ukGaJ%p- zq)PXq^YaVb`amL@Xii|+Z_a~nKo;7~56AjtCD?DcB^J@_o%#D;{!V=vab2V!KYUFH zuvKCzJtylt7C@_Pqfju0QT8DT6Nv*j=u!mrD?i|iw*{e^w;oUKM_tE%y_VRmHKe`9 zNqAS@2VN2DWb2hl=#Uf2dXABxr{op$HyTX8?+<;n7T`da9F&Bf!POt^@u~j>G<@L? 
zs!ww;B{LbT!VeKI{c&|r>ICNYoY?kr;&E6`G``rvP|p2?oH_uvBep#56?x|J7n9dj zi`%Z<4ofO$V)@k5JdS*oG8!A3EuM12JFe{MB_ouUyGjg=qq*0gP?S|&krb%Mqu;0t zC~hr;Owv{qk0Jop1u*+>F8oDS3OYyYf^+?Fu)SQs3RZUWUfGFgI8BxRwdXMYcys~G z#;0+)XodNPlwImf+6-;P zXH@Cl0*+g|c(N7!S=mnBH*z>SPN>9p!<)Fl%4F8~?23@No~}o`b-)r!i9c6^-UIJ* z{W+;Tp7dEo$Rt#Lst|IH?BTLKrZD&w&Fvn~xH4nDMDc*~4XPDf^eu+6?K!;l)_P&^ z_Qfc&BHz2;P+o910hjNi=dRgR^O8@uc*Nm6`1&OSRFd;Sddat1{3sea|F(zI`N)VFtOs_7CV+fiB?N`60N;m&pp`ZmTpNzTR%yu z=|^m4*PiUdjvVN;_|6agB0nwVK(`D%NNn7LLZf;#mP7|gqQzw>SFWyJaz7u-r>sKn zq+pO&27&Fa@$6U`@!ETP)6y&ni98>2m=bz63+f#z-)CH^cc>zL=Il z*{$|gNlVxTC|7ZW;;h5?^2cgyo>T&6z7D+A`ZcrtE`b7H>g71jx#y=7u;CVmupu*G zxpOjh_3dUbD2?fEPk|uwf0+6hHGJMX2Mq3|!HpTSF?mlh+V=pER&J8?7$D%%C-u<# zzk@iq?<8C*xe9H0Cm_bb5=Gx9NZi(1aPMsc&}wirmks!fi&qZAaaBqbD`n7B5QdU3 z#H&}U6nbupr|({GbbJ4jo!Ux1F$wVm_FiQ>j}63>k-QUlnv&$U@htC7-z6g*=-sHhQ_|SDlAX@m=y!PyA@)Uk#8j+JgiQ5?r{_`~Lw#P$B?nP7^I|@cUO2-|iPGIV+k&>321a6R}fyqh>^)1~Ez5Hd`u7%WT9pmj1K)GMLD48` zYp#sq?^wHO5nrD>6SbSE?}YX_+uApxNtOc4wWeX?^fJkTr^`^|DtU+Ai2=q(@lJUV zKNVw(eRrng{rY&;uzNdmnj^zsMyF75y$>|`#DMhr3L*Iu-FF`p!PmST>~Uuz8q9BE zQs-6{_n;p{_Zv&!^N&?BgM*T#7S#3Oq{D{|&Bv8@!8l zj6avrp_6{q-bopB5EiBlM^PlP!)oHpoqY!}YrS<~clsi1UXlbJeJF2qc^}uZ60t6C z>MrOIAYlqI0~MS2tdDVE82L9J*}o4eUY*9a;=Le!mRX(QRfKL=_pz9;NOZNC3e|PB z*w$K%$A?ij!0z8{15ZS)d)rvH&RKj{9{^^f-Jzo}k-tlHgYrLC*s;@)xecsg0|T7! zRs{8yJ*~jKJDkvUWfk$>gSjMj6lsW4ASJvA4ww$ZAFd3mdIv&DkR`E(kMW#!{;;Sb z2;SX3OFBS*xNl?vjpN=6R&Vw(scnbQc0-09Yje>4=mjPc=grRC<3r3VJ9gtiKlF%o zf>yt++-2ESc>QWUnr!>Ta{fC6Rz=$XXT8&q(S!IC%F)nTJQc2NAkD~n7*>tZho~JJd5|+{*5nP;?<)rR z*iOMPE)kL!)If~l6lKt)tXG;9>iEid<}1?MRq{}o`qTW(OGK0u;jykjR6kk{`jnw> zE*`|4M)^XHEBSIt&ERo_1A4j~0o&D)LhY-u7`A5@h{t6Jt0t2V>7)^K#hwR~J;NYw zp$#;2*|J7|R}g(nuWI%sR+7~qHsrQDX4i_a{CjaYb(9>ZpUaD>dS{^i$113|tOV&YD|TyT z1_bqd!o+cD5~BynDEk!+)f>EU)d4@igZ)rG@qxLzJ^35@X0g=sw^(&i9a=OLfGvsV zziwGU(4^n&?$c~sBim2==SsnK=s=MD?aV9<=sa`p1USCo)O}paJ)O?u-IhzJVltRK zxPdrTEeUgemGBPhli2V;3(R!mQLZ~p*qV}uZ^_eQ^`R8X9WRhq<+Nmd(PqTU{vUTcp9w){KC^e9859gV3C&aX@^=Fh@i=Kz z^WA7Z%sS5F#>K*kNgHu|S1jJR>W;${>%F9(cP)J($gG<9`_r zp!YR{yACF{gPT8d_E=0kN*3JyAMzu0#Da2EneggjEOb=NM*mnPc--m7d`XJtQp8UeeG0?eI3SDOwa)b12 zZ0dXY|2R6=fEc&8jZX<7BuSDCLJ~%jde)63BuPk$FbE+@k|c~yD#>(I=`bB=be7cg zth*X1Vkfafo7ina>=0sy@UH*+l~2vg^Q?7W_jUa)+^OS>Vq5mvG&7%m|AmYVlT19s z1++J%7FN1UV>;Gp*5P9YMs*>?U3U@kZgbFIv6_9ZHE3km4{G~Lpsu0}0-};pd8C7M zzn1aJwPB>TokuHgBgTC>1OnAxmc0l1Q}ORiP%qBGl2=D*-?10;*fAf}v#)~Y^L}t# z6alFNQ^>fV`xwBAl9cT+mF2o#fe`pI2fgP7-;jdSDATWPtsc@N1H+(pOPyXA1%AvKi zd#aQdZdRf9;_Pr|E7NZ_J+AaO91MnKi(%J>LhSP#!+cEnyz90F_-oNA=#AaVzQZfb z10Rpgp_hr*^Lp@Zl<+c}-MqY?l>YBsEqJ>~(Ky`=0%~2cneiO@He7=}e*uc$4`B=p z4f=k+E5>h41FK2jD*Z+c!0W5@@Gj#HO8@XB|ND)!B~?(7d=hDyQvqe2d7zx@4lZ5C zNzg|h6nFV>?ndfp)JIttcYvj5Vgqq5FNF6jx2=7)nQ9h%=lmYb$FlTD9KSFVWfLB9 z!9Vw4-lS~&^zj1RZYhH1D>^7Eodv9)5QhA<6y5unRuh|v%H}-FvcJYbQgH!vpW6lv zN4^nFl~Www=Z$KegD~)|BPfrp;3Ue=)Tmz$S*#((_MTyMn06?7zTO6F3}v9muqU$c zN~-1-iv9;55ewseL=llk`W~dS-m>Aac8nX!yi_sY#vBzJ2Z6jU7EZMq;rEqSz`<@1 zKJ4CvNu$~4lVA>xCz83PG!(zuEJnfcExfe(V>6Z5TJwg5dSJ@@8sXE<(`K_|T=V5Tuul*Iv(!<&eFhaGsXz5sKNp2lIGiLl*$If_De6G`qq+RD0B>^qW(-cvtRj3VGNIE3Bf zt0C1bn>N(11!t{r)Ce>K>7;ma`SxVgkF^G?oe`E-7o0;8;i!xEdJw<#Fzk0DKiQo!L}A}H^glg{pZn)IE`;`=1n8>~icPv~Ov z&7H*d5X&=OxB#o!Gm~Z(Uee8SzhP}*yxM!@hbPUTe7~dgVnZ(?7 zBZ?T>vHXP51+;v9ig}P_G~{Chgv3}wvG@w=F8D|~6C%KQ=XF?Da|~yk=1_m-1q5X^ zn$-r=XU}(`_h3g*oHYdPvGr&l-$r^m&O-d~4*LGhaFp830e>ayr0{r53)=U<7%In- zWA&V4wGH)QUOCsrb|{=_4JC=&X#8zmY}|E@?d3kvI<4iuF^LJ$qTmXl|^*F3^ zEZQ>vt7PU_;(hpEuIE=dNY;nb)+g*f`m~8Rcv1pv5e+ws*Wsrz5}3K{EG+B{#`mk0 z5GCcDRCDj<6jo>ShP0_y}dd~Z+-z8{7)##zn=ur{xrr_m<|?}OUcjC5$ICEF^1X# 
z=pVih4MY{-dU^{MvH7bxrjD_Esu>sOCQ;|qNYq_>m>-*sq9zeW5g}B3kHw(gOIV$L z3}xR|G2Ru!+pDjIDEB~6&m2r!^Ye+(@&(jwUVp~+O2HPv-$c4pl{A^EqB6I$vL}$S z!jh(e%eHgS{U`F0X=^GqN4Qb9QH7A{Q;5AUmqTiW1OxI|CsT1LoIe$S)wj<=YLyrk zF-EZB{CME^&PVkvkIB<4O*DJ38_G_V63<9ua9ZEOu92M5#|gF#+2a+@ZYK`c0dn?Ui1XT;@8Bax}5MeqhqR7G885!1mZSPWZ+j+ZPQdGxsOM z=xwv`MJD6zUQYx1jBz@2e_4vo9D=5dP}pxj54&II5%(O%ZMay#+438yG;Ig8=^BFL zxSxFct5&+w=@53TOveyCb-c#(zB`eL=ut2SoE9e0*7Lupf1W-y)C+)iZ7bL`cPut| zt^6~WtGtRB?Ptv*G zn+~0+MDN1kwB!lXG^d1u@^oHh^b}qEUUm|FSD1kKyeik4y%xTjxMD}65fm3wEHMow z4Y~8dbKoypGG`VV`V3-uFbYQca`2tx4I(?{k(|35g65XO#L<_SuWmerw@aYi^%N)B z_@48Ap8=({>5wE!M|H-Qb^2Qlo=2Hy=GA5xbYeE!Nrhoa>quyjZlR55N{HjT7`~n9 zC7oN2fp`o1{Rhm%7a8^VQLqvS{Tu?~#+62>?%>tI&IUi4l%4SY*mXgBL% zGqorI`|Yt*k-#)Z&t;tMOD8IyV@RyVRzgRmF?`_}4?5NgEM^=9+v&H-_v|?E?2=QX z>#NDR{_O7Ft^)e$`7mT~CGKJK{fn~62-!nffAc7?pDiP{yW}XGB&2U{#$eZ8)~RZ~ zg)}b8MVF61Xm?LIUDue2O((8`ZN*vi-rGbs-i<>W-PvIE{3C8y>=?6x#iyju?w{jT8zEkm?$1dx0>LVXVJWqsGd@MY63+%SOk)ihrR zi$ELj*K{NDszt==>VCRd8HUGt((v`ld^CKZi??0nkQKnZ>?S?5XGni=QDOJCE)5zu ztQx!xu0ZO?1hjoH48PhXgTSxTa!$&2v^r?Yt+@@@?&S{c(HUS|Iv681FmAbF0o`^; z8|5o<$hXU0X!P^~cJ9|99rr?*u3Uz;VM9=RfEY}=5?Q~7g5TN_gYVrq?D7gD_EkF2 zJ%1eS_IOD(U3b6{wsVudVm{{8t3j?|3%Z19NQNh%>GLq`c%4hD@7iE^U@q9-tbrY$ zj-u*6C*k||a#%jl9}|C9L)o)|aFflACW)KCuH_OYmAk>J+vAWI#-gYz5W)_w$0Hj? zVyJBa<~Xi^g>VJj8-7!~C{CyuMwm;9iz0rqlUXxL} zcMkck5eW}}rNhotmJRhf%RK+*&@eR(?%a~0Tb?yzz17es2AgnO^<~zV)WPO2O8X=8 zK8-p8@tJH7>(BNQ^E@!Rq!hm=hJo)V8L_CkMC+$!qv2j15GEurUyB_yH7Y^ut-{k6 ztYg7wD6Fsz#=Tn16Ff1SdFD>SqRue9ZxM!q;T0C~hmJw-mSS+qWZfYyLGx}WIHT@G z9c=%7lFV$3!CQ~D(K$UEw9_V|BI*znEC~j!)JyPg)-qJ6Ql^VuLD@_r{^g=M_&H3W zLH>+NK|L47+cn(JIVI=Ol1JxBa2dPTc2;2c%5vKAl`+t!O$T>&25TNn<@K-sN0oAa zUfs5ha!WGsg4`HIuRd{4*R$+)X)&Zc%mQ1FsZbJ7$NJo#(C%sh>8S{#LQy;kGpRz2 zO-TLwCWH4IG4xLR!g?k~fMsD6DI zI9Ut=C7$MVqqIRWEgiGBA4ZGSS)^n{3g4F{p~6#tk#LUr2USz3!QBFwRlu^DX)&aC zL_INOcioJQ2{_pE3TU=G=EYl=ufKc zk)sb!q2b?=*dkd2bR3(@va+aV=rtnvQfnzU`ANi0QqHBuj4GL5fyv0AXFCF||J}^G zWRl@&XF4kGF@L`N8Ye&Sl*)%Mh2zU3;TXGDS!dKh@7hJQy`qXQX}2NTCCvXdAPnst zr{cqRnPB+E7;pmfkbwgzCk9yF(WDyNZ<3aq=m|E?;ind2miR>!tRfra`9d12T zJvfhs>|M~cw~J0u)G=SoZ@$yPoAfoECN9a0le+984j9XxBmH7%RvJ=YM>hKw&gI^H zILCMbKWWGBFVrPg48oI{U}bsUa^=0<*k3am8yaWQ@&RVpp|C{XicHX+kwv!WT}4HH zF?AT5jkddI6UVF7q~zT&6zqt`=0&SX_svOEG~y*`wN*iwe*`kTw&0ES%b@p?y^jrd zfEr_fxu_k)Mnr&*tR(a9DgWr z?Uxgb4XbV0u8~6aYX+j)#~Li&8o|7IB9LmN;;nBh@%!sCP=>Fk_+vP{El+|@<4fSV z)CXK%vR!b<8)|m$F>SctO@#IzX|VrpglTIq{G%S7X_Cu0vLi{t!$s&A@QpV6eBiE$ zMliiI6qD?x5vv1x$;?Appm;9$}=)fB8( zGT+zx0NQYT8R@WKxxc$n_^R?8iX(M7kCCTIj_!3x9q@+kJR6E3cEhp%&#MTh8)($! zv)J{|5cuz#4Br=Ahwgj#IHS)^WZ1zBER*&JNl60N?aX#!KW(VRo_N~)pcYNSr^2Q5 z94z=Vo%QOif!*zoEgkx$I0wpFRb+ou{jH!Gp7Pz|)TQt>nUj3moqA$;XtFfx5e zgWlBQ+;)l*pS5I*uo(CL8iq~6P;9eJA*Y|YF(#%3aaa`&Ykr>r{irf9sk8(YH6L_T z3&Zcb2^#VK(5veUsVmEf)vA@$C8?CnTa2q!siYFC$>7v86C}yEx%O>O_?} zEMR`cDW6G{b2@q35QQa9ah$~D7$>;vMs-JSqPDX_Na~ghd{!sJH)USf`)N6ys2j!3 zzfdkKpA_I)s?nTOYS&j6=>URLSA1yh5;&i=)Yw<(RZXQ=bZ#C*#kk6-%;sN zxfraH_mjGQo{V>q0@Z&8gRtKaPPVO`69jFcef~G;+e#(W%^Lv=HQ5Ly?xpSbJb5e2 z8qm&;2E+CtC=EPC#nWa&$0C;H_*_Yo=GqaFYzL})ou`@y1F5ik7O>@HFvA$-p(s&ae&}8F|ya0X?m=A?MN=ba{4;mYk9E{D8}7vi&tZ zpud9c84J*UNGd426XEgCT8MvB4_^za@qd3?(%3d4ah`8k^L7Ka`YK@Jms-X;Sx#jA zZt&_wfZY+Bpy|&E=*WHJaApKmQH;QlaU2*Gy0cCNV^F7aiQTSK>@GDM95YYxGE+}p zJ+z2?J$n+x-A`y;T^y_p+m5w&mAK%K(YS`PjX;Z-|mK&`evAP;sQ!{=n#}ULdeK$niJ@U78lK+zsYL! 
[Base85-encoded GIT binary patch data omitted: raw delta for a binary file added by this patch; not human-readable.]
zTLQo7_F1gXzl)(Ctm)&i?cg8oi6H^LI6*2;q#Z0%$J~2<=BD)Z#U8ffu%%|fY|b*P z4_hDH=cLX1z@w-T*4q7mXqhwkoXtX2+?rrfjulaV^AqL{H=+ApTEOxmH8N?83TfXH z&I^26xB&Nhc**)0MQX9=xK@QuRTxMD!xqAB(H|T=T8|_=9Ek6J%G1czGr{Z5P;`(h z;>O7Jq5t_B^u5%CSE8zM`-BVF&%Q6>#d{>JQ~J5d|Mq~5X)}OPIi?PkqYlj<@SX8r zoP7EiI4V8G#n07AqSAZp?qHmhn!6(L#86CZdUQ`jgE7|wYo5LM^1TR01ZUqsLQ-&|^%K zRkiFL`-jVyK4e@71$gMfW_HQ(oI|S|eCL3phVBQfg>U_da@z+3a2%xp)D$c2YfrW%lBDnRR&V1#>;>gmMCwbJbE} zd6^%<@mZlJZ3`&#|T!MwT?-2!-}v`qbZ|h6@dN&80pb!^IAjA@xozoOseDeo}`q zsc}%}4)2YJS4##EC(TmsijOtL5>JScAAk+;1Y}YT=;Bv4L|w%PMMDYBwe3eapL7)0 zhC3HJ-T^z!>s(nT%lkYJ;lv#gIC;J*s`X7^d&5sKbB-?QW8O_ACr22=dI^=A55jXJ zUGlU|8-9c&phK29zw%Qjo;#{f9ki_=efu|FyzPc$?YRm#sVYN;NnfD1aVlSJpVaesmApX0b zW%^cNPh%%HJuw6aowpzyEyfFhN3pK_CM1NnanCIV6SD>Zwvnq4sOZRi(U-BNGKafa zoez7K*b?_e54i{9Qo!C*j|QHY1)i+;Ye&{I#^nH!!nPs|Oq~JpBKBRivEX!94ItB3 zu%G8$Bm@nAi#z3p0BA4{^z9U!TULhMi(kS*3vKGNCkIR>^uhfFQD_{`{9F~xDPU(o zqAtJ3C@;oW@s@BdXFDVs6BOwyPi-5 z@uva)E;1$iFDVhDck94m)hA9~MT-j3I-Go5-!M*ODL-?g73pkAgPYfyQ9a}@s+t`} zr%dKX=vCwUr`pnjbDsMSfxXv#h%0}AUMBZoT7&^f zD_M>O%OkPQ=OL)K+TsJVb>Ow26a zlz48XLLZD8+6^6p+d*@zGFcT-isG3px7VJCE7%>W{%$$e>T6J?rT4hf_?IX?wN>)- z%{+|KxB;tPQ@rxlf@+*GqD%Bts0*Jas`&Q_eM*8*YZH6-YK4?MO){XNr9M!3&J7eS zYw=g_1mn-jmk3IGN~JUU!{qb-*3UyEz|JrV;edt z%9On8Z^4}Y1zcqH7hc;@i_P9_c>g>j)LW6o{K01Y`u~bh>fOW(e24Sg0d3;9tdC21 zbQ8wr-NO(v8pX>O@u7NWVXNd2#=KFaP7B+4*T8!a^(GgBw{6C>mlqk6!xrAJmtw2K zPCjH!BaApF#>BPb@NDQW)K>qAPAi&m?(`N|l3|G6>2t9Cr9O>|WVy{}vixVS7;w0J zoPRXzK5kDLMD46@aXFn+xW0ROkkqjR>bv4N4_#X-h}qyQ`%s3aB(Oba!C=r6f8Yd5 zk2?1}JHuU5)h4~~yFeT|iBG+CTU5q&=PN7>iTsp7#BBU0&Rju>+PpjpJKkE5`~GSy zQnVv7`?5jeAf!I<5Q?*oaRo7hiQ5exzCJyKQklnm|Kv?rwK5BbCR9UZ*DFqZOjS~G zCm9diybfYH1IY>ZXvlJtf?qegL;T&sYyTYv?_C%zvfwMGH+|rGHVL6!+RyE0j*`CZ z>M&#!kH(hfguV9p$gy8AhYX@tw-VEtMuQnO_VX*-{D={U$DO>o{6Xd8gy_0`4LY@w2)+gz?*Ep$BBWBhFVe)4~ z5;*?`H`+~>;8!7*UVnmmIkMn5QH!>imxAxbU~bE4mi_+|jtY_j#*!%qslj$GZR&II z?>!CIA621Sry2czdoOgY2X5xh4qWz8j!rJqq5;3O==5Y2@+9sNo>j7-Ed?L2N&Ogz zo6m_tkF3VRo$272yPpp!k*7Vg20?wJA=)_q0jY z+UL)*M`aLMZp^E!IR$?mqEM5T#psgc2xbkzhHMbww?% zw;}asHxDO_;6$q&O`P3|^)+w#;4f^BA{)-RoLSB5kGhDSv&Zr`1&`qI%tToEEE@vn z$Y3ed^xp)=dHGVdClgopeKZhWotp;01h4J>K z*&y$iz+C5AF!xL?W`VujhY9e zode@y_#cJmF>D6A><--7p+PPlsKCl&shE*+4cBdagYhXxF?F&k9#xhlPt8?8a8X(E z_;M=vZu}>bJ+4h>*2Q4zQyor0PKjQ36%eDO+prmzbo?;U zh{dX;Ywvsz{@%p3u+H?Tl|-wDS&6v>SrM=qG2HQ9wb_w$FeY0Rp2))h3h{biH^dPV6S)|+ya|m6UhR( z$rBl`%bG;%#bRW`V?OegCm(!cAZ-q{Ar2M?`1GKseCC27P`1|uFRi|Z_NRVeTWKg9 zZi#@!D(Xb)93??Q~>!!pyry)N$k~bbryo3xf#y8X;Eh+5r1%6QTOvA8?p( zo_m`sN2l*PiWkDuFv)BeE-PeN4)J&FR%A^5>~pXzT7k}$%YlH&Jc`j#V%8-CmpAmW z_qZulE13s+{}RDNS`LEnO_HFA>Xff$JO_=ElJ@5b;A&NY!%XF&RiRZhY|l-U2CRpV z`kHj?EIZ==aW@K!PV*)H68x>7i-MW=ciKEZ{ys0hvWB%RP;6xLgo&}E?pnV?Hi#A_HHuN-*PT@OkI`u8ZSft#dCP? z-|4(ADAR=DyTPp74<$2xL+XlmlIo+cfK{2mPr3^m=3UoR1 zuids}-=$rpqUW#7$TIz2h@3J2!7Ua91BY^1r4}@2&}uI9P&M!DXhC{Ss!*qv^_aWd zily255}mA@IDl~l{SUK$kH&S9fa~wUAm$eKPs@k=kA?W+wG5eJZb~8=mSERy1A1g| z81rr@pz6wdXg04U*$4ZvLmjktj3vkXcAr;hK0%_q%&gb!W=#Tz@I5iQ+zEh%e z!Vu>ym!%gfjCVoD-6} zw@g?r<{N0s7*OjKgJ@7j8{Tpz~dg=3uyLC?|w(dyu`VLRF#j^Q_NG9*Dh=h*$v8T4=! 
z)VrVL^;**S1FJSJesOcCe9U*sD@@(K3|RmO=n%tpJVyg z+0SsemM#h0>jR?aTTp#q0vx=hLw)6b{_tXuqG2*%*owpRwU!fQ+%XmNFpObIN^`qZ|EtxZF?G&3`urJG`L7y`CK!Xw`E-!>sL|Eq z-r@QsxhRa1W!|E1kaT$<&daqVla(AGQav8TFZCp$cekTQO*#l;b$O4VY&`nYhB~v} z&&Cry=)3F{cb@&6#eZ@+-!}t!v+Ek5hG~#@n0dtvb?Fw_aoAQ-fjN7D^P2w`_Qx_l z4$D&de_V`CZHmx6`!CE`xPy699-)IwFmxST!JW=yT*8ZLM0)OwD7;shUYNWdg+JHh z!jlx6Yh_8~^;}#!A{GVRze*P5nb6?bx0xHqlD-J}jN*>1lA7W;e!E14ia+K{e1$RC zm!pbNs~*4~4HNRyPMI1c{ek4oI%MNE9ilW>mKv=vL%sRhF!3VG9X4oSj6=6e?8a_>iL zkPU3Uy>0=I#fx3R+Jt#j?>>OZbGqFor8m^_Tj1Q z9-&WK3@X9q^&tq_z~Y$NhuGQEg`am_hv28R=u=h)2U}IBpNAY>VI`oB>sdB@K^FVH zsRhdeazu;!3BKFQ_^Z=i;IUt97B#|zH(B-yo&_6{gMqr#$xw@Cy{-hm0f~$&a*^LV zML=%7G$+#T(-L8@5#+G$YnRMAuAX&kwVy7B3$fv#d(4~^d(4ALfrE)-cOKY%y2H!W zuR^E(=@_O_$#PHMaLhn*SesL6+!apxDO}RM-GHXq)_~EkXZY=_64_x%&U@om4h(G%^kR%8uatuYPkM`F~VJAad`z}>evTDLf#0Fe)1Ew{6Sn=qeS?H z14!POdhkE5zy}(6;OzxpQD%D~8pwyi>#Ro*v@`?!m!SX6k$m^TIXFDaf;Lsj z+PSw=Q%HCOadnKt*R63?#(K$^FJcj)rv=6x+^9il(5Swo9>+f;IbE3(x0*?2Cs z@EKp+sE@&c+7O*#M841U1`ErdkUn)bx;IQj?+tBSrRMR)fv4Dmc2(j5yBi zMiXr{nvgvb{dJPKm?_C2sW5s!w}8?_-ic9}B*i z(<_hSQDAn2TNYOify@hyrcrp{R2%CM=~1K6?%>dU5I(7wVasZ^dv^K?8AI$b(BeL~ zWV{y$2HQIKmfnHMI;=}DwgRG};a zKk}jaz6p&Cn+tQ$hU7FBaC35sFg2=GA{aQ?xzfazD(^=e?ypF91*?0n!AN~0N5xHWwRFSzYfl3@RhyXs^`g2$+#AZkKMXlDh-r~tTqX*n40RU-br z?^yorG)ix9Aet>pRn_CLZMqCC{yiJSi>)QE*o<*ncok-xj>36&&fyoY3X~52#|`^_ z6GN&ACWzA+B{Pg)lU0N}hcZ`Lg&4Q|`vD8hwa898H>jz6j;mLxQO|K{TutD45GQYf z&`HDLX8m5YFK&T2SGEHVGOJtQpq&RKmr2@f``z~P=kTIHrj z2Yu5gzQ6zBZOH>%yR;PFO}53T1?E(SvDY$O>(I5R9NZ>Wv1i{O?vt|4ssoMSD<8>UzRGxgUNxNK zj%*ZbRXZPeG!6^(TEXt_K~NZKiQ+rcMGw9vL22n{Xdm3mZ6s#o@aDhZlA{c*tYiKp zMTV>^7Le9^me6U|gwy^ilFY>u;quy4RIXs>3>{VQOAujWg)W^{)dh=OwaBk6X}Hq9 z4X$*vF6+nNT>9MkaDaL4e7*a)@zqCh)rcGnc$Ee+TqjC7b9lcs09bcnMr`8(FSUKwAfw^UGG(Ly9N+Oil)Z z%!;=#u1<#-{C$C0jExnheGS~QS$^o>SyABLVs3c+U}CgQ2jiAs2bX=FqH5(oP@QoM z1Gn4YzMU@JcI`OUgBj7Hfzfm4B{Xg5ZHL~J;v$$s|r&Z5N|>s9DRw7>Fi$17(!#+ zl}OiY6MprJ)gWrlfTwN~L361D@7_0{^%FJNoHd_w2ndGvs6zS*hTz(f&p|C|K1QC_ zq|$r8c-NpUs4TRmIqF*&J+l#lzjAzFX1l*6eIt45E1z8FNq0aeZXuLqFL34g3O=%VV1kM zld%Sjn2O@zqqxGkuQ6gq5k6b0M^r$L?irkgDW2KzVpakA+Z1u-ai&B(=Y}M){WbG( z*b?Uw8#=4olKSqj;5RqO5r@IYdGnUT*!e&PJ>Sb<#^YPyqMpd^%bi?~lMGiifMu+7 zZlHdTA_?j5guvugurN0j!+8&Whu=^(&EOLyAC?dEH!M{Y2H%AHa#F zo4{|uSTL9&q+a3^FgNrSxO;Q>LQ;!=4ylt%YX@Nca2-11_E31R*A_g@)tQsX1h$!J zlc#wXAm-9AaJlk9Quim9?dJD!(@~4K92n0_YVzQ-(QJG$)tuHO+~zyu%%EK<0et&( z(PvOCwCvHP)+{F<`0oHFUoj%hg#vPg-5-rfc68FVJF4vJU-LY>Xv?#n7PNwuOISdXUyKR{m3J-FjzKt^5S zVbZD)bo`@9<3}se%R^O2!=YBl|9u2pb&8owyNMS+UoT4ieu~?5_Yc|$?{NN>Io!bk z)^r!^KKH1Vb5c1kKH}y{%vELV-V6)kQ<4uiRT6OSNLyNYcs{gVKO^z?oW=!9UGS~- zZ*!CO1kNZ07+xas*;{5*09hN!%UOGiGT%fBxYw>L(#?5;edJ^qL?^~EgvcoTg) zuEUA6omd)dK`Z}taVO_zVBgi-_!M9@ES67??N#> z0#>8-QReVhm>Ob5-p#%V|Lj<qIh5e6CV}yMO#0d98rRFEjwe<{ayDUdy zYIfnI|J14HsErWQvJCbONJ8thPdG#CGQ1t5O45UL!7zCM5p1t=hR8i&sBJ=r?GA;| zUgnkQYQU6?8?f{1KlYt8rP=2+X-e5dymE_u=R6}I$5nuT`{Ph(piI35pE#+0mMD_R zEVgXcrutc>=((etw_u;C`?EG~z&l+c6g6RsHoNAE91~RVDp16T*W?96o zzO)1nZ0rOa83bK*Tj4+wW8drK!}aqlCvP%}m!3yaeP927(-neVAMJ=MTCJOyamY4btFL2dI0I-D$7lCsjTAkYzaA`0e1Z_$+^s zaS*PaRv=^MI(QG?EMdk-D^-Owt~ zigp#4^OGMnpwrz$T+O$0@bhsjv|B2H@9LML$)5M{=M6pj)%q4zisLcCX$n}bmm%(Z zuka4zoOq|QR6g=I`mh-u4I{bXJAWG-&yh@+K~RpWoPu* z>wNeic_P+(QX+d)NQ}mx#*WBj?7q&|-m-+%i!e(r5r&;i zf!~7rm>TTF-F$Ht(iD17?(G&B_MEv(FNfji03%W>VY#kvGho3h4f12Q7G1)t;E9<5 zkn>BIoA^tg%o%wcyJduQ-KRmcs-+HpOks10#x3AsErqm~GE~X?FeVEM@ZFO4;B?^{ z^dFyxJAa!~r7_w3%HZ1|e&hh_*vu7uCEZ{Yo3HFso| zE;XsM!SMw(kQ&s)Y5!BB4zVlv?YBOFu$)a)d~Im{d6uOcuS|y=mM7IQHgsLqX?!?& z5N!eu1RnFa!&l_!TGi2LwlGSfV0H+3*3Hy9@DsjrXR-0L1_@J)fY#%0xCq%Q%(|9} 
zE606-oIy%hy#EHzt&PUbGQC*$;VP`G>4Ev(Y(D5GBl7EL;s>;7lg>dtkRNSMhddla zRmctSp74afU|bLPThnl6p*}HFR^p`XKSgoI5}4#Mh{jF4171hlVbKW{a(gcGW(~^W z9(C{*Qq_G0c{@ z$*?=c*0p@YH3RD2n8e)+mL*#bMPS9-UogtA5eHbR5$lLSw5(BtH8*x($WRM>Fn%7` z{`VATePjPN>Px|+ZXpic)DQLMt^Af{Ik37*1;S(0$k1tF@NJ3_d71GJpG;At!`4lJ zExCKp;PhbXp;?HjlP;mw+?B+`m(fBu>AJB$%Bg&z)!4 z<|Lj5Xb_nLC-HZ_37a3k;im^gW9af&PNDk_dY6^+F8_@ZozcCEpHuUo@J|?QPSqg& zGg?8X_%Hm}YD6MlJ;Gi0%}9ZKD4IP#%j-AU5|vq2w7cUCM26IH#pm2%?!ao86rw?9 zNK7%TU^%w#OXIRkib4Bc0*e1`fQMzrFzD?Xj9rz0eZ#jxwO=hB%4VKExwFLr-;2dI zjT8_*6>oD?-6<>G%EgGHO4K41C@KiDkbhJ*wNdxAWx@+v5y0TLe@a;IJsoKzC za}uGIY{kW=86SMY71(Hh34Ik=uhhFJo|!+0M#~wIeR|_DU|J^Tf60W$zm7q8NeXKGwWQ*P z!IE^LKFD313Vkm(V8cppR4A~-R@t>^zTT2HHSfaIgZH@`LUkgj-Yi+tcM{8G^O4jH zrgk%fq0xzXPKK9)kp9OFR!_tbmLd4W?lLa5nw;687F64*L;LsL0%^Q4e~ryjd$QtT zrulbtsnimQC3{8c?9Y5vswGB0Oi>!wCz(FOglhk3!`{?;FyZ=O+Oe?>+oYAuX|)|I zr^ushvjuVQd5iXgqrmK?H(#HBgnQTa3koae;W7>8*SerfJs)0%&pZz;BLdN2Q!{X8 zwq)hlt5D-SgR3j6K)0U~h-J^$fK%m=MU?5+#7!Gd#skMmA+mWsS7LD3&2Mn|EH+lQ9amF_)@|WFBK3O7YT!A<(c{pDNB~XR(?o zd>;Q6F0xFyz+snj`?|~A=+9=fbDaZZ&Rzp2s2AiGc{Goj~rHrF#CobR~CGJ7}5dFM51B*Xd}JS?q1mz+fI-rop}uwh*Rr(E!4&WzOZ zG~RZc26df!2U4HBk@Rc4fvOWNpvBH&SCkD%&z(k2{QR;+p|TcdWM9Dg*V?pj?Q_t( zi`YNvFN6>t+h_gYgc8E1-)8QkP1|AMBw1QMoB7@g2rsCxf%5r?DtMXpBTfaav@e}{9fX7X6LSQ#hb#p`I+;u?eZ$v)}%&sm{%@zVk}nn zAHjx%Jb28qlkPWKxg>u94gR8y^%*_5$@@S2mSjc79Eky!gBF~>=M{eKx{tVSoElwx zU>?h|o|<4n9-$S<`Lq*M85<~9(}H}H z|AL#;uVRPE84ygX5xpECpjJgD5Y;pT_v)}*s`VR@OZ$A0%Xn2Tsc0Nd4Yi~#zc=EA zZB;N*#*{d2d%-3i4^di@4prY?!uI%TSp4+_PTPJKhka2+kL~*~t0@|Gb(zpM0s9QI zjzLRDC+ICwM~zL{sHdicy)(Tq<(3@^K5IFvTs#f?#xG^O%Xm?OggM`YXGD!JE$Ggj z7f`&woeR5k0@Jq#;m*ztIB~2YNo#wD6TQyke?HMzbK)33YI7?*mCZr#o#nizl@WMN z-;ILenK1T1HQp&=zMj2ZSd)2#v;XiAx|{mJ<&2FeW%O&PtM~>1*IA#yUzLQ-XcWjWqJPD$&bY4E@#3Af@G8 z>w#OGl+8#Ri!{l!_$)k;02mtnhJ!3)+IigoXKWvUOWd_Vw@H?y4!z4KF|S+Gt7r)L z`5Y2Qegy|FSFS_N6~_cc!cut)@@2R-k+yxq{(GGu&<=vgUlP7$bueVFR3TwUexO2Q zHe{!N!nK|~;4!lZ(hYR!nJg3HySoAN{2zee%PY~}I1_4lN{h^$6AIVXY7(8QW+<4Z zMm8+bqGltmz%Q0l+@SIj^epDXm>AZjFo*zJXiem;l&QZf<6g?jqnG~=ICEG){I{>+ z-aZi$mnRP;t7fs8%*CHg8CKSAV>P5jP=0g!ZXC)7VNUuh^)A*BQk+eIp(D@!4M?VqFRZ#lM8} zVRFQ=A)952s=;$(JI2k^r8?JG2W5OBvqgvE<+-ePC6~*&j0@({4k*y?lT`6$cs~4a zjl}5(WQjx50q(>(83?^{2s%ICLK*F6uEx9XL3ceD!{)X%R|msBgK(S} zehY7Y$c5C+UA#dv>x*x-z_NSqv0~_5?AXHGb+_?mp_=rT7n|*sIX2e!`ye{bQ{MVe%FR4|;RIXKtl6LVp#DB1bJ9zwg>uAA`~BwDWc z`WZrcWeYaUjD;JOEA~I~Mb!W{pIBJq?B);*wY$Hg^jI7`C|0LBimyRpPzq{M zZD@PuFn({I1S7`{pbACLLAuuvPEXP%{_ke+?RP97+Qoz%Zaf1@`%Cziu!nH6LWU&Q z=u^QfbsARJgl4t7_;DYMh+UzO9yd=#J@suEJZCzDhD-5t(;PRqZ| zAoYCDxy;?l*+0mH>eGrezrzI6M!bT^tX>qq%aZtc_`&?~MhInFAt%q6bJ|+UiEm|< z4D(Ti`h!J$!rlv5y!SdZSG%CSBv42)kthm zZ3getBJljXksG?L6wTQ#a&n^_T0}}<^lDX7&KxpLBc?Gf%t&}=JPsR7qETT&A?C%F z<2aL}7+?38&9WbIe+=xT#9sWV=PuYQ%V`*jU2SfEE567wO{|1keN@G^GX3&0Xp6>^K6L&pt2hu@c& z(gO{T!N-90aiTLgpMgTUFERw?Drixu*E??QdTac0NrP(ckR?q=cY_lx;fNuDFp6y@WV6ol>Zi`J1 zyqP3R##xl$s+&6WrsWsbbzX%^Gi!Mi#G~O}9RksMbjHFybbr~*Z6Ed&92V+hWz;|_ zP&zfvz3Lq2Z}E{oQ2zy#_uWJH5fyyutaWIXZq8W;3&<>|TdV^Vz#It_#bb_1_HFZr zeG|J;FiQ_=FcY5IEyj2WyDwIKWzT7II_}6oBK4CK2_{hq<01e}vm}|DV(n3+fmvT}4z0d7&2|Ap9Si zXPm@aK5Tz;z>aVCtLEz)Z}8PUEx1;_8UA%x5j#ay>i9hi+@Cz+EAE)nH48M!m@YXg z@HOX}UmFo|;sx*{r(pZ!8!+G32kgcuqxZld^erltRDUf6e}iOxcnr&MxXuD&#&U`_ z6w8D`W(J#31zgt)kuS*t~+mIW( zjEGs&3N9+YA7*^p3{(9ElJt<1yvoN!n8tV}J!fM$=~5dAZdnL>)-b2-sC&@6l66S* zf1+1D^PPN&<-1QtqN05g7Be4B-p3R0`2=%#?hsJ3{@q+{yb76H6pG?UKe!8~EnpQg zm{yS zlQ`+QL;99C--WI^z z6_;Vq9|fwi;SB`ta)m=jjfu;0jw_o}hBuQt(DC9CJREfn?5#_nZ^lM2+twj!D^sTF zM}|YSU@}>qF5g{=bUS12M5V&rd-6oDzm{9tW=-mdxGhUsIBM$b(c5}5{}B%Bl57V|E38T^)_T)5byM;>e51>Y~hyv8(^HJVfl?hC5e 
zjLwexDkDSM>wfZC$%T;1vhP3FdScaOOEj@$dF4N{RNed+dasb7?#5G5xMeoVu^Ip1 z3`25$D)U!=I3y7i)bpGE3<8C1T6Al|8JxX9j)aDoLruLNt-qFmt@mwU!!2*dHq;bV z6!&9K&rZ}DMPX0FL+tmfg_?o0V4m9m@;&JiR2pxD0_F;9b8mwE~7O?o^yWk^I9&TV9Ljm^bg}fkUWteGe;M?A2;LP zAXCy~0c$Ava>eC%iY4I7g*7cwtJqa#KO7xAcCAE7r z5u#4((`(IzcUQoNSE6Ye&5i~==~y`8zu7&zHZIMy~_(w zFjbqkpUQZAlZ+%u7d~Uo(iPavcpWi}DquI3IZr>8an*|DVAmbR=U=g;I;V@kYp4}= zbquDdCq_fUxlB$dwF554oOoXJ0GoVvzYp6CzPaal*W(BZW+%Wqu$Md8n2dtb+ZeZw z?Zh@Yfv7hJRvuz=<@Y~C7KQ0xFP(xGU7@g{lij_YtwoEzGhW>2pO{;tNpiLf;MLO4 zgZ9yQl)CBgDJ=8eeWi#wCJ%82?Hbf?*l>(psYd-1F7hF(n&8rf$t+j6mur~O2Zs}n z;O;xh#C~%l>xow~-<1~S*2`19#1ovJ`!#OT29`_K+Rmlce-o|Up+al4;$iO8Dwy); z0v@VYr;1H%PCi?PSfTaaQo zeOEr@UMUNx+J`ypE^0td>nM|;tDo^byI=JDwW2@8&oD8w1jOC{@qs%PpkCn*-mkfW z7jlXry~Ub-&KiXR_auo!D9>5UW?8*!cOdZLDXwRsKa}0yf|2=(a9qKL`cEk50vQ8V z`hFaLDSHxjO#pWOUkYx&UZFU}n5!!1FfODKEDj!Ez6}{zdd-&Bm21&!Q8rZYFr&ot z(=^U@`BfZRbP@amEs_6aKo;IEN5}Ik@W{adWcTiR=0IP7#b-`&gN?&*ez7t1ERFz` z#rbUilZ?wS3i30SquEql?)Y@ZauC&mbfSpTvUwIY6>-tu-WiqvKu z%h0VQBA*0v%*?%r4!f-BPsR+|^kNMJSH!@jCF-=XOpiPaHy}1_reWXx4}G-X;Fwow zShXY+GQECaOGXG9)uw^jzSk0eSH`)`UxMpD|3+)hl0LhA0Y!UW!~R)!(XY#xaqR7J zOb)=%m~NPNM4uQuN&MgKi@yxTu3bSXG3YE^iKH!E`4S=Ir=Vkzs& zY;n%{Pm>Sb@rw`FF(jEn85k`*iyh-%VDHIBIBU^`Ue~XJt-d)?(0K(ZD!<@w)j5=2 zJ-{E8x23h)1mu#hJA8Tk1haZ;L9ougM5v<=O^?j!whwy5rQb(X=_3OwcgErK?P{bk zUye@OVM!atEX85$&e%Piy_ffl=VVw1A}p*19GA;c8M$tJQo)$}x$OD>cpN)d7~&n* z!Sr&v5*6kYawn##gVc5mG+BqD@5=6=EMh0{3|eQ8-o+KbFgqfQ4Vc zf4vSLzsQ0PJ^c`cD@r)&jt&OAJM938af$y!8GYdtj%TF3&S?fO8E>t z4IT}t@s@N`vJn`azKH(P8CW_a0!1=OEax4;YwB8(QGz}wt1|)>;TQ1r1W-L@LIRnS zD?7lJ)O1Ya1<9TzHShJIuWKyk9li!?zy6P+^Ny?WedG9P@4fd_A!Mb_bKPXe*GN`& z2-(TXK2l^QN=tGisfc7$=eh1AElDa#NQxGcR7fSi`}emOucOZMJoj^5pU?YU&e%A| z;qfd}vb>M&2d`?=%i@!`G%*(X9w`y2#}=d}iS5sGeuMv@b~wM@l-vnsxw?f^5;yh) z@LlZeS?UY-r>M~SS%diWhEgt1io!3)8Mr>^IczMpBhFi5aMGWA$UXE$Qk%Ai?K4MX zRcSZw`=v-e%`zapw!wHl=oKnEUjV1&nf!xR1=7_mPiulVfVcAjppoo5B3OowJ-*ofh-oXt>QrO$@2h)oQ8dJSqfzCryn)>w~^en#4M@i|Bb*Gi+G>uzOT#(OqY?OzdhTptf)=1XF zpNP>HU-9FO#o)gv5ayNZku{0G;P?d%dKE2b;L|tod%%YM4x_-EWl}s7Dnaqm4A^Qf zOJ(L*k)LwsuwHv4%StqIjk~VHmgXx^GgSxgj<+G=pK5f{?z>PpB^cbke&B3B=%8TZ zH$3JGp8kq9&C4fNAn4Y#&l2lezE zC~wxmBxjaKJ-&jg56|PnhD7m=QBL4D_YN19Z3GRY2NA;yBVgoS6Llghu3 zadW;kO%#h@slg3M@0$UpTrNs$DAGhfA-ks-;s{GimX*B%+siblvFvYj6J3PHQL?nM z&xnSsumwxiz5J#HS@3^0YM6?Ik6EWe#(X~kmNxxdV~HV%m4dkPvfXe`E(nj`nTF@t z_jU9-b9zDX2NW)29rgID+{y3K6oRheg2D;#_&fW3<%jaEi*+#fLJenIG6ROhJccVR z+Qi}ko1q0paUPq3xgp_opq}iC3u5KyzFs-%aZI0!PwfP!DI;L2Sd&WY7DG)vn-jH9 z;RSxC*@<#{peV|gxW`=w>;0_vu%v@uK72J6A7W?jPsb%)Q}v1I;x-h|)x+}ZzHqut zjjZ*LX1@9_u1*KyxU!&HjMPYn+o9jUu(1v2WIctz-fN(JC=0@^Sq?2y6CZ!f$A^;3 z(A=*_PqX>Clj9i-xj6@RZZV}*qs*w~5*>PwL@_%71Rnyq@220FkLnsX zv+X_mj63*)FV{nNlPqoem5siWJNd>NW^~iHN)R~}XFIeCao&6zV!-+a$(J>0N6#>{ zylV{u)xTl*sVeY3n8_9EMDrbnmw3w|rkwb%0Q!#DkP90c!PBA>;?xwecGPwlGk+Ph zIV$5F8y%8SQ48~C84>XpRmk0-2eL9F*t6}QtKPcDIHuA7v~mn+*{GA~xrZ^jPWPkN zV?#J|Q=SgF*bD;aaZtVHDX2ew#`33gV8M;`V0lw4nHjYV56v1xzf85ITOP1%r1Mku zE<2hzfkaUMK%Uy!7K2Kd4DnE_MX#(sm^{6L^W<}&ZA&bAUz(2NgP*|l%>ugp&?T&I zcqja$mX7gq7obO3it1Du5=~_O^NTw`d}M#X& z9dLkOr8}{4-xeJ8ycjg5$rHO<#bEkD6Wg!61%fITe)FYLjz`twMf$iMY?{vEiQE_fkT_MiNJ3G zx7e0tKfSg1{4zxpA1*+CuMvH*MV0KjZc2M)r0Mrk1)}oc2`2p~PX(eA!ZGU;AnS2G z=hSwB^SJKL-f7sw)7BXqHdW#Ov+hHdNnm-~NN8d1yBzm;Fr0B3Zi+2QmX0#2Cd>h^ zWmmbSp65U|*nsB6OVJW{Hixmc&+dr7%6l)c=C&rtkotx5pfmpiEGUw~kI&iO>;})P z6cwW(V`cb#o`(MXSFlvpqe-?0ai$vElc$Mb?-9mGeinsg$2&k(b2qpTQwL-25tQfO zf=yQ{&@cZaKlhOeS+hQcacLrP=EdW%%US`BIat!r)Q8}`79?l?O(=hEz+4R?nqx2t+FCsUiMfBgsUN6zD<&jZl9 zz#e;d-D5jo9isVZ5dS;If+}4o!d=H>!1>ra96dD-4fTIsNKD=F2R{b2 
zLC@DSD7|I?Jei}j?|3r0%SG^^ubRN)iz%nHz<>^%YJvBYOYr{UC9p|JkLLeQ#gH{J zj1QwvGA zTw6tXyu~l(QhD>2ANLw@-^faAVVU>(=YJ*pmS~fKt^!!Ta6jx2-^G)gPT|$AHguAt z(_nIUJwEM2APwkybN6=2**n)8syS&3o_0@ngr>ofyg#OGJ6VR zCC`)NeocLi^>(&YC}{zIqX!_+iOSCKn~#U9Ea`)D+O*}G2t`^IuF>1y@LN-5NMU>@ zcnv9tyO+bNuya9P#Na=AKcZ7g2^zT3QclqSa|x zMK^l+F^_Yv8An?zXvjESsEPl}kF_!*Hph&~L@iw+x)cnShwFsqx$<;kydL?%&L#D+ z`HWLK2X0mwQpeCOD2nakPQS`U?_=^{ct4EyvI^tHzov8b+k3g%1B>9h(PtdeU4?E3 zjQDni5}aFPMU?}tqOI)?nENLUoodd4=%6ai-9C{Myj+v5e~3LJRye_d+eXwwWP}cL zjbP8_tB8zcplNLk+h#oguTQC5Y$wZ)_QybnUKifjCQWj)UZcijIa13UYt?sa**PSG zt;<7TOn^2d98AR}A*y6zK{zhnm5PeLlKDX!4T#xrbsCVe8Qi=wC804bAXh9)vg9%_ z_o*Q#3h@Tj?7^tt+5jhejcMeB`!Lt%B=(;F2X`&l-aF$xwjJC7OB+~cdy6bA9-%`Z zelM1*C3Cx(>)5%W9nwEN;+AW%UfBX0{^fX8;{4_}1QgF^JIOLInXN@5&KuyY#2(yV zya3%+Kjy09@1Wo};kw-wpmOtjuvUGH#myqv`tJw<}8^YM>al)xf)adOi zDX@b%cgK%p_pA*?yyDX^P)`x!S{;fRK~EusIh4u+*nKwY66mzblTAbGAmW4x-4k{Z zT(12GPgL0VX>=0a8Ej78O^QLmU*_Xz{|2T_`sfzgB*|kli}H7mxFy?|OW+i9+qizf zQ|~)K;Lz`q(Dnz0cvs`>uzHMGW{RS8NB-iV+nCcQLH($UAbPcquatd{VKXQ2%Xb^V zqL=^-&mBbbzfOfjpB8Ld{u0f)rO14#T9%Kg2GP8w+%6>}2-+?K^)YkNFnWMD8hD2T z_lBa_szD;C`G$^%VFqPX|mkn0Bm8*6*@ZyyH6@p*&!~Fdaw-Ye3oK| zAc0@LXE{cGVXRn7iO{zq0#bi{fGICEG1>MMS0Z;7=3M>)K}F|r&Fx!Qzw@|Ipr#MK zSJ>Rce~GlIG0#^ivLJXt^76`u|nz_4?Z_kT9ezq4=&!ibUt- zq3x8r=&*~m_099qQuBqdIQ$uZa+)1I7GXul3>*SEB1OKbOOb88SD}9MV!nIYB*xR0 zrShyR+dX{{)vfS?<|FL>r_c7Y?JYQaRTq}89E%Q1KJ(_2igD;D8#2x$6O%UHgT)Io zvBNu!zp&^qirRvBQTIe)?7s#GXlduuG)r)OQ6A(TohMX&(|{FYRLR=;@o0N!9t_E? zhItL_tmF}oYuPn8b&-IKy}mEpjz7a+1wfvhJ*Tx=L{D8AIw@&X~l1e#7^Dw)C~OJ!${(7)MPggV(opXjt79h;BH?nJag(zFIWD zGtZtpd|3dUW|jwt6IbL%#_0; zV%7y%Je$whTnE#(X2N@O3p!`Q1GqndV{YofsJ`wKI;}0`FUOh@!$JZw<2A?>uMkLZ z=z|$ArD^WgM9Ig@d*C2_0vf+kJltnZb0Z8Tj$`C$^0jK-I$$l=|)S1{Ky{vMDhnew}N;xT*rqJC&oG!ck7?ksUETr-?9WDEOqX{f}?}tA5S` z&1G_YPv%DO9sLpi^Ei)6x@`ZDu#|Uk(!i|Q@eqIE5sCZkhct4!|^5= zn7r^b8cw;+nMNq1^!P2f={|GPm`;I;7Xsqm634Zsjsds#ap2Z>8+YO@Ji~HL-D8E= zeR&V_g%eDOzlz!HJ1rhni4Hq=K;gGyn3FEV>k(>nJ7ez)>}G&vZGq5Wh7$SqMUF(O z^n-(f0X8EePnFZZmd z5bK<@8MoaF>XpJIUYm|Vd5JUH`)JWZowv}QA%VE9;SjLaA9T+ef{Ro$FF9EQBH>lu zarkmD)Z7kU2lY^VtD8^W^cTy4j4joqQcB9?UI5NM zGjUuL3nsN%fRp8T-t52wJg`ue7^*zR)2q&)(FRkpNbW0m3})<(z@M;PON%6ApG3pv z_x!;gSJpXg!PhbD=T#?712fuiZ4K+jrIhp2PQC|s@hI5${0xR}Edj}$68P6DO?o^Z z@;{!b6VZ%kl8|_F_U@%fl>(LNg4wzFgY{0sFI$q?GgZi~&C;~vwinAbo#hrB(Ln#V zov8Y6HdH@!#HnoV=#3KL%AW%;fw}$ESGK~!M028S5)6gXsW4s^VX1r>T(GYMIp*=s zGu?w->5MO1@ewmD^r?b_2?=ETOzH3uSXyI5jS5-kdw2vd-hEUe8|n#8A0&MGAvNkd zmPd#4JTH}EK%4{rLOfKk{;vYLR#k+`jCUEWdYg9&Gl4SKJ6Q2bh2ALngX3p~fu+hN zs0o+Bi1~Ia+=+E-)d7ZZYO=l{wL4J=HO>{_yxKW1q5TpP=S`c0?s} zlMGGgx@9;2zqig8uD=V9M%a-rF@3=07}15CCehGuMsMA0?$07~vhcbb`K8?o;>j_R z$-hl$Sc5Vjvn&J2T{GISSf7**R)>5qfP<#K&+l^hdFOf1x$4HZKSF=4+g0pbBRz zeFi4ju;)nJFqB=Rg|f|NAUOX8JN>fZ$7LHLz3n%6=+EQ3-DcyJrK)6ajuZ(kivvI5 z5SY-RPfnlGrEjDA;oCoLl9=lRsUbWp`0bCg_9&veMJ6AYy$nPWKU9=0<~>4UIG1rk zC=T$3J6`IP`%j9T5*@+RC4u0!B33f({yWrP&hlkzO0z#EKY~a4JjQM-fvlE1UL^C6 zlOErUp-G+a*0=>S%I08p=6wwLZiF3)@A+n5bDI6Q0X0vCV*O-&e&JX(dbYX%=RGne z+QBujZEyiRlzxX{p-J%B><^|do5XW$Ml1cR3C_+?C#i1FA^*7*Oh|u$ZsnsSQ2ZYn zTIcZ_5{<|Q7s`0Pv+&>-4G3^O&mEjR6<;n;AwOfK$caKJI%bAEgif4~BIyY*c9cH3 z@mqv`l7;-C#ccQJJW_bK&Y0#bybnJ38GS#=!yLbbm|`PGZ!j-Ze%mAnu4c2$Ei6~> zrOP>XXris*1$cY562>R`V0VNpPV730f_V!3r1{#^>rF6ZC~d?Z`4gOF!6bAz=77m7 zjljN8gES0duSt(vk3&I!g3#eaG%q-y#n1Wh z3x=zz$f9>Z1YBm^n>A)^CmE>*+d3wrw*}>Ijc&)l)C&+9 z&Yn}HM`6vle5haYn;XcBhUZe&G~n`FP=4PH2bb-Fj&GVUhxPAvE-;}sEC2Wya~s z$>M6d+W7X*9gwe}1QD-dK-sGV{QlF$OV2gP-$OjxB|pU5{S7eZ``#$A@0%yWU-!r&J<2;7gNjR_22$W%w53BTx z_;;EtNnb>HPjL|}y2;+5*G?0v-_OGpm-T7Qo({fVbr=|jpFsB)Voos8gZDF=hNt3X 
zD7DJOew9h!1&0yl+Ca{%*C74q38v)O5cf|xtebIxxgWEIJ6|x*l*>L!xz<#_ z<|d>o4{!xT6H(4ejpS^qgqxq$Ncuo1C!=_WIbEm06=!=gB|R0={qOU__v~|+p5wdM z9)aTgGJZwuAS%+iE!?g&fLcygbZD{-?KroI8||q@Wq&xqkfJcuoNyHdtS7!){0J{? zH74l}($q!vG9=VBLi(j(&d@#q@<*{ARMbXp&+mUQJ>w~k-@r@;_nYukJIiP7su$kf zEJfde9u*#W3)2Uyl0AlNmbi|ltX#ThyX8Wb#QXxCD3zg^snO0mX z_W`}x>@hLf8u=wXC>~%uQh8gh-sdrQ{2SXfuDT9(qwm4rmseSSpY?Q3>5(dT3n)=F zpnLWyQg!t|5N)0eamF^Nt&tCf{!Os%2+w%@_O$WGTy(1Fz%g~Z;LF-Oux9XCj9AG! zeEKXGmcAYxPYcmepbdhaOQ3$d0(*`fg4=$Nfq)SWTpkVsHP68NN0vnCtRZzbwxthe z_Tv86`H(-c0>>YdrM17Nfk!|aKW+OA9Q;FvjPoc(%VEQ~1-8sbHg5#VmRDj}su3gv z9mlC*rlcr{>+7vc_!57 z@@v#BoQQ%mm$~69ROu80#`|Ku4QCJLm-!oo-B*Xgulw`S%XkHjVI1|!aSiCOyPun! zsz5|rnq5OiZv~OPnD6u;SSbA-qrYpxsSm;MYx!%~?aDZ*MGBbyW*PT$NC7Tz&<1De zY;b4&7Z0y_y!D+a$jXLPwN;EudIjW*)prnuw@XI92}k4nDtK)tO}wHEaI=95k$n>a zxorOO<;ozM>8MN=8|hIGZWdjSf2e7IWP1;Mn{$EWMQo3udl@!e^{U`_l_1r!s!* zhbt1*6E^VXswoxK7V#Z<{=8ypH($MZ6h``IvHKz08SR_RE&Xjm3syxVtkuKQ`~G0n zRSk5X^N9<*G9R<6bm=H-HUaIjrk>|t!QuW4{G=^Q%L63b;#utcXY-Pu>(6GMf%l-d zEggOC=z&0-l=;#*4;72AK|JgGl>JwaKkY2Z0>c;B8g>Vlp9+G2(SzB3w_Vtn8Gs#Q z6L=So02B=vNZf7M9(`gMsy#o7Rtv1iUgj-#G`#@==`q5biT~iq{s-uOFgJGxge)|{I1c-w4s(*jq z?@HrkV+>$)f;ySDItTW|y@Xm#M+gcS3dMbPP`~96jsgjcTxCpJtLpixrt2`JVJDbL z8In!D&p;9$fYvMzw$;|0x;{+9n{%(C+ou|dp|XTOzu$!9OqHY2`#8)ljl`S{>@}$R zJ7@4hp5(^(NCs9Uqh9!P<_Tc?tvo9#AMq0UM(dM}hbu65asaHaNyj54DL7{HC=kWC zahG{%vPZcZBKGgYW#$fa{pkB(d3h96?yLdjlpICWatck=G>n0A9K8Sf#1GF ze3B%fv-FuKB{qOB_ig3$3gxI#=v@@QZ4genQUSIl?3^2aSmIvn2EJR*!+@zOwd`ES zJ>^(;N$U#k(96Ipj2AZ|N0%%#QY5|=rJP&SQ7AT;0xg#esl%sE+|ySL(kVxA;iM>h z!uAUu!;Ns{WP6sUZos8}ILVy+1aUhT}`!slo(?31_c+P1M{AVLs z>dL$XKKeqcn-zKTN%#Pk(N*CkOAX0*8`^ z=rT>29@?x*jcvmL85?8BplN7y$B2Z_)+0_g6EN(H6|HYH1>+wTP(Qm_!e-@+RhBb( zNxF|Lf`0L43~@1o^z{k+Y9BK)!$N^EzhsGc**y5h{1#OUHsbW7Jl2k#hY^=l7-QlV zU$|}>`gM-rGt6?KI_Ej)YYxVpGgy!0+XJCnloy0;PU93)bGTyPd|uRK#Y_A2;{E6( zD16xi0u^n%lq5&Qt6xcK)|}xThnmvamKLOH!h9TacP}=V+=Pi#+2I1@Of`-f=$+^ysK~6`Fpcp7XSS4ubk?`0-9YI-Fz9tdKPP zrQC{9cdUu!qse?_&`B6Ko@KUoThms#1TJ~}aX!jB71MedYtL#HitHQt)VF?^u;2rH zJEB1+~kZF7p16AWCQA+>t^|U8_qFm zGwiw10kceWX<eNqVJtx`C7zR#k z9uVk=wr{!^|7$(>jGE9QmWA>!Ta3e9zCp$Y#*~nH3mey{)8x~IT=eB7s8ZaGzbuF0 ztXOGkwKfw!j8vlaZ=|8STowvd-$3MskFc-9faL#Rf2O2M^Ol>^v@a2;nKll*PXO08 zJQ#Lsn$TD;<|*^Mie@gl)H__4u0NcIw-ff`5bOI;zG*IOQ|*TKx-9tlP>o!03CCll zazs;08pb$$!|37Gc;`P0;x|%@d!l7Rf`)6NdPpW%c^K3EnV(?bb~2bfmL++AZ(?m~ zEDC2y@Eg%2e+P7FVuk@N(eHyXPo!X@n}BRi(I&-)MmYAaKGk=B16TjblU=^2z@@{T zJu6~dq_tmycjZ}L)Vzup_&(*!7kCuPy^0UpCE6|8T|N9pMKaZOAh~I zPPnsCP+uM)EIDx%eJ;m>24fD7`L-UWM_STmbuId&R);1m4F=KnREdV18By}EAw`TK zZhdw(Zc5dp^OqdQoXxt_>$^Ox-XcY7*4*PQOHRWS#xi?h7>j}jVcD51e=fSzBD7{* z*yQoL=;SF6vMk>s+FK-PWByD3Z>$S($O;MryFsw34K-QbP%=FhO%G{d2xGYp(@-Ra zuj;&5ENXl!8QH=puB4mq$`>80k)3Nv12&=Za1ToYzx49oGI## z(!sqGj$?;Q3TLE$3f(UJkPOX`r<+!Uz+$#r6>s{(oN}l6E%FB;GqnV*WfZCHnOP_! zox^rbwcN#%o!I#qSQd2`=IT@a@XPm59b{sBUS0yVIrr=ji6*6o8 zB(zXGjmBj|P}P4G{An^KTbOgi<-tnE8&Q$`%qhj8x8%uGeMsZ@-wxHO z?&zp0kF!597PWK*j6*$Q$w^V;(LdpB^(p8xmS8r!C!BD4i<6cdL^lg#EPJ3zqq_$mdTJ#iNbhxe=``QL+%HdLD)?t#wz|KBpXDi9exQV?>~Tw zB|E!WMe+jkC{XKQUSRCta(sGWD&q}WDlg-1*T_>pze?@~>q)!dFTSTGm7CN44^kh! 
zf-wKNoZpo7T%g=N@U*OhbcaOV!A=Du-u_1OK_5V~GXih?Fr%IRzPRoE9K5?xgflgM zVDGRh$nE>W_JDIC@Q^L`_;jL*f+Y!kH4i;o8rU3|ofod(`RHxI`Woby1nAf?Y zKpGFP(#l)e7G_X@{V1Ee*A#q*H-Fn*!pyD&X|cjmwBrq2tB%;1y|x3gbd>tpG#lU4 z;obUHRQ;lkhhupNY)pcocG_fZm=g0)f8oq*Gf}oK1YAlZ`M`QDDrlO)pMED#wt2Qd z<#)ybIo5{($$fZgdo~omQlPnqXK`NV_CV`L4e))Y42yni(biFGVb{ZI%)Ob)r#)nu z9l;vrir`TkGVOWI|7xrui&nHy#4tmTSW z;ravDCu!4x1i;6Q%H$WC%+g=C^*uV3{(AsaW6$6o!1W&T2jz|Z5EVW z{RSTMwsW(}<)Loub-d7d0~Ga-!>RO3u*H&PNM|Hw_dd&q!@JId*U^pO^ko7#(K~#@ z=|SX#tSa4XtVxHjz5*vo8LMxrJ@r0e#MLlI{Q*7ZM6Q~~bucbMM#c!n;AsQLCTUFf z9El%>AIDld6S^VNoE$tb0Zlcfaq|@*r0XPjR`3hS7XN zKoRrLHiEac6517X!7#=~C=V6Dh?y3o%=0t$Yu2LaB2O5IvcudYSIN0tmLWK3P5DV? zr5ST{7xe#9R3A~$qrcAp-;PY4Pj;Ceykrdne&cY z!9A8Tp{wjI$((wGKe~EE-?9ZY&Z*F>=r5Q&JevQ)*!`X?w|&|`hV<{*h$`wQv0YA` ziXv--f&@8^8-#ETH78h>A(P+Eav6iXwdjoPb~K0k3FVWofd2dEc<-bh5sfcLts!dk*-ur{ z-qyhSgd>C=C8hZCnhv$xx1S$>;}W`wjrp<}4(K6ZyY;j$(82ae#Ooqz){p0(1%cgYtVwBPBkzzOpf~Ms$s~rGr0NpW!%61D9*JzhKrLJzv8_zNgmz6@6P{> zHG9Ij$BnG-#Q09W-FNZ!r!tf!dUVf|x6mQJ#vM;$o(lOG{*06e%Fni7=?un1FmL5- zF9$=i?hP(0x)Z&-)cKy%=eSW;0vfMA9#^G&!;s1S{1rWGB2qV?&4ZuA@##f)t4)SJ z-208~37GdQfz28BThdbspYfydPxv*z4;m9+U|oR{mEN!kR~gHY7q|3Dw2?A8JYbCJ zohOBX8hU`u{zE`s%R;hBN;hYlb(F)-e)r8>lcO1m6ZbOe1 zg*?1F2+rw|kX|oOZKprLx57G*{WTi)F0dzg>pS6Jx;~j(BOtFErOBDMKk;DgO3;7X z3^zw9(|s2W$TZ!@;BqCOtI|;bgV0EaxX7`e0CC!sf%_ z;C9%Q_r9?mOpDy%nv)d?-pP6e!){`r-ClGG^5Q0$crhMdfJ8RR1Kd8C;QE#nu*;}| zy+!&Yz2qLh(qk?Lp6iG4QJs*d9gQ89R=jG(MwoQI3q?P_3F9g(n5$tJ=jT$(W!aT- z1nGO?cne>B;x<+q?HRi{c}4=>F-k_Qr6(}@=z0vmyPUqvc^F<*2C1`7!YQ%~ z6*kI{p>qW^p(qEMr^G_ewsxFQDNmA$6{*7`OYC`D#JP>g5+3(u*P&3F?wD>u+#k1a zd58Z&)?OPJUhn~8yZhkfKLK&Iu_eRrsnF&HB^c)H!v!g?0Gk)baNzI@NIiFt<@L1i z<{vwHFpqgk*4U$9mygTvtX-DErZjOr;4bfC( z=c+%PPk9$2kF+`hO=Za*%=bPZ>&?A}L^u1^HjDf_@%)FC`^zZBDcN|7xc zcX7zBVe5iPZkN$OLt zae_ouNbJ(Z_?mCv?Gl1wjr&}5S^@XdTZv5gElX4v+tY2YwxV(w%L<(D;c^UKqSwVR z1ewPevbPWOhKHeVdo|~{uN6gm(z!gt zKhwEA9s|%jnEl-DF%H$+hiLSg&7^XQc+D5@xSMk9^B%`X1u5zpw%ra$D097t`)dFG1X6Eivlp+zg_ky%iJU9#v zMUll3UUx%1`dsbA;yq2If+ zP#rS>m1g#|zWgX&PP>OuIa<`GNQ-{k6@$x7!l6OhmUbL%=LBq5DPbf9;ti!Uc#@~dRoafxrLgp^JQpcRo`8a)|DV^hc z4{mCtqO!tL#wsub6UOH5=)4ARhp?{GtHu2CKU;9hvq|WANT2R9jzAZcGH!XrYUFP< zVn=`j_dCUg%3rF6w8l)FSIqXk>Kx|Xl%fCil;R?dr?B%$1DlU{W3K54tP(3xfsvW( zf{PuD2P8`qz759WX}KsftqC9d+tN9_2%cu^k&flse3hXX*1ckGq_3Tr*qM%xrGKFy z&0P|im5rx|Sd*e`E85z03-Wi22I(>J*z3*F3`#~gb)_>?dLUaq24bJX0cA9Q&A4aU%U?qN6uloB$$U8%(eBt0P41VhG#Kpc*w?>2v+>a zPAX8O9U;3QT3o;_ye~_Xg4vuw-G|MPv-lge%opFD2=UKy!E&fI)RZYwm1So1;Bjeq zd+{Ic%G1W&;2xxeb#%Q{X-M2#eq}-xn2ULct!=@qNfV)dm@&)e%fp417Fb=#_9`>u zCFk6j%TZm#3+xnxe6nn60fGN$(>KWIJimk)`0%Zbd>vgf>G zx%S-Y+@|fU1K^|2ce^ix5}jP=u9Cxd8OCJ%kYA{s5(llJ-P{_7eHhN(#d`*2ak>3N zgyUxE5PAPd2yy$$*QXDllY2U6e?*I(>V66iH_pNVH9K;jF(K^Od39fa3dz}EPism? z!8&VuBCK18VT)_{UCEK?yEPLHe&phaU&rvlrGJe3BZ2MF3S_CuCbZ1-fX+l|+C9yb zuAgj!zO&~+`$1Wjw_(gr{r6mZ*KfS7@)pDH%J4P1FZj^ApI{K%m)V?@;D_*I=+&Z+ z3tqc};o4+gBn*^@-^FmbDLaG-<%ekMZwDX!u<8R)IX>R zbKUbKQ#PvMOMO!^NZXbq=yzc5>ydClrWiykRe5p!KmKUCJ{=Urb}Uampzgj@=y(~2 zdj1NezR?pO9t**e@UfURFOEyN^A+CQJBJ?rB0kr{n|m3mPSSq_a({D~(|Ly+=MrlM zxs~c5KDvwB%DjjV7Tb{0y6M=n(+(cYPr&)d2eHge4d{Le#lCOegI@{31c z-{?L}t0}maHxh8HKZxn$a=G++dvLn(iC=NVj9NGv5ktKuuI|-QaP(Bcu@yNueB%HN z+riuz$BsaIS|51s55uF1VYp~f15~^>r{y6J`BPKaT>RlO&c*8%XnfP42h|dwHqRVx zvm`=N-Kr0>0sMLmRTo`{3tsKta=+aa8(@~q0Mi-;+gAenBW4kG~JEds((R=$xdZWHB#g zWlFHj1P`d05QX1oF+@v&X5SK!U2K1G^3OX=ceLTg92*CPtn2kfa0^79#hg{hV=VX4 z<7Lc@i0JnjiT%+*MBC~(2wZ;fzLTec(`F_9)VFn5^jnqC8bijghz4<*AMf4MA{4A$ z#fyC!`TfS}7)RH^@SQIqc5ea9`FjFwopq^G_a%5`E=78he)8g#*8IDR7NoaZhg!e? 
z1>%-9;C}fwceuF#%13(x)w_&-SF*W1=NobDNLezXRY0<;GPx=40iZKki-zS`a(`?c z$lbS{ctTN=DtA<|y!BVf)8R72jl4me0^<9lNSZ-i#=c5Z49p z!&jLImIu0+nhRi7oi)UD2g4L!MlX=jB$`7F(052SzhGnszWbbm ztMt&WzXy^ozrhRh+u^@UcW`e)5z3pzV@eV`m+h3{i}pOkvzO!X67y?pQaFXn0xU?l zjWs>QcIcYf7kJUNWN!OG9b&z>m%Z~QNVBZ!1vbxkd+Rdn&sC?onsspx%*u)B_B1P|?i zbmL|G`pNQC{&W$Dy4Rxgc5V8+I0XatrNFxL%A|FMn7c4E1Ds}?!0j#-8Wx_&pH$4l zIk9t5haW_8``5r3dq?aza}O6xQJ_tXMLs6bnq?lYfPcw7u(M>JFdJq)g;aHJkK&^u6x!dQ@>;iAyYCXnKC5_S3)HuNhL{=BuSFG zXYGU}BuS_wX`pD5B&mG+`va8foO91!>wTX`peh3a%KcEJaS;P!7JyxNDp!PuvFn-= zJ?l4w+jUIwY;<4iqf?E&kTr z0DCka;r4s+P&d^d_7zgJGiDBrQF(AW%^kg;X~Lf8m8eq1y7mKC;nqL`KF_QGpCtqE z{fGfcVRIzGyjD(KM~wu?K1097>|UI`l3%yB07h49(`3I$d@UhQMV6Z}T`m=Ms+WOt z(><=Uu!%o(uOE!ZSrS884eHCWEc7eJzBmyH{-gZSca|*I+-V0Z268}Bs804-Xp;Av z1ax$2wlu2FE4%nVm58Y8Yj4_mlwKW%Ed9fS~I5RAz$-3=cm8#o69trBc# zm!(59OL4`;qj=<$ChK{=gie7hR~BIjWu=OI5eHO5fm$OX7)4S)< zVuKOoSvMg5#C|xfb`0tmPwdTOQ-V=z;3*59Kh{wqSXtDPJx41fsUAht!qddB=I~Vg;=f z7(3pE?9G-Sx|fZpi)jNW9Ge6YRt{{g?+)V%-$KCKZ%{YBMA(&l4<_U%zzX;2Sh;iz zm_)AS>|FPw(7gx(jU}l`a-P`ZM+Z91*~s#gsm!e!0|Ec^>7Cs&Bx`09cE4vFIjiY- zg>?`fRfI#-v)K?>IviEl9xLNWCHl4~p~1x4;9D%raN`q~0gKU|td4rYVz{Q*unNRyo7!`O^Z zhyS{nWlZEMp!Z!IpE>*rKW?xFieWAUbf3oW8LW@?tCY{$AVn3}EUZp%1n>E*TsZXV z4TMe3;PEO1Dt(S};XjPw%>oVbpjC$4=~E?+!llB5orY96`XkiOO~caMUue#}L^@*) zY3AHmSf-{)j(@0R&;3;JaXx^hZ*n0b>k2roS_T^jQ!p>#1amuW=A0R`M%L2+eMNm( zdhRn6-1;u8Q@RSh3(_FxOA@T=oxs>eUwM~?nRtXbcDJ!zvgCQjQdu<{Dyu(X+_eqp zTj|5?dVd#khYOfr?kqZPf5`RLvGYv7DJPJS&eLDhkD@hU4wcxJ?`oI? zNc`kS98snPx76U!jPLN1`7C}MWV?$spHLxl2Q;rb44aJ@^Eze*A9+0uWG9b@k!P>L zz7BOF%b0W}|D+hZbuXO#r%RUV_d}eNC0ZUBO59dy(dk}Q=;t&RTV0K5t*;w8FKXaj zE)K$V4_P9*mf|4gmWKrvvN-+Z0Dk*Y0mq77Veh8%JT9LL3(iRql^N6cXn{9MTpq-A z$vL2BT?Oxg&fvEDw{awY80vIWg&xHTQ1@mvFC6<7a<2#C0hI>Kjwr|S6>K)3pUa7U zs-W9%H@M)dOB@%j1G?-de*4z~qKJe1R?$T$)(C`{1aoR4u;hJoScXFS50|HBN@V?| z5#Fm%LBbK?0)=9nYkLhVrfoox`YViG$G+B$>3p%#GAzlA1lK9cVD6Za5Ye(3GBT3j zCG$5z7jpuQILFIQ{tssEO=C=|OE~Cz8?=5JQw?)fI2tTV>Xvd`(Y{BxyF{62q>RFA z3v6h?ToJDQE+7lo-X!q50hqXs0rlxYIJ2q)YrxIhvU(J?S!LF z%muB=SnzqtdT$P`d_p+m^#sj=*C*q#cUwFx{yB_14p5{j&rD%!QV<4!Ayr9>-B43>%@2(-_Jtl z$Y{*mZOe7{ZNk+`?YOe>6sj!v#H;9}^8yt~oG?V0u97)~p9V6}YT8_svTwix%t5R- z{Ug+{S!)O@`UrlV%+uZQ9KC%EQ1PKGUHRYy8fXn+T>EF@&4GqgaJMcmOX(#(nFa#yBJ~ONC9`fQkpoqKLPvl%Mi5iCHT0!2GNE( zzQ%|3nY;Jn+_}au^7sLW({{&h|2eSa?oeVqM~yE1UC#2FQq*&a7Vo%YFYi+?MUCn& zL(Mk!dvKGdvMbZjk-Xt@lPN#?b212W6NLc{f1t0?5k3Do@}}%GrvmtQ}Gljb<5!X!$Xc9Qc{dZ*;O_aMXRG*_kMd@ew(EB1hVa0sw0d>%Q z|0gD`EQ8aVt?1-+Dn#!02F!TZ!!mF!_}u6w1P@{N>X-(+5&IQ8bX3XK(#5P>$7Zx^ zkaOGT0gnEzymv+#L>y&qnj(4V^snI0#-^kD&LS?zstP9mkt35%iXptx95C<(eA)97 zcJHwuSMqhqik#Olvui8_$>+o0f3n0$-jY6WSEJ4CAGoeB>G<2wlBB(*SloS$yS7)3 zxbyw|qY)=@REsY4{`QNjRxE?^2S2blKo`5mX@b||Zp2)IaC} z|KS{DN!NqV?dx#7F&$hl@8%PqnBxuBkNGPpOa4k*k*GO|V9Z<>)dzmVgtimdE;#_D ze&^7$Yy|Y5(V{&%KGk3yv{=wG!|`Kd{U3@laVAfUzft!em27loDZU}l@1y) zw{UpqS5!D`hbqUP@{bPGp#FMm^8S$pEmoId?+6}z*d`vvUza20yKck&NL|w9HHfDr zn;~P%OSBnC4n<>cD#^MW19 zc~-20Ets?x1bcsA3w!46QtgBqUzXP}e}cbt%t>&n7CrXMnhaZOLDZYi;ObBv+AD70 zoi{IodGAe#k;XF|86=DIKfQoc@@h0oDjG&DW__Jyld+reMYg*?2CpR@u+_>4#F{4{ zxrpt5^B?h9&QD<^4u_sUtpD-(9=}dgovs-53+?Po>52y}aD3<+xENwZ_Uu!n3fWpX zbH!*lan6$LYhYZhdn5BwZyX1UiC?iXZwv?&mT+0BYf#`4;}CIf9+$&-p!+2UaW62p zmsvB^-Kpk&xl^3R$&tMtCFr?0UAV928czD}7IpKX^|G;k+n2^pn3A{l2yMtSOFDkm9VEN-Ld-W$KL~Hy%I2L6| zDlKAc_%9pQSz=ad38^Nm~6Qr%(N=zGtV(5>7iYa z?iL8~FO=y4H^ws`F$RkLy7(`;cR@pXJak%RpfLV8{`u68Xe&$Ck1!{y^=f4Me@3*t zco$x<)22^CP3WTc=@7T=9@1_BUE5|&-QU)7K|ikG(#kr_{$fg>7Aq2IJ|OWeOQ3LQ^(+t zDwb_D7z9VPY+;>z4eOGdL1ge7xYx<%F-@7g{1JH~toq7+FP)IM>5X_pvpgN_7sJ58 
zQrxwlaXa?OQqidee86yH8lIvK?tW?9o^BPI<9iAuSH8pvx?fP>Me`gt4i}!7#OBkh zZ{fleD;gQx3wgC_MjEHYI#xyXZXG=jLFe`3e*b>Nc;~A_PJt4MXxPk>#JiR z5XFiYPa4FxFG{dkEXtGh7idI_+9qnDX+wqLqlyyY7qE|qRFO}jRs|0v^ z&Vocuu>(Jw3Yc9SiVY7d7zg}1-?if=q(0U{6&KcR9rK>wz0ZO<1=g}X8tXpKD}XrX zCbYjb3F~q?uyHTD7tD|IE^~Qk-4_R*?8frg^WBX-u zTD&@*E4QkLRNeWoqP-kk&rf3F3$hK8i>oCheu5<$g~SX$i#KOaL=OqjK6A5vzD-& zahxi2>7QY)!UDdmpqrEX-3K8G7eQvs06Ujg^X|J9X`8Yf+YkIjKeJW{?=%BnyOVtC z5-HdtW5l}Y7Bv3FFI?f%1mm=GVYd<6+YV8r+}-1tQ*TV2{#s#eat2iD=b`9sS>Dhf zHR7^731TX`_~e?Y=q-5!-W5Cq?>XDC_>DIDFFDFJd$iz`k`$Cre~PkiFYp8R8gcV| zGpbZs0Er3SoM^Z%H{sd}uq*K4%AD5l8%M~H!1TMQ^4fx*ym1J5z0{Oc+Oye@f*h>d z6b>q+yX2=NMnA!^FxRiG(R87JftcP!;B3y0FwmsX~GMT`M8&6xI89fK9( zG)xqPa_-5O;8SMy(Pyl3tqH?$uW4O$-}fQ&nXr*DSgqDRl+70W|SY&53P ztGD2u?p{T644Q*o4=v^E{g3S%M5;5jCLSEdWL>ydTi zhEX44hZdG@(DT-cUPzOpbEhzl*4BHRhP(>P<@j)RZzy;3)DskJ9iN-&qC;ycSHdRz zi&jgNsV7~)YlceE^92%Ag~&o?*fp-C))xX!RKWE}OB#Q3A{5OnfQaiFeDA)Upt!mZ zZ39N&gaupR@kKT_Je{2s*FXZ{}xJ?8B<~8Q8;^&ZN&C+ctEZM z1I3EC_@OE}k!(#|JyvjuUrcZoW8*|~RbY1}o?mw2EBc@Rh*mjqSk)v$gofu}^!Ohz zS63Cjc-4SNMujU-YvPKgg@fZZOM17(n$*=V=CmEd?u{GSbFqc*u9v}L#YisudkT6R-r_y&Z-By+lbDI= z+~~R-C>XknZ`>M>UxzbRSnEUH@#}cLns%Y!=~%40c@4vF?!^B5aS${4B7Zt55B^B9 zT(_4NU1WCy&Q36;Ve2Y@mMhT&NeBGH_A0x^+{dFoEr=lHt=RKph4A%0E7~*b5!k<2 z1T`Hsn0EIvZrQC(1pZSv+tr7#*;;_{gEt{?Q7y_vSYX*uTYT|QowypQLc!ifbo2KE z1(~g|X?h~A()Neln^_-5q5+foDqzz_6LPCsn!K*MgP)n7RqcWk+J1Qnc7=WXO;aWM zDNlze#B7Ef=DfRTXFz)5^=amfbC|-sWc@Oi@v4p{sU6V{JsU;rY*)`0dnaJyU=8** z_wldTd*Zxewo_TRi&r=t2L&JZ3Z)sBb%>@8sfj(xJ`XLy_eTPEYpN9St2L%>w%foV zO_oe5NkiYE&Rp8H7CaO)l%AM)8U==r9qbZ@@~e;MyF#|7`yz}q2g5LslWaZ zyNsuxS+z73jGD^l1O?&GY{sLQABc&u`n-GULhQAB$2&95P?m=o9hq|kWZiU8c=9*4 zZP2AIf3Ks}#4ae;$c1SGY!0w31w@6@#2SsP-*N3bn8rzx^kbv&N0$VBbK8{g-BxtX zCQEvgS7I~8(a;kuM}Lo3C8C3`^76;(Q+Ha8<=jIwt8ByKJE4$ilFIp*?#73``lQd~ zJLnbsL%x@JDu1h!iHtLS=iDni{wo`rwFB_aL7&g%I2^UyHYr_jVG}bzp`t47TCM%;%y0a8nv?p-Ck~j`9}GmQhXkIY#zer zM~1}LcM9jRJsys#E08~Pt*Ao15~N0r#bcRwFttEH3!aY?J1$$rU1;dVob&gj` z_p7eXAJ5Iya+de_S0j8oHyjI& zFelKv5oqVKlh29`0>S#F4x9dlp?6>(mq=U1HVWjF{f<~leom?G}~sYW}b4C$kEWok0JnKNI<+!S5Skk_O@W!+V1 zz~MVk$47|QbTE$7rG>a+)_XX#OrE+Hoadq>wXk_pFrT~p6|T}uM}tspsOA5HtN9MD zZ(%#6O7-$-cp1~5UB$u3HWa`jq3?(D+!m{s=$iVHYnUn^w-R3B($XZH0yVfS)|}d9 z_%YwFuCU-ky0DWw%cTb>LYCTPe7)-wdg`Weax@he!xAq-{@2_fIHar%AaTVWRC!{OjF=c>~|OgR}9?Vfyuu9aOV?Z|0M;QHu?nWuj*yz{9t>}1Lwi>Oga9^S0^_e z85ixP1PYobfr`ye?(kDRs*rORldklE*W7a0-x`5hpCVvqSSYpZs8btW#XzKemGnuY!X4PD1Oi576q5 zHXWJp2fyvF0`F^@Alh5TIYlwY$LbB-{OnqA)HH>)cEhO3-|2i3<3npI3?<+6Ph!-h zujre%oKKRf!9Tgur0mmTeuH%d3VO@M%LX*5Q`KHHT`xuJLh8jiV{%}$u{-+36`Smn4dfr+;8H>g8B|AfF!Xx90`1A|&JMYot>qtAdp>_^# zvQ5M2jXXfI0ZeFmkC)jyTt4lA!PP~0sD`o9uXn@aJt^oo^8v5I+wkFg+c5LDBInzv z&(F=?3FGaPQSEslh*}dJTylP)i9@q6!TJ--U2cJ**<<)A8{dPT+#o*EsQ_nVC(sn> z(0ejkr1{<#?0r5599s?veT6;z>%aQMakd_0{+qycr!D|)WFEfPm!c;7a`E{14^Wc0 z19Z+RLc)a6=z|SV7G??^6WMc4o4tdtTLUk75h@i_!4Aec7cZ#;i*4qN4LF4MyQxuw zoK29#-Nr@fPhrkFbuu$R5<1njs7v`0^o@$ech}1x%qX+*7!+Dp3J-{4TdhV zBtCc=Y}0;)D`vg}=NZWLzWk0)yk4T;!+t2}+RHUNz2Mj7zC-23*O_bn40=~hf}Q$` zWRFJzOaOHd$i#rCcN`~bX8i(ueEv&E9n|6%jU5crvT8@*H-f%Lbc(LTmF$gbsbG<(tWOA~Hgb&T|TBoTw~7Q{Y@q$!g4borW^++7{JwD?J=Ko;~H$vFeH&LvH^D-fpN08 zV6wY5v74?>$LV~=(%x2Fwv;`Kmuuslf6uWsU_0AmCBimw297t=C3UNI^Whc%vagHy zQD0Q(u8Y|)Yw$ScR#?&Ad|8k^mdO3vpg>ND^vM_hgN!Zkhm-6m!~%66E}3->R`1j# zJKc=w(G&yXn=_7ERj!S3zyCs+j0Uuo@;K`@%eOQBm#W+abew-%xT`3e?UH^86O`9r zV4x&iojQzquG8oLHd&CFe*fVznN|$iGz;Cv+QZ0I*Ks?Wi$$gFg8gIl$-49HFs1PZ z6n{W&^pz%%owFDUJiU0McxkGWm&fV)J;OB`F=*2p&U=;TfY*E@s+QA=gNe0}@c2Gl zJ;t~-A3ws%5f_=mnDqy4sX65MF2i!cecW)CIW-R5SpZNQ{0UERwOp{7*2@v16L&--tN5zmuVr-ZDH@}ZBK`in01jr74*qT 
z=?mC=x0CDCl;tB{S#mKm z)v4Orm$2?=2&_?)AQCs5VeCXHvgWG<_1X0srVOuxzMw{U`Ihm@7nCzQUNs0L-($SS z0sLb63qt&5z-2x{#idh_zvv%+OCODsb5Eh8MS#$EeI0Lp&5C5krf`RpqR@MC9ArGT zqJ86A!2Sw5d-q&LAVm*;=3=2JJoMYItMp_*Yhm4 zCsxm!+HOt8busS0;bELSqW}Zf%F-LH<}_wv4L3=?50hrqLI0owh%W!lYhPCa(N%w- zJJt=f7BoTTAn?};EJ;AqWh`19j1ou6QB_->I!i|JLDp8(dF=tnO8NmG7~4?(QwVO$ zVXi_?3p#K1GhE-KNwmdUWcUDkXKLCl^k07yavy9!&w&tO*~*ig;C%%b)U8j~$E9Jv zSrR;ox(ZqGRiL)~I?A|o!oI(kP&{W7u5c}d+3OpzehTYNCw#<7tWQ}cVaLh&vU6%s zKI#nagv7`kVVrvfnmD@Pbi0S>8Pd-s*RO;Z@}XFFS%$wPX-e;1(kGYm2Qf{>m^Pn3 z2)|e5;jk=YvUr6mu_#{*y8mg>Tg&q>G50s`<(vcEOV49n62(;2MJxyEfN4Lzfyqk3 zU-+p>O`3<$Mfy|F%jN-eDi`r1rb`mv$OPW^=50==FPQh#Ix3$2kU4cmiNUTSiMP7Z z$GY?#u*>}~kXfO)kbTeMk!ShNH%+|vsXzP@Z&ez=dKq%((!l<&B=#P;$jLC~oJ9T$ zs9f`jm&h=rzwffM(x@`%3bn?VOB^JO`-|&l)H63@Kl%)3-qK44c)|Wwp+&`UICaK| z1cs^6E3=eHS)7pbT2Tj&rqp0SST)=6zT@WFCE)B!&oK0xHdWED;&aNbLIn4fLl;%B zw>E;1!Od`nagcB1v3DRBE%bc$fgAW}NK~Snxr}!r=w^)cp%5JP@#a)LRanO*Q zdmtbg`-0$2vkbkSnT$%a?qC@^^ZAZj$b0@_UB?A>aOAH7ku3`1L`fU@pdC-x4Err- zE3Sq*pLfE5!cZ{rxW~ohhoTK-&*6ndY|oX1QyDvQ(LOUYIkkox9L|IPY!$HoR*&|d z8n~feYV^>>8c?}_oKCSeOdV@MN2|3%NT4}p*fQ_%9RVZ=U2y5y7K~>suS3;gpq#Bi z6&kFd;6?_Hi%mkogNNeOyQy5nyLSFXHO0%1HAqcaJId}J;5>ylYW*)^adY!QG`z?Gs+nDAL z)WFE$^3?bF2EP3gd#85%C7yTlJhl$;gMyz*;^}rt=qWXWi@)Is#`Q|HS6GcFFJz+U zDJ`zy{vAwSC;|hmR21|Z=Xov`p<))}FkBaKGZa+GaR+uD{kDVi+EEDhF-LiiLe|T_ zKa6f&m4h}Dj&NJ=%mlgB4e+Ds9Xcqglk3Ba(Dj@it#v$vHo=jc*M2#Q@+puxbsRW5 zmLoCDKj#3&&p{gZ7Im)b4ZiDY95vm8z^1Kxf=WKJ)l2C`-P= zFHKRPK_kkbU-mj<&XwbTsV5kR`Jb>@b_y2US^%l(LA;=&)8X)6S?U4bAmo4ql}cR= zk4(d$IWdgez?jl{S^40VmWR(nA3({wH`r>XKqG&jgS{SFL~#3Mp2+_VC!>`JD>i3B z>eejo)h2cFxm}fZ@04UKZzC7jeZuuJLvX0uY2Ji`N%q zW36Z+>I78tZLLYnTj7H@T1;rC#}@b=$T9`5qppwK6<`9CREak!NmGB zKF{L=>YQu9%%lH7#-Aw0lMr!CtVd!)OsMhU0vzSZzRtTz{FwZ3bRDyqd$_@vY-kIH zl(hRG`8Na|9?BAUup0s{PQ;&$k|cbw3N0`$=HC77!{ay3<8#Ws{*Mb>Lq{Cud#}al zh`->!F%UmbH6k-hHNbbO2EU(b5u1VS?7h7kXO2~%J+jTCUxfjg9`FU09?n70_yyP%C_&36 zjb>TeTwZFrEOwroz`L!Iq#NGPWaQbQWZ_6-@+P;p zL@9TK)|=JH!&lMh`$Cp8aWw%|4LM@6Jb?53nyI+WAG1~35%wdwUdd7F*^~r7!XDGWQ}jU#R3qjL{)# zPu@YDZJKb{5H`=!PQhF9(&RqlGpW7MqymF!!Y#!tyETX1M>}6as$Ljx=cB}%)XUI< zg@?G-iSO{Pu7Ef#dJdw-1HzG)EEuz@%fac+0N5P~=4hcY8KueCR?FQvnUWrGG+x9< zFDrywmxhoV>Ax`R730;F1fz4qNj}=_HJ%^NKL4AX@q@YviHf&{6-N!}_F;9X^I4i^ ziYoaDBM!jQf@aXlY-27+Df-Gj9aT$)(Bh{XA#P17`o@=uE-FLB2B8DFvNw;5;) zlW~!ACLA8CNWLU@K;W_@kUwThM#jj4+r5*hY&?wUrA@#Tg&yb-vwOwlF<#lDK+1(8 z&^ceuSN?jyHQTIZ{EJPvbAc|AooB@M*5zE_X&pGhI0eEDdGIz?jyNCvjj1a zZh4|k%Vc+RSLVr*wB?Usgn~TLc^M29T1BY$p%_QTP6e@IEHng5k#S8tJo+~oeSJ6b z_H)mJqNfy%Iid!UQPpf8@l|-5?e#rQ++=LMXCN?G#b->Nijse<2q`fl{mxn#P&onL zr6plH;rgn7ROp~ElOH0&1^I4l+yOk_Rn?W5r09~m;{ zh%prf^*hX3z~o53bNg!il3fv!p$`7yonjDHdVj{Ev~+4OVpf_X`{-e-BZ zp=wa)eir&ZFo&-7d2!)}1TdHr0j*I}VW2~anw;(y?*8$P@$w}xCg%zlyD1G@YnQ>z zos6eCWDmEu!GLxSl<-PV(;;+3#_}j#uSntT zdsGQ8;6;a52;Z=7+{2}u?)}eE~DM9mJd`Aas4bm2_ zN9wK&3cUwTa{gP!!c+NSq;&mnNc4#m=5Mb-X#*4Ter60I+uTA6XLX!3yAm+&AXs#$(bNy&aCxCRJ^#g!xL;obi*A*`sBNm`fq@dO z)3oOvy6O@EvCmr?T?eX+;S#9ZjdnH9`235H!Qq!Od3ULZxv|CwBh&AKT74@_tIWe} zLlfpJRwY&AWQgH6)+f`Er)h5=gF)dz6!aO3!^@@dJ9DS2oOr-jTPaZEl1i3`?&C!Z z`?& zWCpq$KIT2AvW&_^Kd@c%13IR$K27jkuGl^dCKVoL^S&tl$u^dmzO(>!UC!f#uUk+? 
zRf9GQy&=T#4sSQ?JRf22$~SLj?5$Q4u#)bB)Qh=XW8ZIlHhw5AN@ZR|x)xXWXwwJG ziQ_wBCRb#9m~rCPpttsF5LoL8M|y2WgR`e##hK5Lti8QTd8M))o3(kExTz}nSh=^9@CwSE59otK09_i^aDW*L9mDjj1A!uiVEH^31yIB6Z$XUlww&*xBB^I3x!c&Om` zaq_evu~$6aLY2UVK)mY6*c;bpef|k`L5+xHw9rbm zA3_qBV6oj#)^B-&W$pI-I@c9gyPm_t4i>~~!zWnD7=I??B{-L5jJa@c8{%XSkRLjf zw%6=NH=!K{&XdQ`Uo3kT?7^EnG34fQTTwLaxv*@>WjO871+Mo5T$;pVm^pJ81}$Rw zVu?eLxG|2mS&_##Zk`Q^*-N-^T?@KBI1=g(wd1=dMeO@*#2*@JWU*l@7TBJ^*DW%1 z=HuU(@^2{~nr}*vpDF{Dt=c&3Rsu$z`v}u6+(2XYerRIRgKIj{uxObSwZDFYvu#2E z-4P&MPC*c{C(rX~3NP?{=}_+Y9X?+8i4*QiLp95dCCYu~%wOKY{gZi2vaV*&(o6W* zDS_qeLqUHG^OOa!_n8w;aC*;c{JAzCjrbOv%IOdZ;RWzk8-ZmCDSUtCDR}nKg3fX% zL*XPg8!C4M#~I7SMrF@oVS6R6e`!FzJdmYv2N)aE{~n*ha?Js=opD;NF12-cMbF!6 zXczI9v#MjB*SZ#*eAj>&=te-U-2#ZI)@0`h6?pwXjXY=1ZNb7wevii-5S*BnH+_~e zb$T@k_b)&!c9x;T#YQ9|FqDg(Crt+beFin=M|#BAYc6e!JAcp=7cyp*?7@>Ixe026T%-G5@zRrQVxUU-R4Nv5T3@~?#j~Vf8>cHKu ztdCZ479#GZaJ#(1!Kx$^1Xq6Z>nc*%>|i4=_$>v3Rs$${%42`LFYB@oqX~kyc=~$* zRBwHZF~bhxA*Z9@INXg3RW+o)%=C%M*=KBqWJ-M=n&YDWZ{V42$-7Tm51y-8-kIf? zAAe+Q#b*zNnIjp`R=*27JNx;=PQ~cBQ&XrmQjsR#&u7flAjny{4aFC$LHO`Ke%Vk8 zUELp{u2dU@M>-($eH|yo_WN2Z^1+6RL7JaF04oA{r9oJ##WdeD?vqLwQ*6$9mWeV^5z>piGtIvUh~B zPDZS#lmYI}ulZfyU&EPQ#zbc7KbZ6B4c`BL4NKb@7yf2F`p*pJf3qBCe~Sb2*oEJMw#P z)}p$a9#yJkyt$GXIKEkeO804!lcS{Qqs!&6;?#XS%viH^^A176B4awC;Q%}~G}*|MzF~?v?WM_5x{#uEP;YGg?+;0G3Z< zuz16KEOyA|W_`MaA&S$XA&cd~4m9#M!dJN8`TjYebC;w7WrEC^o#wqXxItzrS9k0Oo(|0i@{Uev+@)9Om%8<0pw=rE{0Ha3tqr-C< zS`nuN8#eo*aP~iVcYh4(4b!E?{$_C8vJh3iH*(Xj+z0oukGSJE%`xJ$73`B`J|2`L z1tm$GsPDc*=2{`x7?kt1r*a|7r5UD&!C_(UYt;T1tj)#!4JtR_|*Rx2A)_97xpR+<>ABH(AkxjcVU`fTKa6m8m{oaDUvH=|NQ=sl&nJ0JcKR#6>gBz4b z(DN8VFHMpq<$WK(HGy?5f6CC9hR6JZx0qPeT!E9!3~Al#RAJ|Wk=(?E;kd3; zK+B@!xK-Uduwd|$aK{Qo@@avk)=`-_j_(rs4J3o@k4#XUrA5kP2GF_cJr{HFEAQ%M#^Zo1Ok?lCiE5EJ@n|8& zP8~+hf5^br`O0GrcMaUZO|{O#aSO(yK=_n@UUUPJ&MwD2Q7jL*BgSF>iAQ*Ok_}O~ zy#nU1NrVonNA3JnxFU94d3t8zzpxAx#k+{Vj4Z{uZ==Eg&k-~ZV3~mbuHb)tmbCWa z7|goJqwKyHe8Iy!;op@K)Z^7B{PO)fEb%ZTQ)epE4Pg>&ez}TEj#9=D4LPhZV7&8m zbDDQsn|3NsL3guryjBnE{d6uyFmi>}j}1s(_Dytq>IlUGNoo_H!>6Q7$36523N$y1 z1$JTf5l>b^EV~|u+sl#)myIxfF}uGEx_I&MCTJdUny=m50T0>FlECl84FxN~Tj3RF zKJ@}la4|%IT54{|-wX)9c^}7lb%LjBEH_di8Pdmp#++&9G?(>43>uB-5|0SB09AVJpcNktjl}GP0Qko18(s} znt5>JO&o5|Hlw>@TEVJbh2GhwOZ6Ky$d+U!qUQM*Ked_B!{ON&aepuO?&SkGx3>oS z>h5BJcNdo#(8mX!NCn-qXR*G2Er!olgOK%Rbb@pmsH|AXEmi#sqmAxDNV^$Cs91A} z%J0SU4a~K<_6>gd_y7cB?mO82;Ni-Q?^qVH4(FWD#Nk&g=$TPNY3I#x{Omq9M|pD@ zD%LmPSSkvzbhf*zgF;lxdtD|L}PU5ZFnA&4SjoC;Fh&4asPt6>=j8`{%tq= z{4C|yXPJ-!aROHq(gk-}|F(MzyS642b8d4az|G<)>f=9+FlHStaST}ePbA%tT?$2ue*$pjPU-2E1cE5mZemG8CK9oMa zZA>OuTES6$Q{sO(nG2cxl{@-co7g9u<+D8(gUs4O*uPYo4DZn)Iv09*laop)xlNmj zWv<{xtK+!kq5*ridk$yr=i#a&3vlRjZSr**V)kDhly&!m;}@3VVEJdLPQ3`LK5ElR z4oxt|+?2GPsKF5ZWbT5xIVr!F05K2K_%v(g$o%m^=;c)m%aSZ;mE7i1j0j90EQ7Rw9^;<4rlbPloQH0Co`iu)Cql8}yUhJ?0lf6&E! z5qbx$29tPinEO=?#{_j?;kg#PzTwz=#&Td4-KXcftQCI@7Qkzb+1+ zM$Pj)&m~jlI?vikLPA0^)8CkokR;(qk|aeEQb|Zc2z8#db)=GnD3v6cDj`XdMDKn- z_~znjIM1{9TEE|Y3nohy23~MM!RmAuf%7Bh5o4`smofW4d1bm=y_j{SH*htHJAt2W zM_dg*W97LR*d?n(nl-m^)0u~`l&F&vr;{<<#)3u`u#U>H9PZ5ABe-nE zUR3teCvMT1{PVo~n1CzctD+6{yqCz2>$(Y50V#a5X+Bp_u@sz@Tllxr-=pRw4PrE| z1uobO;Jc%tcr&RJ8S@oZIq1^Uta~b@ZB0!Y*WrIpf1t%cj<{`7DJM87;L6KmxLx!N zYOwp(;=xs@*lR`tcbZUthgwc0!j8UsR0lJcE0Pf|R^)2t8%$?&zRc)X;BBr zq*j4G*>fCpszzbTuL8dMo;umhxD5+8F$Zqt02H6QhRx*@xZ9_g*IcQM+q6fOI3>u? 
znszJf4cf==n|d34EOO9aVGHN@~G>~akZ%fk^DY_d)q8%Md5jn3=QX` zqInc-e^8jAt_6Fv%M~MTnA(OJK}mR2GYe)u)+Ig9e!{0g3grG~ zb)q5tkSojD3qj&u_Ewy&&sD!db;NJU4Mi;eE|V(0R9+%?&VtX^+S<_tEWaTVuq9P^>qT$I95OKKs< z`!(1mJiw>=O61IYT_P@K>}R)4Fx^y-IEomvO-`GJE8C$>h!TiK{c%a@ujW_n%EIgu z8L(jVZ_FNL0a2G{;3DTvDA;_L_isPX`7de}=F2G4<93F0rQL6gT9FM;u5Z9`f11(r zRU3bI$`;JuupFEg>(Z{aIFMnC*uc%LthYNE{#G5w@DpN4$VuZII^XbP?BuZT)Gf#u zvY&gQIEa{!m8UB&rC@XFC)6!s`O}9xAX#)B{H#8qsB|>MMQG8U%^&cgnFYN*OhDXZ z)x{}aa(J~M8{D|Wl>FM#g1Zh@;`jsRWZPE81-%d@RA>(X-|6hL6|TxP_rDNFYj(hz zE4diY`eSoK&g0!O8M-h>mAEgvjgt8r#KPbG&|_VVZr}cjBU^I8tyhCKY)c23HbWBX zD8kO^r+I_=Ph9q1C3Ji>jODc5`RLBq_(Z7$x?X(2Ri(=GQX1>vB`ZQg<1{?#@)upZ zvS47A0;%o#3;Anpz%6tQcSH6V=p|l($GYVtax*y*$c_y1woN-HmL|({Bp3XBMH|tk)Qxdk==T+JIWkcl^6Sh01q50e71$ z95R=^vul3xO~-CR_F)x#9VSh@qQ1bBVRu*uJDE!=@(1DQm+;7gaY@h2K$Sx?K#)>+ z2Uogc)+ArR!Mcz!XCDU1+S9rp$;_Rw1HHHJ#^M)~vAMQRC`eMq>=Em*%Ks=Qw7H8a zefw}%pd1nK{|Q5+Cg6lYMnrt48VsiwVePbuI45HRx~aQ~Uw)GzC+}&{8!s3$sjm+* zN7g}|66+m%eSt95J3PK*p1JD#9FeZW51Wobhr=hvw0^?ns!d0)qYt5~=NCVw@4 zRjKE(fBffVj42Qt3Eh$Za2@ji+XhIH+;C+~(;AOo%mW~6RW*)uR3`}&1++r-0>sa= z!GUH=niBdGhcA_;%0C%rAnqCOwlj(sRjZ3L&i+KT!yj=$_#?CuGB@L)CCo`^0l!2R zM5AE|KkqAJyq-VKwK0FObJBkhW2H&ceKpC^7-_0->pE`sc#OlZvR-W9N^meUM{kwi z5S#E5GH9x>d&2>+xICIK**6kT&M~A3rpqCQ@xDa^C%EkPIDWw>St{~5Dr{Dq#erNj zm%xp*P28{X^@kc>{Y z&)l*Nt}jl(-XHa-Ams%Hs`@Z0z5+IFFd*Fnt6}53KD=aTOKhT2VfFUUD6&{8mV4ER z@&)nGH}Mai9A`}rKGGsZg{*_a*v8E{uQ?|VFL)R$OU4g2BLyqEv2w>HDAH9WZR8H` zv}HFhd6y;}^CS)(7{}KmSp(m!RUlK77`JzJG^aL?b;jlzu$i`u%bnow7}L+r{{-QNYV-q@!kma!5EKvN)mqrUW2p(AE6?F$fc~tytDw0R;1$`F+_+WO(WwXeoUSDbhT*ptp}-JH8uz zz6|Cr{$!QJogHF%WoJxKUJJ?BScZ)`>5opxhn*-*jz*k>&gSLZq1%i?( z+p5-Y<@EahWJR6k|q9-v#tW|n!kfUXTS5T3l%tb znlb$-Z$s?c*`0dkN7PvB!~Nc$f-e@y(LeKVGp_-=Ukjb_MUWBgnezpl{%Y~D8$D69 zSre8RM>EcoJx=i#(BN`KkZ8XbfB1a@Qol#Q_oXR#iuI=)>%@#>Ud_+kbr&U5cM4s* zmFN;nH~jZ_F~$^FV9Mps{FC=hcw3X8#svj>t|A6!C^2WzFAX|$!A{6)oCBgB4{_NK z3ggeSJNBU)c+P7O=?Odvp1!rbaygs5ePs^x33`~?wG}0MP77-SYjHtjI_%%T7^tCo zV4>*EzaM=LoA*uUwf7yy=20=6MZhl1%c|iPh8oaueR(jS%9F|;Q(=U=3Gr20gp0Im z!06@&930xh`rgt|cEb}_J=KEts(3W|)d+e^c<9*Jips+tV`udqKIzePNdBNfBUR%u z%pd?xPLw8{Lv!)a2V3G7d>j0#Sq~-E0guj;fcHuTDu|Zn9dAAd!83oMT`Oab!$Qs? zG=p=h+rT;5On{M_%&E`Mi(K{)33s+ymTXCA#N28FmY;i!7W-4gDZ#b859a}n<~N{i zeJyvWOP#omeTS9I3-$j@d|c)cY?^!(j6}%hsIEev&~xx(fhq~i(xyI@tN5U0KcF*Z zDA(WM1fH>te1q+AXo^X|jn9>dk_l)T7A`Cn0x& zIX$t_oUUW%Fze@xCt|vk?{v7#cfU8q=3GVaV()*|xpy$dxQy#~>W&w_--Nt?^W2A` zu@Lm=0=DdkXROmq-g`woB&C)x&cS4Ky3ClR{{PTwycreUxh;-aWkn0@s=2WX*gpHX zEDh6;gL@^`BsbtSUhrWY+sFjlrH`$LtCZOaZKq6sK}^- z6#f}!@VJiiS*FWM6_$*fDv>sf8PX8PH%@A8ESq3~_jswB_ z$-JJ#3*&!uLe=>cUNYt-CfIM}?DSURaCW9C?Xjgb1E;{pqMNTWONW;ilt|v=`S3(| zn{n)~3BxYFLz|*g=zzv7H+ex^pH<5T6fA@C1aI&ulLpy8o8e{31FRdRO*fvip>+8n2SviG!jIAPKzm{c^J?5UT? 
z6%SAdl6nqXwx+>V?;kL&!-&kYG9?BN#z635FIeKT5&DW3<2L4&47*s%CA6&JR*V#Z zb(SgdV$UbA)WGQbzrkz!YGle;{4(w*PF^cTPM#MrMu`j!e-H;DKTk4-ga&P@9*nc@ znG(6-_EbFj4fK9E!lk62<7fC?!F!t+FJb&VOr6<-Do@5i*n#t$sr(O=mUS?C?%_&y_FZJs0_WPL$QHpXI2y@#GOrBb&{!!lmAyw4Vhw0sl^#dlDpJoz zP1lMGUKl-s?P5Y^L!H$*RG@*l@Rt^;Ix?J(R8wKbGZP}v zKO@X}mkv>(M)ckudFsWTggu$h@W4w0^5ph4csMl{A1YdsiSK@)#hG#h8;K2gM_ba~VTJrV<5F}x*n^qd9znd1 z9(}8H3KvGG(B6(1bX{bHi;7rIOzty3d>YFO-rfdAo8Ln3#17t5VFZrY`3ODhbHHW0 z9B~^^ry-ZqFudX>F1x{DNt`0fb+TU6Egd?xv<1C|2=DA)!~G~@VtvGf2C`23XvQeI zJO2#$m$31n3s*W42{WI1IDn?HK$LXGGLn3`zhXyUYiXJaysm9%5-0r1-$8!uw9zP0;dqkLCp+H~nvL=7RpJ3|2SXh}O zMZN{Go`m%eKD_7!cxJQb=%tt3+8}kZ?0zCtnF-*^x=NOxJPpROmZU0Ho@*RG4Wc@% zvGFy_8V$X~2eM8^w`9l~aR>%)ILd!Ms6t*UTac`*C7`&* znzZ%CVdA4E>^zFMYOu&gVpT0 z@>-urf}4dN!&UImEo16_Wg&*meu@T%r^D`d|5$&zjGZyob6K1jSUle+j9&g6OP`2Q zbYz&&_p=J5+$`g+%#k9gJ}dCHWB`&fjZEy7H-Zf3*L()O$KOviAl?y$*dwWez*YLx`RXrj!;0e&%XUVo!}s9U zbFxHz=_SlkGeg11EUuT`!#=!F!SJ3>7&t2sqb6Ly=eNtyBSi%Z^epKls!RGqHb6_~ zPjLEa&HoxxiQm{gUiyjzKNwG9OxA_C)gux2#L1Bl^dZJgI1jrvnvo0<;{bfQ1Wr%b zu3^hRVaA^~;+Sm~G{Q9k%Ln^|({<+BYER`-V&XV~?{R0pDQ(c!p9UUDN#HL?#Eb(; zLP5%IxVtMEyDDD8wZAfSvp(C)+YA%er}2FDUVB=|SOyONo!|!kmf(Ufy3-nb0YBhX1%Lo^hd{2rZ_u%Gt;@;C6ThxS4(yH?28?NeZ2~&&-JSeZLFItBXNn zqYmmjm!e%3>opkufwl?NxZ(GA96gBTL9fn0C&7Gvn89aEkS*uE2i4(%{6>89HX2V# z60s%ZJ$4rNaEYH1(d)rO+~Ph1n>CZU9vwZ#{<#V|>&ei!fw|NF>*aP#HXv8F%F%`D zwnSF83{wuV`OO3$@$i@TQCc<+ialDuceFB09;`+k?8C6>iw%yO(}X7ShhfGyb7=b$ z56$xXdGFX(2xYurp@RZl;ip0cPmdNRDqlla#Xbm}8wu?jKf-f3327^;pz8ima5}aD zES9h0POK1+<@>B@{NTqB<_(M|?kjvg+<>aR+y_tX9LVPjMs&vaA?Ws2lQw60aPjZ7 z>BUM_(x|Eiu2#`_VQVz-?Y5-pUp@9rx52jjq0pCl75XmZ19N!MhS*71CQ1dt@JO-7 z_jdeu?E<`Gd_OJqI>=Jm4NHEUgng%~uxJ>Yk#AsK>fT!3%;Y#~CR)(mwr$Wg%?&lQ zHTjDwibUP31h2+4!&=5HeNk;dZk&IEDf3mSgT^85JCULyodcXf^*XR)_pnQYPNRd{ zAY5n5_Vf0U;sZ}@i6HuxP-L7ZmR+C-87~_+lNFcY%w_hw%rP&*x1B(8-(bQU0d-t; z3^bNT@~(QNF#hI!)EUr+X@Av-!m0CoLEKGFeTgn9|9Ki}tJvp6O^Z*l>tp?lq5QWr zQ=%l(il){I#DDHGu$k@-M&@HsGV&*vcX$N&A6~|1OdsH47gwX!?mYab9S;khw{gx_ z2&5NFlZ540xG5Vx!cm1mRR6gY>FOSUZ(UMkR+$P(D6i)I_jvIOKO50?o4RntP9Cx! 
z>Y~?|DlA!+2@{ANt!=shL1}M6XU10ew>BRvVhXsAVS@-^dnPBwraBeA5o?qVfJ5^| zUPDyFdP8*}ZEQiM<(gq}u{!Zl5b-r31jhMNs2cl+`DMHdBgJ(P7rq1}8#K85tL13r zb`>_42#DzJ1+f?F@qAf%39FP3VC2O@JZzyx|D9odJHNfW>d7n=RC~grt|rJmqJp1% z4TJEDn=ysnm6^+4fygdzxQpX8LEJ@t4`&jZ?3i1@pSzjR9rD@n|j6IbL&MOY{ z<5}J=Yk@lIqG| zcjG0{;rpd)8l( zgm5tt9h{%nAX=_qK(p~1e=)F%`Qy(BJtOAh<}IaI`Pz^^aVv)mZ9_;2%;BW1j)Ews zPo`4Hy3>LzoUTZm7rw!b$8E^$LS544{DBL>Uziix0&bo2grY~yykDRf z)}{|Z`OYq|Zjd5R9~qI^f{VBXO0cF=i!K;j&CggaK#9&L@rRfxIGE-9&ULZb!`xW0 ziMSe_4zrxsp%*~IO7N0~0V%p4ISG-Qm~y1*01BsCrHtV^d^)nSSbuCmj|~I=E7}^ z6`HZbnwG3&=giGx`3}DX=(I4TIx+j8BkejqdaOxqo?vr9(HNI0SDG<+ksRM8cOAib z96C)H4<+ixXrTLySNVRAodK%x@rqOEMxD@qW(e2Avfvq&SH!xthNQ}9CpV$^pE^l2xZVG?L9O;26i#60^7d%%aaAN1 zxK?xZL9=1^>pRf;P=gkH{?1iS8idl9UV!#0TQa$>9a~4*F@IQ(c;IymdJko}tm1TN z-l@m0V~mCLZfTlwTM6@f_ra+d;mj*_6SFqi;+NLNn3Hf9dZjbCyP=BI>E~$ff@dQr zePz8Et{y+!U>uU3xufqfln^!f3AGzy!84T1r9Guo8+OJ3mJX^(O4Bm-(L`v;%$EK9tz z0|ZLJLW^ex*niUpE)BOLaj%*dRrx+1J*I7jA$96>{4WKXZTW!9NJSuThbH+S@M+3~yIE69!$}YR4Wxil+oK=)v}|3)PqhIt_~F?7^CdH<+4fh_yZcusLZaEE&EDQgZA0 zeD6BMWg}1*S)a|t7K0v_F^*{nxbYRjRl8Qf-mng|>ez-}#uX6uCzDG!5XSAQQ6`-& z2lx!T-JG{{8C<$$O-zoP!m6>V74U+1w=UTeAx|ulKl2A?n$gUPPAHmt z2K5uq;K;rs7#dIs8a7S5V4Ef9`hP~P);c)sD#El6o7->5A#VK=3MLnz21iy@zR3sDOV>|XZ#?l z=5&Vqz%;y#Qoo^M|5hM2oIMpU$9ljK51F*iT8=pv|($cD*3U8xYG!Ohk{x0 zc&Xf$4bfPBlCj()yaK>$e=lxmUBH|Q^Mr(;-7J`KXaNKUltSxMWolZiOtc2qLp;|CaqC8~UQ9HEIl4nZM5dmEa&EUD+5IpZ0P(R1Jtoy$j zyt1@te#a%8U~m?j)7m)6v`V33C+jSP7=X)LeNy+a6Ju+B!`?=FvQTJ6(lpb-=II@D zGa1Y;Fp@$AJ>;b`b*PS}6%3Wng{D|JI`@Vd>CANpU&B=(x%yFD@4J_aYn1{~*QLU- z2US^DDgZLNjJb{>%`o5CmQ;=P<7;gnKvm*Kt|nv_=v`rM@PnHCf;BO4`-Bfl5`uZT zDJrx%IUd4Rmf`1Dy5#FuV`9}@2`wiMW6>DKq#FMlF37my-Kq(gGl89-Ufh7Hqd{D7 z`a&o_*vvT{;Q5-WHq^`53o2%e!#BkoX80SzHvfz0U(X9??^PldANIqF_%3ieeN6b@ z4jmfCOW}>hFJbyN8xql+1y@&8a7A#vo3CU zdmgw(*06ibZ`OBQ0pg#%nE$mOZ*02&+u6PMzgZgeuK#&xSj@wCOLdx1vYH!yd>Q8b z_`s!kJOq`Mh}jd?u=~PT+^yP;zNRkh8Lec4IW_(JQ<7=-$ z)4W0SmQ55!IJ%>!yamQ6m|>S!32bt)Ctqd=aE+8HHJbGX^wUj<fmVO z4CbO_yCB2a5Xygnnp-Qa@T=WuG>5-^Ef2|L`L zqW|s(VnNwNE_&{J@Yg*E8C!z+ceDSY#nwzHWV5XIXhudqW|=9zA01N9@EHTvbZD6Y z+Br?cPGgqe(;f@6Z=Jj1~Lv8bT(mKUFjW$(2K=rMF58mtg=L1rv#Etvt|Unr8ix4*b@og=() zu^w4I|lMXU>u&AOmQm=JoGvt}%9$C!^e^PeK=xceU_A#(|P{z41QPu%I#jBzP^2tH$H z@^yO$kq57&Y49pPZuh=}5OC}q7d&nvWR_OI+d56sxt;MqdUK!{%W(kE{fh_Yq z`9u!oVqPdyN99j2`WX9r539K8qil#(q!=o^3*n=_2yJqWssH#h+{zn~n8Vx!^Ee|S zUvLbx57^LqC59LqQNoB_eBIh=*ze(u;ME7GH|5X)0fpkz@y$1?{~L=f|# zlt%(~GDiAlFL6O@0w?%$q0sqpBQNah!tlF4p!xVPu3*<0`0!~EuJ!3d!5GKFz9G5b z>t%r5XU(zKr=Qz)%Ye?F-HCpMtMT8=DcF_CTsA9HA$0936cue1E6_wPx%?Ca-?HVD z{0(W|D|H&h+{crPOi7+yGFMgd20g!Ka2XpL`RtiVeDmpIu7$BjIt^R+ZE2^le4i@f z6%UwRrA26x63O~H3s$K{gR0*{bY1lxPAoct^^20dJBO)|DZ|v4P$1k z#u<}RK;?-DUTw9cEm<}g*pLKS%wv6Jl`XAHupyxx6ndt0gT(#1Fkpfl?*42|7wk6& zr-ETDTP%$R@mrv{ya>CWKf(atUyM<#&57IqR(?03K3fmL^RbKZp1VF3jrt_qWMxjM zyaDOE^bIWTU+0J3lVwE8au`8nX1sBvU#@G64fatKjAeEppb(j4YpHPJ+fi!N1x%WMGgfZG5K$;yiZm9%+OF z2h6CmX)^zZxvy)MzD29<8z5I|LhC(tpuxVKP*4yD7x%~F)rb?(52 zU(C&*{0R@U*-(t%YtAikFTh!KYT|E+@21I9lU@3BU6(caI!c##3#;H=ej#XB$8ar= zu7UppOWsLxmJ>QrEZBLC|MS6~Y`djS|I4!>8F&#SgS8*b|hlI zHyrk`C-s-Canng-8Xl7gQ{KnIvUOg##rX+5SfoG(1z3ui-S*qH^GEgHg z3OC6!W{+Y8CzzVeuh_z}HPWZ~JYNN_=&nbF|6eQX`VtXGYzc zO8E67Wr(GLfVisu!HiletY51_`|lY-f9pgrxoD1B=h^ph{SZ8l?Q4jZDZ`gjw8_jznq(t$Fep3>fFW@zB=5>b zzUrMlSQOU^N2cf!chwl!KT(9P<0CQnJDzO(098!{MTVb-P$e)uU%Im_x#vAVPdB+=kO&^ zn*Ijj?ed`aj5Lj3{s%j*|G_Yob8zyR1##KOvj0w##GMDmbJM2(#YGACk$<0nr>A7$ z&h^L9zpj#xX7`(oA{q9)I06|{d-2c=8Ire5fsWh%9Xbl5@P_F^Y!ja6!WR0#lG90$ zcIr2VTmAzJgK2y!F@mGZn$Y0&Ffg(9#(%$`!qNMs=rGY5ZB8A9u73}pWk(8{FR&&u 
z)P_6^wIFVG()_!cBq*DT@U=*v#z_8hMp4XZwDK$8zRsNfZ80FtOBeFZCgZuA7j;O} z^sm^lU=TG3C_>q@pV4zzI)C8kISf_QrQWht5FO);y|KE~h|L|Q>uFPc^;8r`yoCBP zefnc#8JdPjQ<>SO#8n_hc%(wtoN_?-j!2NicJZHrDPFx31OJZv#E8ysY&I|lL*?1e z$G=tVc6KEvT0cyjGpZMB8rfOtlYp+szk+#EySdDJkub`m7w$fc1Fz5e^aR^Kzh`sj zpEEQ`&v_YIyx|cR?lL4dBCp}^o5m!2M++x<$BDnx9l@R_O`tPM7wp{`%Vf(l6sxu>1hbn+V284UXcVDti#aKR?M5v_P8y_ad+xD|gr`MZqnas>2)c;YE9{8VX+19Mz!waws^{BcD!B+4h5F2MWjQw+wWx1(lMiE zM3yF>dW4(m49KYZpV+o|3rsqyLq<*g4vUh$L*(Nybe;AXdiQ+iQeDL8TI-B$0$sSe z?(lgPU-*h*YdpQO8v|#)0YPwM!5Tk5 z{5e~n7_c2vL%ZIXeH33{fx@!8})% zkB{GhTT=EzOIjr3_#4qsUq$M$+5@UiQ(ofsl?z*I4k_0*g39$DnB9Jw<;r(J!u>Tk zg>}-i&x`rwN2BrS1`9G|q9$43tO&Psz*z2Ijo;~|;v9vl_r6!MV3qA_l z|FE+j>$inw?8bsQI+(J|0{Rtm&|7ameDPEQ&r8=iL#aj_zWX+G9lHdQ(#71G=asl) zwt&=yT9ced=a{E03*xu3x#S@^s^)Bq%f2#~_SANu2kv2iOfBonXT#S=x+G_CD~y&G zkcMb0bR1zo{o$riLnjmhtS|Ag)7h-6*_<|h^u?g**3@#%F%%7{E;Lzp45obCj9#tv z(ERiw^q<~{nw3j$UZ99e5dLOeHtnrac$+p}Jy*<0Sp)eeqG2}tXOOzhm;%B}w_ zOZ)=9f@@9-G#pC;^^J-&eTy>H3(Ua)Yb|t}xDf<3W-dd{*wPV(awNV(mvLDXgjoOK6t3{3ZwsWO(`=mY+E{a zwwLi%<}TzPpR*=r>YY$w=8SnJmLM>f;|4oxP-nG7mccQ{`axQB+R{SwN$Kacbnhdo z2SJ|9GOq7Z4TS%E1|i`g5al}--8zSZ#XLjCyZp#^1Rr94h-+fA$A#c0UWGSUN9o*G z8=A5wn^RdLk2kdy$f>1iumw_~=!6xGvo%78_&+te>*a z$9)&)J)y8=eJ|FpGl0>8gY2$tK+~#k!BEX)xWu{+PG{Cbm4OfDZW@D=hCal)Bq`!n zD+PY8#mI-N(6u`8Skk9M4RU>1&n;T)ozex0<5>pmrHwd4&4%l~vXwD6O8Elzb^4AJ zz~5Uo6JuR!zfDg@Yg%SL<@&RvySr6{FQFEBrS+5qhOk`OmA22~53& zDf2GzQ(mz9CYx!*yTxPZ@EnjEB~JshnZNRz4Q+@kh9$R$f^WMy#wsP@3bkAuM+YEb z{eN&xq(F5QlCaNhKXdo=a2kg;@t}1WO-9dR4i!Vx={SXl-xw127hyQ@y)4QgV4t!MfDKtBxMi(!)Cp@oC z8xyBPg4aAQdG$@G(Fua!q92^m?GMna@&->_lp|UzGT<#chks2rr54sET!Y+5>|tyy z3x^rt8s3F}r{`c3IMD~4=|G5Wh6;6tb{j>?a7pehZ07xW$hrT0-FQm_{a59FZ5Tvx_-z07%zJ_Swj z3UpC^4Em*ChcVmF!{0!4y6K)S$q0{tslzNO+?E0RJob!KSq5_lOlV{4U{Fe~2Z8L{ z!eI|tPFCIp752n|QWe`B^$(&~Up7FeWHbgeY({^dN8*}<^&mcd4T7x%eCU_)Y{t~b zzhifw9sXuyPRaw=Zu1$>98o0o{=fLap1)Bvrd#+Vv<6-;lcAl*ZE5n-7qEhqf$4V^ z(oQs{4(H2wgN{esLe0Gxd~rMm9c0;p$(dZvtm(=YYg%{QfW{nV+{3K`d>>{&elX8psaNZ8F91geTBAel1vZE#ZCJhd}$vU8n*| zSY9{=A8IF|8_O1~sTqld>#a$~lK;4njZrw%jAiqdU*xaKFUFlYa-?vs0&ySl6inx_ zUT{-8PVZ%Be+LD22UP%-SOXBRWi#fUKHhER5V2@!GH0-EE$(GsgWDB}&`n*R&z$iA zS|4bWV_$Vi=3eHV96pFYW%d<9-*!QOZxbIA<_ajwvTiZQVL?R+Z)foY23oX9j(#oV zUpJwSi^{?O>sPKm*_NgR{o)#CO+u4`cu+|-hBkG!r|f$O-ltB0{~D3_q_%+c3@L*- zKR&{eu(cRtqzV?T4MN|wt3Xi4~)Dx28D~qp-!YBbWC{${m!EyFK;Ytb}(e0uXy;g+>E6B+JZJd=h1C` z1`6cA39o%WjW!0d^!CIDusdKIy6yfYTqEiRXT}S7^6?kU=~;mvuj!CmTL)3WTMw6^ zmdjvx`hBSNKaHPEq^aZOB=B(z;9^-uK_YXRe+Zv(zyEcNSTYL!r3_}A^I0xF@7*D9 zc^vo4Qo z4;c?ua2w{`!nR=-d9h*%_+(z>YlJbVI_VC^bTpzz<1c)7APGfIay;ZJQ-OOVX1xE) zujt8U43H|QDb~fV$N|h)&iIvAO=+k0EzU2#90Ce0@Z`oEyu$LY9}gzsYj!?TX1y@+ z^=AO-dax=w9j8{>kiI1qD1G8Ige%s-qdHl##^egV%d7yWVfOrklS)KXv;<;W6``H{ zfUdkx`0oFi?B_qhPZSF`E-O>{&l7RDNhSEMy$HiQ9x=xqF=btU+uPH6rWA$dJD6 ztuW}L1l0z2gX7!D=r*O0x4m-^{e6$)hj1BaJC@07Oj`xTJH4?pejMCmGu)ygC7M_% zM}A$($H4pVVUh9)%%H|loRA4FktKL~y#%E;9%g&^xp-!xHt9Yw2OTpH!FXhQ`xFtL z>iY{g-T^C8DqvWEfN045z8nZjECtY_Utj4JJ+X)ziNEZxqao{(1PiBe zmtH2L;7L87Ln`!3`(eSn&uAuyS}t9y5z)4q!vfUmSLTaq*t35cfNpH2Y;Zh#4QtFd~`tf^Qby*!mjg z-z`OhlLGognw>qjWuk`V8@%sy2rcO~JkRp;&dS=1T@o*Rvy?D4! 
[GIT binary patch: base85-encoded contents of a binary file added by this commit — not human-readable; blob omitted.]
z%l17YIo#qli7Q{ia?$JSCAZDBNbQ`HEFUF+-?al+cD5Xp|673(rL%G5Bmogv&4CrK z^{D>p1T@KAferDOAoEoXPW^crK5e!pWtMGF6W{@^>KFLVuQO1migD*Ax=TnjbHv%o@8bz(t#Xn+I(v| z_@B9m64O^OwJZRuf49PgeJ?O*2IIQ+w!qkUz$0;IA-_z6bz;}Rtbs=mqW2b7K2{;I z2~u<*+mK%QD1{dPWnsiPC9rbMK)+#0u+hMTG=I^-+r)wt9O#n-xQO8?J3sdnxP#4Z z=5p9|8#EWKVta-}JkiAVg!zxS+Joouc|t7u7FX~BJ1W}uuM-v5r^BDqKQS@B6*QwX zxPa*qTv_BpXdf$0lulU^gWr18_ge@|3pApF2o;gAX6RM3j;G6b`- za@&vF(uzYvFzmqxE_C-otQEJzbWfK58a0x?F#ZIlyX?S|?zb=~Y6WAjNxOiOeE~8+`VOU~rN1Rd^1L5I&te<8` zhdi_;HtU~3$h&ALT>KH*+boZ9<5p4s^U z3OBunT^pD~Zea*a=&yx9pBotWkU4aI?&RmpkRpGt>(U#KGC^9f2xkTAQ1Q&!oFx7M z{NJhUHEk1P6u0x0F)Z(&&%W2LOFZYQMV`HCL~kQ~)&SHvPsCh+%x1V zjJ`rZ#~JQRT^^R7KFvMVV)uFLJx~zYBNFc+knI@?`V)ktUlfchKH8F|GILsXLW5Sf z{sx8j@9Vi+(iv<-nb6*4Lrf9?nip%M zA)9#*K4wE^^}L0ioYk<;MHarZ{o&riV_0b4$C&6h0S-O;G?zNE8!u`eMz5CV-29^BXv3Ta;vp5Btfv9>8qvnJ`4w?upLfmyL8i3v=~u3~ zQXbxQDG;-@iy2b_oxjy-kRTr&bXWKR`^OoOs{BxXxxkh@SIxuzaX)Ya6s4HE-awkxC~8rUku38YlZr0J=Xq9I0Hdv4ptePs93gMPhcMURga}C8 zcN3f@g@9yz2W&iGNrnv=V3WWbYfp~DSL|6_CN%~#oEAay+8UUy>WNnB0%EsHh{bIL zT+|Y$QmY`|BV$tdVQSCGpjJ%3$G_A~Z=|%zC&3;F@cM zoc2lleMFnK8CLOQe;&ucoUPay;R{=?=<;W^TJg|w9b&gy4GKmU@}ZX$Xg1^Q4F9M^ zJm*>9FHV~dd+rEdiWN!M=*zITvmA9;e``^7D#$A|K*DZMD0dfw-{}l2Tk;-tassh` zOaW}&Zcc0IH_(|r6c+wxLnC@(Q2fSQq|aDHDP|2I@K!?Q{GULZq)21<5PTzZ4=RnW z!sp@#u<1`H?3me!4qfa%)3gLMXXNvum<#aI$CfBR-GI%}L1_PCGw$o01&y=Ug41Dn z>hePks>2ImXXW6mij^#9Qv-&d42aE^yRi4^A#_@Q3)E##LDxE#8UOMF zOJi75QSL>{jfTpU~%JWEOX6dXa73PIPrvAb2$>? zXUFmm{g&AH@DSGqyC(-3s@w=*x7 zBPTd;S|T1A0Es*A!Q#_L@%ei*5U=O~|k{6+>CA@EBoFZ>U zS`cP0TFUsC@kybaFkvH?&-|2A_r1pcj8Ztf{VG0Y+)|_OzR~NU8Ukgc=Yj~mN3;tYpIo9x&tXC~Xy(Yfp1x-cHM@p_ikIHon3Ae+_1S7oh z>lfrzA|sHG{;@HiTl*jdmm2Q)ZwS)NEa{t=Xge1m&sSMb3IW%9{L zib_9Z=kTC?PO);EF!j?(DERi4Ph)vUAKMaG>#j|I%{3+BDKj`hnukdLpBf36E)AQe zwSwFjwyzEQ3bosmsce-k+oOAdiJA-z{9Xhe-f3LAxis}KZRPHiC!&*nIj<~Pj3LT> z@F?mEgk-b)`s+OYptT;INp#r`<)zbNO(pUt{4XXY8gsYe4nvYrnkYeTBz#O^Ii}!e z@J`75z_Q6u&z}Ri6(zXmXCGFs-GkEG*>fy(BQEaBLw$Dc?+KQnQ7>XRuVI6+PT{`D zGcOo-L8kYWEgHv4#l?VCKzC~ z0*wv~!>ujGRPbFQS+Tkfr5BgOr#>z6o@I_YCmO)Ed^4JzZ~+Z_rRjmw%q{%3PvWmN z9$j`H6ooBF=NkJNmvE#rl<4b|N%t8e^ZhS4m19IbyV_{{n&hF1~cF46W;& z4`-e{1))FzGPmb|&Wo=&IVTCrRPsT%){HT_=fa-r3RKYN$KSgD2{YI%sY->h<7DKi zt7-#xd9;wMnKB#ia~IH~vYos7+kyn1zlM_XFIcXS1p9(b(9X{vq{m%>^#3lvO|1v) z=a>m~Uxf6pr4$J>&f!v2enUgxRV+QO#k_0NSdLDP7SJxf;DHu(Q8mSNM{Q`*#RR@! 
zS`V+eX)LGsNCAu)dn;5u6yIIaA-?8re1?({I#vAR-}Ete=Zglgz8D6*nYy(6xIaJD zhGiN$pX8LAtMJrOh2&)&2DhGRFg9I*`u$FXpqhBl@_Y`xZM~?JUWQ#0D)9Of4H7WC zftMMShmwa~ur9k33${%a>4$to+h`-2)wl)h4SKL^*m|^^=mWQv%h9~O4twQaF~)SR zDBOW@MQ0B~)%aKVsI&q!FL!YneV1U(e@)l8fp_}mcSa}Uc3@H0qjCeuxCsHj*#I&@O69cjwNRyWA!k2 z`C~Bg)o}%vaBoS_G*gysGK0$LLQFCk&v_VY)8lrQG^3R9NZOm>*6CAlVU8u);yWC7 zkx_WmITgEn{=+)vP)dGr2i51Dhp}%jfa@_!#)$BQx zOG~_j^Ki-dGdL0%&+usiTwZBLu8j`GZ|u%9S6PD$-Kj?mJk6ldK_6}mD}lgq=V9-0 zA>Ags6yM2&;9ZtETx}*p+$#Ry$1-)YY`7{Zm~t3HV()NtoJ+yJlmGyH)DmQz1DoPGAx`6$JOT=IX6X}@X!`r6X4 ze)u_1o?8URsS3$)lEtzMb>Ji|O~)Qwh%RT%;lfm9vNK$P2nHX??H!X17LBH~`PwBo zcl{3d4v!X9w`75r&I7E?JI`3z_c`M#0h#ai3G8hr;-*xVf4^IbonxfYAkBa}!~=iF zPLUkXwW0mmRX8(;-El$-xZY(qK}ldlrn~h)!Bhown(~=XRyf1nO9R}qcZQ&;J)4tP zNQ2DhO&BZp4r`zG;nf#4?5roCNo9jL1KWB$Q^)q1>dP?o`CV>BvK}e@I1Zc`qFd~{ zj@@}J>6(fEAB)v-1)&nIG3hTSaGB#M{q-I8JP73vv#glQ5nt~4b{#T$lr!!aA*8P7 zF7iG1qoH8wGsy(!G}uxz2v=LP4ri!2T{!P1PDr()o1`kiXY>~+mQkZW9gOM58V%yD zZ3~Zcm1&;TW0(_YPF+b6XZiXyR;@1O9wjio%;Zk23cAY~gfD|N?%y!bC?4*9GAFg_ z8&Eu@OjNbz5FGkwLZfc4f`G~P@KVZ-XueAVw-w$nZs9$2A72W8X3LSjBo!PX^^7qZ z4stkk6fXHH$MT;;B;&7}5yf3r)cURn+*(ah959VrlCMWx{)~i&TW@0DrhQ;HKZ|)D zABx@vGX`J9du~EPHWajv;oBBA@D3-;ctQCfzAk7nwvF1uyFQqOH6QoD=+;PF7#)rw zd>#nbZQu)bBq5%^1ZP-pMe*$uE>blQ+MJtEFx?mCBnn8!&uq}W%D7xbrCj(SeOg#G z6l*RVM=6%mGqP#JO`hIZtyl%ycd#8rfjU@@SE3I4@8PQ^Q+g*$k<3|q2BRjrLPud5 z>|j4DmzO4@o`iPv8fl9^Y!0J;+ZX)Ec7g706%f>_K*O5T`OY=%xJk7Tr&$>hE|%^8 z`pXz&i!mY}b;H_nb(-*YBgUIs(mO1RdaijB`dZJ0&GoGF65q;sD9_-crtJi+mA}D- z%an|&Hzv>g)o_Gc2IC2ik-YV_Bx&qiRrtmmt>bj5thy|yB!*+1`4+C+V+RUT+a*Se zE1*a&7mLp=g+V?FG&A!XRJW|a)H4Y%_DUcs>;1qteQXwf=mj*cv`2@pLfW@`63q2e zC5Bm2WSMRx7R%{TyX-^!{F2MK%R-xOSz65XKW_o0jw(!z=tP6R65cB$4z$_lvPWq< zI*wyqk-2FYFjtP={GdrcZ|wn1;YmFElD zP&Ef$PBN#ylft>GzxVj0ewI%=+>3)wm*Jz{-HiKx6znph_z}rDY%VzpHXP8Qb~o9x z^Qvn&VZckGp1du`ardDD431WrjKthH}L?^d3ZxR*nD`894JMvW|QHzfHBZHY&AI0&-#!j(gVX&Cbv&WO_4gvdOH|9{ zuhAsOhS|~7K3%%y;1up?cR02)HxnFl2m3%|o+U9R{$u_i`U}It+i}mECJ2CPeq)*x zDcqg~7hVq{NypBDuUD}oaE=607n!5iUzVMCHXkRdYm-SoRLFwMM{)H1Rw#UJOp{8o z`1Hseu$$70z8@ob@w5LV3*W@xREOKpJlhe}mAYVf@F1e!s!Bz@pRwZVKS(m4$QAc$ zL(pgieDdZUXx9>ar~a1LYA-BCAyVoP_R2kvWR&AAV&?O^@a&f<6gReD&b1ECOIi!J4D5nL+lwHSKQF4+Jpo$Dzd^cp8A_jegPLX2 zd9P#@+!CI{{dBmDbFbgT;#J!~eVZ6E9^3P!?;r6ZuQ~`)e9M;~oq^j&E0Mf{GcdEu zoXVwjvv*FMB#=F{skvBWJK?(v1E*5aU3@u+lAKpW*!_#$VP30mmP zUyYO|nL2mifMY8(OzuP5$%bUqJ_Ax8x&qd2vL!toYv2gu>G~cyz;8TnMV~inP^}3% z^yvh49}U(Ob(~Cs&v|K>`63-5@HbArcM@D90zh!+rpTo_P$IbXNiuf-ZV;`?f!^yE zp_zGtYPZK@(%*lQ_@kS_(_kF#IH*DYhM7`)y%UBUeu!z-=46=RYitWPrq4_DiR8g4 zNIok=e-C9dfMO*&w(~sHCBGDXoga@?{~d?O##&Hieol*f(!|zOi3m3b|ze7ck-YV^meF!62(p{>NZBvW?w& zkA>fdi&MifQ1>5Ze9)!^E00Jf?7s)<*Uf1R^B#Qe{fX7P6lm&?(RkyP3@tF1hH~?v z@WD)nagj1$Q=l&GLvJ{;F$QX-U&DwkT`=;O7{@Li0?Q%{iMX%WdCZ$a9R9+9ye?z@ zcv)Xe9deEvC_aj+t5v~=aT{G4R!D4~4&WQ>4!q2=cw5|NL($D!Xmo!e7OZ~Arwng` zoQWGC{Bk$0o+2dMp2WcC^W88ZL5mhFj*vW5F{H9aT4;HD4-Rv`&UO>KQTU2==*LDv z(|;QD$)&;Q-ZU7u%u?a|R{n<;&-?J~J+?clJPE@sR7usFMy}*m7#cLN=i)#TUtk@B zA?C5rwD2h8953f|%ozKJ?We@QKIDe=CUZ66O&GDd7NZZd{6u*PZ^qofej7R=s;`8% zS98besqt`Sn*|EKe-RbzWvr@K*|=;TyBp3`0AG{syjP=`uQXxZN4ewtKIu!0&(;tA z8X9zJ&oJh9D&doMM8oKaK{TvzK4UEYzy~F-urZ~B`!tiW7~S-UeYqU5jq9^o53ZNaL1TI*x2QphPQJ?AIL|a`#hh4((tL`(Ch3yasZa65dTnB$ zeVQ*#%jcrcK8DXxJWP(AjG6Um5VHOcUcRPFdZulJYm3!Lfq9C^ujUObX=i-(loxn0 z-G;)^gV1kX1GhTTLGdqR%_ly_E0dOjYJ(5f)nsw1BRFt5*vl!bxriM{WvQn?9o(uPq%Pep9D$^m1yP0si0Z2hf6w=%kP>H535$%(lMLVXwtuP5ZPUU zd7VA*ksCx5RU~|5=3me{@D83`6w;qIL!eFA%L zxr=!}La;;S0W`54yUDdel+U{Xy~|~3;EWc?k&WOARVWDF8^enUSJB+nn4I_@MQ7qr 
z)7ORJ+kBgo(mapmObz#}on%gukbZ`QkSQUAkff4ILNX_!VcBPI+K+d$g>*xM8RJ67=IS1k2FvBRIC~9n?)~ggp+7XQ8(h0>A&l*A@Ec zZg3i^npw8%Q=(}8YIff?7=<1O>(E}e6LRfMNUw_yby2>>h5c<2?|aJjGwR9QlJ^tf zUw;%jmm+8+>2uOs%JBlrR=B4vhWm#qFwA2h_jt)f=xddssW1jKMp*L&I}buLu7IzF z*5t!C=9LvV@UpA-qgv*3*rNXimdv>c_g)@E!HT&~Mg8@>{u)E7Q&o-unzB^iB?n)d zoWtaUI{52?DeY$Ow&AS15I=ARJe|$C8LuC4@)lAwIgs5aRphDpx{r)e63Tz>&cfTB zW<)wyn(9p~KwF_U+Sy5?y>SIp9Xk&NkMyAK-8*=lBu~3`2)U1aszg?PEhu=W;S13q z(pOo7Zkiu(-yRdH^7ta|Rey@|ClAA>AB7mbFM}^yaE7~j-hyr$qeYA_JV5W%R}jsZ zG&fSD$u@V!(4QCup<$lT&1Tl;x{XQDEqzQ{{T@7vUUFIsQ*hD>S*mrzm2n>f@arUF zn(C{}Ir~xwKk*U8bKgPQ>05mBUOBdlp29iS2VnWUr))nHg(-#XT-FE!mmQnMtqWdq zl1G}j>A5AXoTx;Hm|p^6!g0KlZ$<8T+7b!tnknrw!*JC|H16DoVRz;D*4rcSXwe7s zTk@I{{7&S@=MTqY50waRZp1gEJHc_U256j;=JPcyNv<+uc)RJ)Q@$JEvU|4fI=y^uqsN40hahfdc$}oU?T@<`# z$i7+x9(IOweY*nb&6203<2EsdjRWtyas*lyG4|PXc}!)T!zo>Pc<{G5@q53D>-tFf zkXr?uV0BFHoOD$>rQIBxCa=Sn>}<8W{2%vxl^!kqkIn9^J;X|;jc`p-mpqEd#-P+L zZiwU^d`=uh#3mVFH&coxv`>VrZ5O#2m(?)-D062RY14*UDVjO|EBp?ZU}a|t@<)`& zoQGX3lW>Zcc+1m^Q|h2X`xquz=s<*f5NPfUM$d^^{F*B|oAajh0B^cBm-9AMl)?UD+?Rq3I|0!e7 z4dHKWDM7)8AC5wHR#hE)$;DmPBerR*8`G{%EI;bgUlVj`VEHRJJ7F*t?ayT#))H$b^DT7IehS{V>|@6s8@l;J=Hu;QUF<}8@P5%3y`8C}dxV_G$5ND`LmhiA!>U31rChNeC?_!vQ;!651=uSu_3o zfo(|{@2TSp*0b)x_#2bKr{z54Y+VK}7IXNnyodb#9`a3UikDF3i zhR(;YV#8O$H|*%)1ikv4yzmt|`d`F^*S@^!3Mn$t*^sz=br5@>Z-Ahjt^6mma9sKA zCgUCN6yF?VO@mrlSjm_15!V~i$6r-QSL}W;mOX>1XIe1yz)Dblq(f6_1y1^N63?5- zksG6A$;A;4*!1@{^Xf8OYsRk!JJredsnU4JWTY zhFux6`K>7mL}id3SiYVMvS-TB?rjG*cLd{54!sU;#jOy3Tao%4zsS4lN5SrMhUDiq zQ&M*@4b?-R;vAMW$;~goZMWZH<}#LZWS_jOqc=EZMOm6-eS`B_a0?S+oMDix6|vqa z#$Wqv=&E~t=%8moJu@`Hv@4xE_xK5x3~xoR4>#b&7)^4~;2mV`|9~!Sdg8v_jEB6p zp0h8x3kyD;!byxt`^obzUS6Jx3+&WLykrdyecBtG#t)eu%gMI?sM}41+8g zq(Z-M9)}v59{h+U^I*oD*L-jCTKJIn1|?FOVqr}Z>iAld?ydItY?cLGJ?}cIyjjaQ zFSqctohh|qIgLP{Ztyc3%Io&IvRt1jeRRDTahW`DeNz#sHamKMlP*?sJH98IWy&w9aA~+J=m#R~fJG!JL_dR0~M518K z3a1@x$1Dsj6)ucwJjl2u3u8gInVvRnpS#GUm z8tN)lani+~An@EjIIb+f*x(>YwkqX3ZKiWM-PQcf+#_f!I>zU0kjAulM)YIG5fgZ%zN#(_>;Kwdn_Dxv!p)om$!P)*mdK>Me_aL zxaPQ!e%x~gZ)`Lqj`IjSDbXT1s{6s;bRVp>wFsl0y z#?xvD8J)(b9?KU!pHao;D${xQ?XqAi%;#e#$e_zUHPN4h_2_<-?OfFc!Kw3u>8KYO zVET9*Kfi~$O>32?$y*~@b$&m%`D9`IlTWynF(Uj2tpgj@-`ZeqhRc7k^VEJPm$Ud4 zpJG=EVaAr=a?XWwnHA2d4mBW-u{Tk}XAtKin9|yl!roZp(6P+1GMj%^6JG%K~ss%VC^)UzQXL4at(}ei(3e1_Uss zpmo<_(3{Ad@y64E_cx~tt+fdELW!MM?7mw4ikA$QW=x?+5b0D8Tc2d&)w>AW_J(1N z&n<}ETY)qDpK_=2ub}Zu2~-XAV(-3cuIkGbZvS#!dZ)mE3a7otuJ8bUR9Y?A-93hu z+ZLhn3)Z6>)5;yaz#O4(VtI-BJI;0TJ=jwH3Z1z~JTWgE3*)|`|EFA(>^TPoHd+`Y zo5D9Z=8Gia0xor(J@0T)P-XtY&%E@gH$R<9jSGx(0<;nxHPp5sj88 zkh-`^2oBePR=2}^06yi@eQF@d;VY{B_>9kuhC!I}JwDkg9>RL_a$EbifNt3r?5bDb z9r}$(DcgY?XbhrZFCs;565CbH^KP?^Kp-1^E8#w zFluHKJh6e)X4T5uJfQvtXp8D6J;lLBo z;-@+!Lis9c1Ra4#xr~b|{Tq7x6F`*t2150d@%tHezJy$5ytsLe`La@ENEf?f?UBVN z1J?AghXVcRXF|GEx9~NO?|_b%B|S6l6zcE4h^BJQxNzVUnsEvw=TRKX(n}L#n^IWA zc0w_8AHw=y#dvYeC-8Ha#Y@smsqvbA=$GyR3nf{q7WEt2&W6Lf`svv05)GP1**Puw zj~h~a9Y%DSGRJZv2;)-Wz4skVI2+3QEIi9MteU|IzRu50o2>+Pd^@(~eSoYT*I`3m z1mCZo`m{6Hu4952N1b z(5#+StXr7DS2>tNVtgCi$&{gk^QDM!DGzh)Y{|mf1k^tk1>+cxGf6cU%=ee!E~Sf5 zx910D)obDD;T@n``+=Vt&wSFyD)`&+HsoT|L-2j{74&zvg7dp3D2;jsLBDqJhOY*} ztnP1$2En&qwcOPIre9;?(04 zxgg1OzG3TQ(GHdsJhf#AHNW}~G-kALvmdIGge$82x?M6f(CZuF#0??~oGfXy z`c=*}&x#+WZbcI`V)>ULY)9sOi+dq>$T(KEj6pRNHLU&k-L0mSuq?RCYi)kXW|pTZ z55ue|DK2bnrg+_jR}k~sPRJz)4X>=x_=`ij9knm^hvmpy@xR1M;Z4j%#?Z! 
zjl)IPYGF;y3%vQmg5KJrMFrl;jsoFp6sDO_?{$yB{>UMS92<=7otE_GyF~QqjO0^C z`ayq665|SbVYjIp{LWJ$&u6a00RNA;etr$6DP{mIQzf>GdU&ZT!=t1;ovN^ktmi{A3h@jo#sOOIBjThbA8)<79!F6~%;8o%!mu)k-%DE^^6V^~#Sf9Dc`O^T=&${^7C`<6^Kg1P|`wV9{ZN#+UU$|C_aIR-d9bB?dBMZix)9umQ z(Pgi@*yqtfXb%JS3>pW8+izj&vs&($izc~tQJxs9--d*-E_g`YgnT!f$UHjLsP?1~ z=AAL8!ZIy7MM{abYet||o-DNpG$e}?pP|N7JAPD4E|5bv&>+>21iyDh4VMA_4)Z0& zov6ajZMV_CpXG;`L$G0-Hy^y?JVqaU%2$jWgQMzfFe~vDZyVRcHDz1@zo8u>mo2s; zftIT%VWTnM;>O%KUd#nOF^^yBu1j6+B*UEM=U6bV5^X~}xvZHZxj#z_VC6gm`lVHc z8X7IZz}8#{o;44wHmi{K&{JUXz=Fhz6~M)J8t*u>hm%xZ;)2@eB!kOblDGO+Tdo)H&xc+3R(sW zA1G0{rhtOO!yqyEIXr)Ffa@N;0Ehf2jPLG*)XKqp$S*^jdgLamBpJcZTE=D^Wy^7Mj5{lk0F&ry3 z4D(EYp3aA z6B_nv5AR@bl({?&osOPJMq%M{a5NU7|L0v$5mgGf!vh^(31OVOJ{eJ=ic?XM^vP<| z=J_9Ria{oH9aG~{XD!EVwf8Y(g9xGzUgM>&9tP8AZdmi~3`A&z;@My}%gXHH{oIAT zR=EWgjVOTZQ_LgP%Gh7+mbkk9E+mZK$aSgb^KVNm>7&$R7@7SNTiS1fscZ^2qum(# zUbw;bCFb-+OEmUgJOEjS5BYMZC=^Iph^+5k0m|QCUbP$$n^`2<8#JV@UH&XP)bva21mJ}N4W_V zPZ@<_hS6s!vES$Q_j3lgL&pWLt{K{eSByCn6%%7 zXv}x#?=QcGepBK(IG2uosRy|y`vwt7y%4-xUjkpw7~Iz{z`F2gIQbw81Kv2p>${~W z=#qDO^`jU&PTHbw+e2QES1tZ{{0p|7ssVeIKd_>&2$u|(MwgD!5a!g$>klNspMh>@ zXnV|+pDE*3=iGq~t>4hZc#IDIDn#nRIc!KVc>>im<7NaaGUGC33u)?!B$4I#2!LS5 zcUiZZt8}&{eHG;x8_vySZ@}26X-8 zPc>`P4ZJzVzh?WK?@IVy+k~jBD@V!2tB_T#&)pv;OZ94%$up?U!5d zvyU3B8}bZ{fBk?2oj`s|%u9^T-H55xhJ5;bRho15Fe(VONsrM}mMOC)EfzPRcXb>m z@tVvBjMjnV-^XCX{$`wE`k70}HRM!^;^C`@GMO}3mk7_F2CvSuFt_gjR^3?+@fzAR zrbS4tOIgPA##B*xaWy}rMwYhvKjA&^+T!FQE0S6$2PSH!#Pz@(=sP|fw=z~;ul^3W zI?o!DQhq@Hi2D#W+M92PW}Q3VOlWJ;pgIfmNYFAF-gH!f$j{9OsHHhw{v;N|W%9sZ zQwooT$`i+%E5WZR2^^J9GY*D5IGXFB!%!81i_#$L*Awmq&-ytp4T(VUtvKoESFB)} zy!o=5!EbX7x+L|8%zuWXg0>bdZoPmX!j);b)_0IR9S?yj%5?R3#`f`Lcgsg6T#KX; z?7q%|%k`n4@lL`U%~WFS;nyNRoy#ImUlZIip#g+5N;ttl93Qh&pL(%(E5irxalCjY z2wdhkn%35c=4qG`VXhu7sbRCTz-GA8q(=Dc>lmUY1w~t9Ig6fum{j|n&1<(p*XLQh ziKaH|@npl#G)_U^dK(A55oCy@RBi@hKDvut6aJxFoCFdp-a_i3_ncO`CcF{P!jVdHq{;mORN5MoZK`@StVxbP6Qe*Kelj0@ zgDI7yO~jY$&FTG@FVO3GBg|&A+^YCqUh~C2Ou2cNF)j^6t^?cgEqiyg(~sl2_xY++%=5Nqi3b^rhmku z*>(oxzONb`1AlO){85aV9fM9WF(~o7${A#7kbpC`I839DaV8$ZGl33K+wl(Ghy1|= zS9?fpzXtzUXB;|h(6VJ2R5sS(7z;Ie!2A(zVb487KYd!?n8waR9^vpAhz?ERj{fAak+enG?edVUl#HcO*``t`l%{0<(3SjH)j3}7>(6vkKd zv!KHJD&X!jpw}>+7QugHv2FxJqT_~57AAs#*B@~`t zg{}d?*r2En^UBT1y``nN#AFj_>}2d}AAc@XH5yXm!nlLF#>6%A8k8@e4Z$nqp|MY& zn6}r89V6wTTy+bV`fwV@z93M#vlkjB9^(Cv`D1bjd#1DpfYVuN;&>~A%~%F_VZIWr zD?5q9*gijcr3$PcA)w2zGX~|0X?(im2X5P-POk4~v(r>9{&&PtbeN|9|GThwwOJQ@ zf5!HJ6E1PveQ!bfp$4#-G>C+X$KlxYcjy^76juJp!dJ^x>Ep3Oh*XIN5!$`uGhZhsJoeP865dkR|O>aqUgU=mj)M}B@`=a)bct{gAM)QTsf!x>UEM?Z>_ z7|#$3hI)v@jG7(oW1DCrv{ zfBnF9tbGHEU&;}Gc2_me-Hi>I7es2?Q}MMkd&k{;m(P79OCM$ls*DL{ z^w^9zyi_2SDP}ZSE&;kU)^nD#8D~xR4p@ivV`OL!v@VI}CpznqV=rY%_T_Yx&l!oP zm)C%PB?}9!RHB1*3`ki>ILoybaux~n^>ba8D%Wk-D2ScUbDUl{M3uYpCa{XdrB}?DR04*j3v$u7}Mm7 z(cD`^R7sOYOSc;+YpM$w?q6`b(l3bpJeY1d7l`-HS}^|2Ine)f8G;O%zvVN<JLpjctF`^6JXwb5HbJ8-ykQz=*PS{ ztD7Q!#`8^A`~VYL)riX-Dw1z|3NLo3(?@&yz8lHu{?f>vQ zyNl)ZB!h2g0qR;_fpPwWi1hArm{qWwIWTg-xF-~SM}G%9HoL^XPLQVjo%2#Vj?1pI z_nYDOIYH1!N83vRDsWBb1`lgO;|zIP6%`K9vKJS`$`JjL7T7xF2q!CAk2w<_b2agb zwBbhxH<8VV-Bz)??|&Q7KSTz-rhmZjGJ@q>)VPBZ8B%-H3Uhj1^T+yRNy5iju)Wlp zhWvQT-~J&(EWn(Ou)4r_VZC5@lJRi|xuN`_$(VFo0=D%^u!Zfu*74J^YUoie;o5A@ zGiw=0qDr_Ki%R+JYje=)hzU)z+zYZ76|lwS9;l9tMPG+F__OB<#GN*yE_w$YSjI&6sPGbP{Oza9?UuRIOi1pncM|&p3R~;HQRvTt^Nzt7<2a|4n zX|%De!THmdLdJW2JTsa3oJR)YxcYp&-K0$-x<)|OS!L?EZ4D<5%Z8O5r*X9?7X{0A zb8qGqLfyx=s5a?0jJWm+cQMX5e}p;4V%V(f*)~ycY6oAf#Q3SFufRQ(XBcy~8)rFY zp=JJLNV)VCV&8?q6y~zJbBF!C{%qqcFB;*#fj0ETXE@F2DP$eW&awOXHq`F#=((`kanP6cu^Nt%}1uHf!i$dGr8b948K85B%7hXFoou}k9y?`h`F zH>@b*gPy3k@0 
z<3bkd?p=#G`U&eshVXwje*x)4F{C;@6*;;_qsuPVE6AS#pIj8^e@=sln&c!p2C!bW zP9JzX9D~59{g|251XaHW!-N-%jrX+;#NU2k*!d@XCz_G_nepIm^auSLba6kXAg{R}$E^+!u z@o^PZ@SDtw8&Y101hy;rz<5M=n`IC&xe!@H5DV7c!0mcNaa!1C3{~=gR6i*w%w(=c zM=4x^mt;9_QUVyJ@>O1ch0--bJQPoo1pb>?H2S|qQlzYVOVR^jNY z*Kqq5HqRL)je^jBxlxn<;5lC_`h1oYT-zi?-ZJO6#;J|Whnp_axm<@8@5PX?DhWzP zrbC7II1FI(^M6_9M7TMFf1{s^ZCg8_9HmJjbi5b?;w9@^hVwNOU&0VM3biY(=%@7ps=}BiOH0ZyRJ;w& znOwmV&I3oCsmAgLi(#zX15E8Xh~}$OQ16u-xjObESg|{5xxNROY>vgUEg9&4IS7|{ z=)<8P10ug`7OFU|z@y_5(Cy77XqlCW_GJjFOIZH$<4112sxb+>qA7lJe;f9$8H-^r zUghps--&`G5t#WYlI}5L%r-Qkp$W@zK-reAGYQ7{KDI0v{tj#A_ri<`hoH?M8pPMK z8QWwnKO?M)Z%sbRM<*NN*>M#pGVI55>|Jzzqbj<2q{5N0;ka8)m2Ag5FlD(Bykgv> zFsBA|c@+tlLzz$cCTU4`!~ehqdbs$gZf*5cJ)gb2hjO zVc*t>cNLt5be5^iF!RI+=|D`~Y{zF-mBSb|XJ|P)fSX@xk%kCW6zKJ0+xT?Q|5uNJ zAqG@aP=M2gM=;D?iyt-l7Yg2);LyT!JWl-4WsxNfUZ?>9Q%{3&nLbsWDo^U0k7LcY zw=iP6DtN}oV>ZS@YMv<|frE zjR$RL*K!@+EAuA=y|~Q9Wf8_;6Og7zZcMF6VL1V7jgl~P59pfOEMm-F`;D?>o`iVd2|qV z?RdhEjd_R-YA(FxO=B9A^qya~#gdw~l;hh&mZT}w4Et^xVwTAzD0`|#O{+8b)>mgZ zj`e<}V+3^n30+cRp9jffWoiBD988n7q3(16^Xm=*pYm!PH{F2vd2i;PuldIIrrGb zP8I)}r9^gpyAD4FG-$VD5_o|p%lC|eU0>e8*91#iRHz0Gn_4(e&8wVGAkUY-dC50i zvd8q+@7RzvoKru38wc}Z6zmCvPiqCFJVh3C8gOLe)AvadDK{oDyin-PHCiy5=bw}rEv zEe#g7N+f50G1oTaE6!IK0kW*C(6D(47(ZBrcOGkzh^2biqpC%3r0Wo$)9WF6XEKy* zvtYe_JMLA$5b}LYEi7io}v68Z2ZW(E5~T$}|RCAiMIhTu5k=pTN{GG&yNv7kDQ zXVB$9m*`7?5|K>x5&7L)4?)F*3yX3T!!UU3x%| z=&qIrNx3toT9$LQVZP9{z6>8JtV4gjH@HGY0&NKs(EZp&7;*L>JYbIOs~#>m;ljtn#5;v6M79J7^*9yrDx<`kNpR zm?zufHe-vQ0K-M&u&cR@&D`SPcZ?~iUHcKG0|1W?C}6njC7jN3+9NgcD} z(RTQ1R}L;&9|ZcJzrpD5a>RbyNPK1^MW(O)f(^%_`Lxe{e1obXw9R-1l8qO{8k2rv zg#QkBJyIRtuQVchiUP7`Bb%L@e&N*08M8Dl3>)6aW4E>`-j~T|8Hyxv%8n+`IJAt* z=5N8@FUjb5=Q(4%R*5z~&>%@tW6?rjN+cnB_)CnfL4)CPs{6Yi75(O!m`n!p5_4fkf(khTO+z8JHGvAB$eekrL#m|3v7UR1bAz(-b znl2Rcnn{Lq$@K($zg(7t-@F19zcOJ)kTx_}o)Z7J?Fb$h-o}o+1?YUY8tT}+Zdh<8 zdg(TR=@?he)a5!zOYTA1@yndG;0auOcM{egm1f@EW#Wat$I<6;A?m(+#BU9?Ak*Ft z;Ir#rFswO0*VN=PzwUuK?Ov>e5zfZgH&m5|Rw>hOda|U-Z4Sm7n_=wEQ;bFN*GY%D zXVd;UL&~{_U=ui)@MBqTAkT)x)I5aAlNE{UuyeR~*+#rNQ=R7fsnShrBp9~#CALqv zgAMtmqA_PW@w-`ASEEHP+fg!1NmP2|5$nCOJg>hs z?$T_8^}D3VG#L|`x-mhV`rVtiyEvJXSm%pYQ=nw}LkOt21noXbX!62@40(JT>%~Uo zhpQ>^YHfzo)$ITLvXpP~*o$6;-H^wf0Nub>+?Hl*Y90Fs+l!lUQRoXu9Fhhp_nlGo zm@-|q_&ml+q-jL=e3DGDRjLU+tmG&L&|N&EJI+~z2htLj1}t`n!7Y=E>j1DZTq zp5C?`L|U{}X`T5aa5dAW8N;;b-iQD2UN*}r-(3Qt<;5`hrZjCzx5QSz1kNx+1OALn z08=#K&N5!K#%&>#^U7R~?N3gl<1LqVd^n`+yb4vhL6&K63ms{A(M9>4V>*ozRAs*CcVm>ISa0a|T!%>;}`F^1R=+b0VjE zN-W>>4??D^WB=T9%;`29Jp`;u=TVWX~6TMpk)Vm zC4WmA=5fx+Z(*sZ!{IG^k9~|iN3X!P0_K>P7{bkv92}TBnEbl=5yx+A!GObZFt|XL z2<6qOXNVyNkw_4Ac0-EK5_a~6V%C^S=8vl5z8_#aqjW9tith$ASM~~aZM?=!@@v2@ zc*LLFtU$x;QFL0#ls?xP4+*U@=)YH%ewWjtpS~*65Kv>>%RBt$VY0-wWikZ)mf~Zl zsiD*|8IrtS2ICG6rkCS`S^w@M?h1R3Kbg(jS0!!Yrv zEQMlgX#Mw(3vfP)qgQK_IqbYye%+e7th>N+8d~&L&?lVq*o2(@a}-uxA56=`>^O^W zQpCv7ibRZE3BF%ypl(<;yk~yyXtnper13KsU~-r9dv{(`t#=ihWN+IO^ssrnB+Vt>$*_iuCmxNot#N?@3IeHzuIMRxhL!MzoO>9EheAh%7LxXej~@OeCBO={-V@_%CF z27*q@Oo+MD52%Pd4g${;PG%Y=^w%!|3EDoNdvP%id*_*9khM0(=!b)lx%fuv`LJjIUc>GN{Z(z)10Qy_+VoPjDgt;L6F%r8XV zLB+UxpnTyVHm%*o?sWy?@_cVFJ@i)`a3h6pvt`{a#gD8mE~MdTtk93}@U zkaN)%WY)EZ*n6)K^GXeB9%iAuz8|9OssS_4kL6_iV4QVb>w_I?w|RR$AOUEo+h%d=V1Q-G+vhWqiEZ zEVipjWF7O3@L?o#QOQJzHI~ZrKV&tj{Jd%iOU@PDIM>6N5q_A^!S4E_D>1EpKlDs5 zgjA{hP_HN?Dql?@oV$l7D^H=9d_ATvE*5pBD-%Iltw@kLBiAqJ0v~o`4jirCkNz&- zaI%yJ8B{Do(%P*dC9W5jomL>;;`5Mfc!ksWoXsElX+d0{Ho{*G8~WdP9zE~qQ9r9m zECZlVqn+}&$+68C$u~hl_8tCe-7Rn$FeFd@>XP#O@%;B&$8p?EU9$akGPFHgiM zp>3}ewJ=a7MI(y19n_Re?y;n@uJ6#%%L49nn6o*!p4hj&8*u1h=Bif&rGKHAwyu=> 
zvVdhnj+o;}l?K*@kfps&Jv@${jaxSf3H)oqgnh@j_MmFCVrOocR~NpyX^x4vc!-%( zk7GA)z;jh*RD*2iYIjJ%(x>_)y3`F!Umq5QX-oK?h&YfA$cA3aCO+iiR;a3D+b~+|GZ(hBNvyEW zhLk@ARN1lwlI^1a46b5batRuHoPtw~o1*Za481n=Ihtjfk=atrch|20Dd+pZeYPA* zf_8CXGkJ09+hM%NA5Hq7l@7U6X+?r2^zp}iaUH0KA?v=$Zc&`SZA z`X{*Lq7~Q|FF}{fleuHp3}}q+PkhS$ErWOsBJrNU70ESn!dP9_@j4CGLUxyGsL3rk z@EMG=4q-*M3auH(vfvMH=l*{44O^c6gx2D4kc_Y7R$usz`TExMe4q^#shofur#&Db zyy!}$D)};BgK7_4N5R|u{Mu1wWK{_BH4d=3tCbFwd2tsNYE(%?_;?6$8Oj*JIwW}@ zox^2rV7SzhR;~TaUwza8ubqcb-w+pU+**W>ihn`hrH$wq^$q98oJWCkwqux4YHkH% zJUIql0FQbj8o#R+67CJd@=O!j*AW7<**sHreiTgiVO)&FcbswZGn5R}ic^HYL8PWm z1MFU7uu=d>bZ_xRpVRp0l0TSydpyWhe#PkWGn`~uB3%7!g&{K=x$OI&VeyRtOnH%j zcDWh+iqDqxCCl<9rd7e4sccS_t;M-T?T22)uY9@IVN`!=Oa{B@QAwPUDAm74RPWf0 zIcFI=Q8IH|`(LzK;$UMUmpWFwLELP2cXv zAh`v6L47#rw^`EO1uR!T-4|sC24R|AFnC`4%U|`V1(_v{c#sH5eX9(amByYeE>ZUz zQZ|6CZ3)Z04x)FDSm3B_bs!0w$h!uGW5TG}z+ht}!77XMH#^7P_l;3d=`E6PPr_x( zx=}6mA2fLvgS_f+9DkZaM|O?|eIwk;Fy>UP5;J$IIzC+|q-(t_$;UPUu`XqNVyiVM z+33p4w_FCbE;D-c@%aiZ}vcrDUlY_Ju4#pQSC<(bDmpKtleXBZ#Z zGBY>Ls*_**Lzfu0)WO70&#~d#U6HiaJsj2e12(vPL2oVgJJ_@i&%~U-zBqR@tQEqG zK{2eaRxJ89!IUJ7a^P+D%Mq2$0*oG53h_50F*PTj+od|1G4frwb+faer%sQKh;U+a z@8yVY(I~#ax^**_@rSF7iBr`R{4m6txVrSA&{zxm(o|TtTMSL|uc5e~xwz(fai&QE zUOyxeik5vxm#r~kkJxOO;$}^I4z@s|MFKqfZcgK`&%xHtcifmZV^Vuq8|I%GgjH3W zG2x3c6=a8SI-81drwq$ce(lHY-G6b~<2pFq!E$Jkdw9PGAH>^#N8lzfr2NTVTy@Zf z1e`a46;B%=A+4G_UTy_pVlJf)~4Kgv%`2buo~K51%q(=H?Bo*0Q{;op>Q|rp9kOO8Xkv&cfB$lJ==yH zw|R^!2j9mV%%4@b_8zRuxCaT-bGUK+3dAT>irfqql9Hwe;Cu2t)U%F9evSfJq+f~y zUWz2F#)EUep$uBjSK>;KFZe942n(yD@kx*k4SU<0yL6-;F+EnzggLJpU#I#gU)!^K4Dh z@!~g^JFXMs*0R2Z&u;$E!W)==QI+<$X+y7B88pqAjb)vSd6AbB7dY)YY&u(wOMC-y z?28!i+oXwi-vmP4(^tHSohdD0`=;JQS&%ralJ{vphTnvifdeu(CoGNv`?6DQT zzr#2M0j3cDe&K^!@I%0hw^1EuefuH zR>a^;JSW%gfP&Snd=+!_-nWe7txVQq zDXb@OH=B06Fr<4C?p zgqD3@aBNN$yK~QEZmNf1(4_zoeoOH`O?`67tr%0QRjKO|w)fK9p6RlsncZ1k@Qq3| z>i8Mc4S9FK>E|fv=15x-`p^VqR7){Q^Ef(8>0`gmU|#RE5;;GIu}h>|@m0W42(D}6 z_ykM3{#gju^KKtFE0vMpO69z@>&2t@P*@+Dhu&^Kr9-y%!_&EJj-jte|DOTyKD2@#EYcwV&*@5@W;oih zd3iw22h`lnW}t)rh`lmi@dk^9^h@Ru%m~ln+-)Lxzj!nLtV0kkyFDA%h=#$4fM}>o zj+OpABA|t;$w4Gxb^Q1{*)a+vbFie2&JfWjrj7XZKOMS8xfQc+e}s!7yap$LAosg?>O5;|$@VbG9+vp@nKn_$VZE*#5A6DS5|(MN z$CqqZB|GUb*ylgv0t#kho2>~%8ytda@&}`hvbfe2Ik>~kjO>{zONRNGFixf%>&wkT z(ZokQ43I-7xgznaRXZ_f;&0wNX)?xF+=jGHC92}`6|XE{Jgd4@d`5Z+SAIi{N?HVb z&k>dzx|xCNUYd}<9Zw;xcL$sl+`tA4DfGs_g2M6z;OQQMo(~@2F%50f_eF~i@e|Vj zejAfS)q}8QeJcF@^B#hhJ;bcxEVs1iJ?ad9iziIkH^FNgoU*qeDZ@&zFO+dg!r6{x zW)9B&BcvA|T9MeAeE8IFL{@!JB%&-u3=F&vIo>kTLg8dMZp^+@7LGXMQ#EY9eHw@D z&|}$n33?~wi;We;D9~Rawl92*Ib|Ef>AL+87~Tn6%Lb7*<~b1OehJ@&Ct|6akUFr8 zpK-!|=mhI}Yp$^0q$GdTCIE&S(pcCPpo zg*xv|NP(*X9X)6p7TKP`ymQB4Q(OzagAx>fmy0#!u;t#51NscE!EhT5`|F9{1taJ=pG-|o4HiO z4SC-OOBd>pjC)OdhVfDEQkfjcI=?~hftuL+tp!d82x+a`caT@hX57r{IO2Rb%c~di z8_#v(ne&Y65p$d~7^n#IU+L5FXRJxY7I|v_=PR#V5Q6<@N5VoG6WZ=zOXKRtamkI@ zAiAbW{SI66)$Gh5d30MWJ+4D1SUl#g-6}xUm(Orrhzz++qF^Swk3QM`2=j-j(YiNB zxOwcmfB$z07{q9SmDU*8blLU$V^WvAj!Eh|`G z;m??6y)e8j1gEk0N1_vR5It?U}(XB(_pYmIKXWjMB955i`cQVZcCoTOw$ z-NNKZ_y-%}Ex(8}c=L}pPh%vMFxUo zi7#K)yA-CqT?3oOPR8JIQZ8+f5xDx?VSTzV%u6~5l3)d{bc80It^1Mjpeoq>=UK*u ziRM)0b~Pxs51?kdLr}$LGa4(&(dp$vLZ&LvjO62x^rr~RTgGAhD+Rh^fCB4K#B)gt zm?x`iEy`r6lVt)G<`34T*)`8GtLy>PvVHHqkvHM+w^1m(t;>7=0M29f37EgU7OJPo z6Z}1t@dR{1G^|J}(NGgl^l!whs~;hv_yc6g{($ZG&ci4@<{vs*1uG1zVc5&5aO8|3 z8SB0hHhDP1=^M#dYWA4*Q6@-rE0mc7>^aDw3K@|w4W~36#4F1!Nq#$Hl=RF&!3rP1 z!N9C7|B^+xUhT57@J+?5Mx-3%*EKBN+`( zc)zjRxB)L&HZy!AcX#t3eAASRsBS>rC#%!Kb-Teu{}0yce}tJ?F=)2jfSiytGhgK& zu30aeTQyXJD5yTiP4R0$CB}-@GDcX-1eTi$eCOyl+lrH@tVQGR>eQk5Fw}{Qc;mqr zU`THXy#8WH8_atlr&q#ekSfxG=)QP+} 
zuL_5wSxI}qK@+>)b*!`u>GR@?7lF3bd678K9C z9=1|HEgwEEJx3a?S<6?t>Epzm4`4|21MufY;Kx-FC}YGjY3t%~&gOxb=>8W8XFz7? zT!Ai^cbHh)&Q~(-X}=^EM(?@+l4s%KEkk9gqH8{`+NwyROiu%88$b-w_oJVRkQ2G? zt1O`m!o?!+yWt(k|93QwcYrA6rT zb2V0}l){EKbLu!*nVh@C_L6sG=u}5t^7q$nbaq$)72zgyAmbyL_HV$5q3jtRki zR(8zMSHvrM%0TfeSz>)66NAS-K*N=pV4Qy$LyrVNqWu>>^^-h!B`edtVFq-}VjUu~ z6XLF;N<=a(OB}XGh8td(hsBH4XmMZxiWYRC)3&+N32&LN@U|iA5KZTEu3dtVr-v}u zKM$3v7>|DO4DJ}qqOEkw!ksVK|KDKd1ZMpyi<=KXXmpE<9wI`Cc^_w=rox|*6_D0= zbFx$93n7EGNOW#C%T#_7H&>p5r^O;7soy4kmaIz#7-eD3%xmJ{_uagExdM)u{}dvp z_F%woKXiY8g`c)!8yphblGh0eM8Q1`c-gx+WBEVKm}HA_H+4a2(iPr6a}VCX(+%VI zYLosQDzr}bjMVQ!AcQsMa~6@!Fs*Yd%P4yB_tT4T&k+%I>YfZ!+Dxg{!M_-|*BiZi zoH1*qEe596K~BHB_zhPBkz3lBd*LNo8!_%)sTmnH^d`+9V8ees7ab6MDePCx+x++D&Zq5n+tWY1V=NC|xu49sUrR5QoWG zEW7Cd&;33joLU73zuFMrq3U!>#S!eH%yUwygCki+`*d#rDt#XYX&tLkFx=14e{2$n z&#~EnyPLGXz#NBdSpi{s-*`QVJe56K3bS{y8J_h3F>hu}^Tx=a=czMTGLv=M72m<; zz8rkG)s{${H%c>2zoS*9KK(8lL_I!j061qH&>l}FjArW+SPHa z`9zpl`jk0tx-*wm?1zScemwGmF$AN&@@d=6VbbgGs5Q`#cqy+18%0C9Zni#EU1dmq zta*Ty$5cQtwV0D-&gjbXUpS?x9Il};0R;!rCVY4Og|mMhMVm%DR2r}yUFM}iu0b5u z2{!U!AwT$QQ!|jkfx&&MA>#-A!{axVNOS)=-qr0gOg|_`$7k!1R9D8Kezq8EYYH*$ z+ZpNot;LvhOi1!pA4O#^75ba`Cj@!VGV4TT;@JP*f|fI5*}X1@%S&|0LD}EftDOo> z{!vg-Y)r9%jf^aKe&U?!kp3w#c)?r$>wlW=(;K@9fceLN4ceF9ZHC zC$E<%2}`a1VA@W07oXn1jaAzTp31FYqv4OvHoL%Wn-x{NX9Vre<8iC2CCODuf*k*m zQl|oeIJ)#Q1ayDmwkp|?!Wa$s+^I%(zF7s0dE4NAa19#!H()zo4lnnqlP^y#r~}I= zy2ePftGUkS$o&+rpT8RIQ)XdC9V$tCm-#@!f<}{L7&S(b zKAgaK^8H?>!2$JIGnJao2qw115B>pgm_3GEaPTLgQ(k7v( z&5g^6V>`e!=8AOo!``^u!jh*?JNT5`)-1+YHM0SyAr?QC$B9A6V8p z0IGbaqjLgNPWgC%)1Vlb;U-TlTy5w$eh@v$&d-U1KI5woe^7j`4%R=igtANi(3gG^ z-R4B0)9*;B@0uoP; zW0|*s8>mD4b)_(n`9K7MyBQ6)-{Gsl$DnfG0r1|a4Qp1Wpj^sYs1_Izl(V3cvhmXL zC*L7wn-iy5qDT)E52RL;S+_FN2|Tx-hCo>t?7BV{QdK9yi<3G;vU`Jcwz~~^`_q8N ziRZ$-vJCY8vy%(bFM|A(9Be&3kdy{zv)yVRRJxjD>$((d`#zRsem3EW1wWvzax>aI zeFB^2B%;uroiE(1xL1W?u*J`mx_xDLYh`Oqq zpzKB~_t)?t$c@wA>L)ub10yg;Lni|Dsm6O!xqpu?V6=rTQv-fHLhpNbk}_<0*@ zAG{k6nEeAM{j=gfddz=3eInfM?}d;%$yk%Q5e&TaV9Zq|n)-Y=H_5?>hNf81#n(b{ ziMSA)H(uq121fh|@2lwIcpU;%zhJP+SswqyLfTJt+{|XVIcGe-I) z$ztj2wFMZDO;A1Emh?``29FjUFc@)_i`OZ`c`umb;bI=lW1ZB$H`n2Gi4F~2Rg9fV z>G)<>B{bd~38^dF_jMO2xsMSETTsu7BGy1t+W?xqSC8f_|AEazhJ$nK zShU=tPUrxZ4-eF#6N|E8$>gVSk99OopWcccpNH_P-q_H(FiR{hmnCmG3*yzd6!x8{ zLWgtL(MheAeLIcGE)Q9v6vE!YwoZ=jUlJkdtRC@~`3Vc!`tYZ`B5BQL{F_TGA0@cV z=Y-tkkH?qcxf!NJ&^?MP$~^-STYf;!{Y+JsaA$|xB{0L%eunz+r))&bzGy- zIq;gGK!fM0Vd9|>KDNIGyjNb8x@(*9FWBBE*~*&TLsP(px1~-$CcxsIj9r&B04B1G zR@fUu3?Ac&1)6|f_fPOH<4a(b*>61YT9cZ$3?!>=+{cht*3{eG6bv|hc)8Gm2s-CF zmd+bQ=M2}!X1`mUO?@=jZBe0;egj^>l2O@GQxcYVk5|x1XF176@%KfqF(bK&n}4zv zl%Lk(mTNKKBiqJ!s2sNWx?%6A;|9g%~62_|<$nNh?*e)Sg zg&yVaVaxp~C|!03{qB{Z;Rzc|9n;BmMD69*{9X!a3zfnB{a-$yIiGi;>Ed+PIk+PA z48(1l2|=nVbWkJ9wmNOVz&u5|>9Rg`8GjB0E=#$kSJY`y*&*l{UXCX!KS12_Yy5;$ z#e99g8Oa&qg#0aQqB0^A6SiN0`xgJe&M^}!xqc3hO@g_%^;n*HsyLIeHY8hp2#?tFZt9d>$w|>oe--j1#7h@IH^OKbnhRA zdz7ExkiK7VNf-)WpKFlRqN|)BV<7MTEQLpzE#P^A^}!a|(8#xXF#2;JHnNPa^S)%h zrfxrOk$DNCO9hzmAe%ebdKo9pvZGEv{iLtMo?_LR0ubq;7-ww7IqH@$@}L4qUZq5< z{tc^-eUI11y`gT;Y_?U@Y~#i6GZX;0oj2v`}-Ih_p?D-;}pY57}Warq|{ ze)eN`KPhK%nDuftPZevsoB^{)Df$%)c?Fg)+(&d!=sAkJyFHa29nB&CY5ePi(#X`|SUZvn68)ba!6}F#dc-~ z97CHr@j|;cIemo9j96AFb&5U&aH>>EeiABP*rdc;FA3F|)zN%QzPPEa^7WBs*v{B2SU z7h2gFVRj{~U(WVA)mjuLsG;#@57@KQmL5*bg;#@vp*(yj=7g-~BPT2aVY&$()x8T% z@3UZNF!Ps{*nxfgSzP?<6L!?EgOhtpaN0dF__gNq4i+cy>8?l2t5^a5;!3b!#$?=L z(g-WsJ+Oy4M)0m3jL3P2dTVrvzkLMGVtWqV@&7^GLJ1cwGKWtZ)Y5WECU7e5Srvq{E-Dg-d)HyPuPSjeX~z^ZOOhkAPkdeHC90-8 zM(Hq?NB30Yha5AZCORg>Z|f!W9`jOq(?Lk>&sB1T=X~I=++g%}s1-MB>tfzYA+}s8 
zXIT$r^oyCytAi}b_|(Bm)}PE&==coMX%Cs3`ZHH{@FthzRL6%}Gk-{&1vrF#27&00 zgWgvivg~dp+dEwlpVxYU3I);ldHWso^_YiiWACCse8928Nd>N3M4`iyOQ<>ClC=Gs z2$}0t$b{yb{419P)Mnq^`Md9fV0=Aiud$Acvz-lXZ00K&n7TgTq;nIb%aRF34g3{B@A{KnCmPeqdfBQ+)esAkmLz zIrvX3Yql{2ZEHnDuiBU%avn%h=br^9S3Bv_buH+$@C&C^Q-T?1&*KgD{92W>9ovGf zC^`C=`9b>l)K`0fNW)dfgP3LAGP$E|qOh`mLW2I{u)0wj8vA&(1*H^Xh zaW?h*&SPc}J3bNA4cWPH=v@f5S4Y`D5Ae0^AnLG;y<>fMNGngh<`y=Z6G7S1@$Mmm z`PWs+xM%Eh^jp`Cf}+9Vk>gL}9bI`UNv3?&q5^pE&4Bt_GRN-0V9aY^yn0=AG+$DL zl>v{r9W^#o(5EftAE?uLDRC&u+`@%UnE^h6?U?NT3o zG0uUW@OTig9Qe<5+B7J;0Zz39VMf|nj`JTtjMqni?}2KtQe1)dJ>l$g`V8D-U!q;S zHC4IX1vjrg#hk|TeDJnp$QsMs4lF}bo9~M`0dF|t^*2DFl?UOKaDIBmYizpo17$qT zsX$4V+iAQU{MOj>-OGQ#fq9DbwTv;T3)O}uea1Hszr)c7cA+6V56LqJdt$K_7#&t8 z+3RdbM`sFmGahalp~?zaf1m zMY$0Xkhgmb-q@AO{2Eiyz@(E?AIY5C-+w@=pDE{eauWCRF~!D>qoBH}j(Jf1r56~x zYThbqqU~~xu~Gk^)7xF*KK~~e=z0?GrUfyFyEC4yet{8eZu4|e5tjXm;KGM06aT*_ z;kk($+?{d`KI~E^``*@JPmLbEHzf)~SVwgIU=!Lf_z!x&c*Og=&cac@m~V9SCH_*m zfaY%h3QdpF(S8ME?y+pZ9F;Dd;qVTk+=k+l8w1IP>Ie{YtD;l$d91!^K~}J?!h|?y z^ka^}mbq-_=I|c7)U06GWJ3&?eVvUyp3DQoxeuz660P~mw&UIcS@OOp8FVc>iGd2+#T<(L@S|_pB<}xnWum!tq z*;%#sHzvzW#kzzy;;8WN;J2=XUlCRTUgNVk+aK1nZFHxdg z?3g;>0WbWzAJ+w#lJyQ}Ikg@u#>|cZ&FwbCu=g`MN1bNuBSSE(s)TQWro>x#0_=X4 z;YUsOZ}et6x`m!ag)2EwA7)Cmw|oP^;yq9ow}LmlR}Dk_UVumcF_tY%R!k{5r1u6IlV0^!xO55t-??DS)Wfi&%aD+WL8M3_7NdWkLwR30Dwrkj zDC*h-;?fR`3_gUu%Rze@(B(fLjI$Sm_wzLIV!<1heZ7pMd~-45g)MD*_y9ZunIoq+5!@~u z#l7A7^kSkVncDsg1$7t1bN-x%u83|FUwI66W^6%qhk8W?|1BaKpLLyb`IH3gd9wxiw;d9rlHZ=7iV6?iNl?DRC4 zz+K^=AG!$ftaH$up^x@sMqz_-6&_==q~E<3w52EtHaus4)?gX>MH!&z(^sjaiLuxh z`*L-MbojdHb=)D@Qp|`Q4(DEqh=%AHYKRu1Ah9cRz{3i(*3Lw?*UWG6%?lW7C zIP-i3t_iO}$!In9j#I+RFYe*3c6H)(zgR5zc2In{w*drs`QnhafyA?H7uGdBLJP+1 zwJ*rzCOl^SHVb*GXQWH5-&)dGHtW$aEQXE)O3>jy2E5BC>vw5!uRYC3d8av*tUt>) zPEUeG`He{W(lN4d6O8IMB<`=*Lb`Mk4hU2royCmDZEK5_M1|sce^4rl<$c=nu{13N z6@C>%+{h*DyO0kZKiA@)1~y|`b`tJx(?P$ZU-^*SPK<6`2(zQD$^L#@BGkIU`r6D1 z_D%&$v6FQHvOp5oA{Ctb#=SK^gSXXUF>%iyZ0563_DwAm?QDZpwlQcS6;RVp!C>iX zNR20&(C(#GI6t`*GFHCf;@)=hUf;uc-HVKO(yvd9`5n+zyc~132*qI*@m%COIoNXV z3GA@YA?Fwurf%$X&frok_oUZ~ED)=ZukS+f_sL*fG2{z~_k3jUxJv2XadI^H5ZkGa zcjiUejP)Lzm}F;{RZKt zb==TXvSfRr6;9Z0)n zg26$9<=UO3@Yu_iWHm=ZXMYK5Wtfsqr9Uuqwt!4~G6s!qs?v?@-zoV~5D5Ri=8tbW zgu;*{IMOnJP8)4bE7$7cY#S{y+e4lNnhvCi<@dPPyV~)-d?u`5J+}jgo1n~m5EYur zU|AA@3cUez@di~AXJf~A$XwvL1S4|eMKjjL{1k`gn9^_aZOHGB3MB5^3*LZrWRx05 zz`E%sB%mUWpW13hMX4Up;4Pr`^(LHxTOSOX!ukg9^HK1ea*-KYcp~;SG_qZFIUf$D z;+HT{trqGws-qn#;=N+8ag%rJl8H|~LyBc8=490Hyn{ZyKUkBdmRf`B^?qFUM3cxn zDicA}txWHM1yY3&##;_z+_Cvtm@sPs1`gZ7d9r6slj)rrBKa3x?I+7o|;yqhWAC+4jhA3%Go%f?L9iav7kpURN&Q%L97Q_B`%wr$S2;~j2-G% zApnZ`y2i88&gYZxzC{CcS{T!g4hLQ`x-|2!_G!ppr9dRTr=^nVY23;S>B#?PyUm_E zu*r5k<|#~q#?!6LlVe94#(V-f7Z+$e_XY!J56AlRsx&pSj~~<=g>Tg>G4}d**i&pm z+*V#kgNtAIOEeCeOrOCAw)?r`XGWVRE91IZ%4GYt>kwITAIsVO!XV==X80yR(Zd=L z_AB7m%l&K*z8qegv;C(n2O{h3d~U}#*mYDy1lbvx0d?2-a#0WT`<{St)0C-b{we9t zzIc@EmauN59DV&Sf(a5lU|$8m_^HgV_Og&4t?~`CR)xYzHlOJqbQ8wbUxZ#ZGc#1a zhGDbSIj@9+kYC=9vfUvNl6MoWuf0Y;hmqX)6lFRtQAFw*61eMPd1}`@73%*B#!=gZ zM-e0h2KI z&~bG6^8lPy2aElKVi%*dC-kY{4sYy-K@*Rl=(7$taep(6YG5vt zRoS?$wik`ROaake18#;wJ|z2;VU9)@=R8uvg~cA`1#Cx^UVI4z9sS%4;SD^cgQ(SD zOe+WL!(Z$17}hcZlzyM(40}Vt`R{)4HSD7Dh4tJU>0?}WYB(IYM&Vz$Drq@a3O`s6zsvtRbiFZw z;udRiVy6^7CEjIy;A^lxAOsRO$)Wbtzi=Qo1H8xGhX2wP$>v3t^zE}|^wN`qJsD}Z z@6L7*M1JPS`PAc@;tkL%6T z(}y|ux5gHiWJH0h&M%x2l7KG%!XV8~2g8aR`LTD$DB`HQs_bb9G8Iw=zHBCU_H23d8Qb!_am|s0>U%%O%mc##^7JOA@fI`U$t;u^Z-$ zxx($^Y~Z`lyHM6|2IbYdqgmTvs)k*)&5nx>H4IcLg zF;`VYX3(W8F#6#qG~H1JL97cL8<>S*y%wCn<%{F`=*OJjdR;hixec6}4XEfdFJ8Eb 
zWke1R!p+2thOnNa!?;ey%B{$BYr2fWrWug9{S0TI8jF#Cm*KwJg>c&G6~1@3CJED2 zkdx6N!I!SFJyanV`kA?|Y})zK0mq;)Kb%XA9mTibtOiR~@|V6V1cT%nZgWf!7S+j9 zqw5Kn{Fu!s&)R{ul8h{rfnNy>D%HN${L4+nJTDVl2&<(!y;u+<~|Kuz3LE z%soj2wIiz3%drY?F;79==f`}2;$w`?4&fHPyoNTe*BBdsIki_F!!UzOaD>+(E5fPj z9YAjEj>hN;Lpc6KpQiT5qOsj(G$?<<4_vK9B5O`yeR^Iimh;un(b<7@(oeGzpGSvON7N6tNjnylzp>cCJ8g6L^pFgSa&8z?$ z4~>Mw^P=H&yBB&d^v1~rMx?ChEuUf0%T2o?V7bB9nK`~o#ODe$$sAK(u$ZAi7j3zY zL3=ylZ&U;J>}tm7AR)au{RTGchjTy7w8$J46G*X1L=PQ(I_4$w#7s>^xt)fX<=%^l z>lVP8p>ovwL=ive-vIJqtSn(B5h8i1Dvg^m1Zp1GVFjCcxReA#gq0l~c4sc+KbNN) zV#3)N_$sGs(}mmj{DqjYBQdUp`QmE+aXt+_FjQBQ9C{}rMU%fW4>eEko1Br&*vKk8bI}FA?5ix%V zFBMu(gdh{fpiqnC#xo9td-Gq;$x%(L!uo@{DyC?>cO)8A1#zMk=cQApYSELE29cUs zQy|Kcxffa|!3)-h%A1Zj zh&Gv{u=Zpe)Mcegk3TA5Y;6H(sys&TH_E)@Z!CIi^K7wr#x9=Qx8X7Cxb_M7wQhz0y5KvYUuJHKSqN( z&tlmap&?@!-xxouP>pPf{tem}-$Uc5&G1K{Led}SqQ?V6sC;#p8#XQ;qPj1DO(MJh ze{aSM%9m0uuJ#3)Eiguz;VVo>dS_J`{B${CxJJP@1V)K1kT?6i9OLt*x|68YdZS{6+V4{ zdFl$J@G$cKR82|c?pM4ZXumkW~13C9F+BsFTPDlafv~A(LlGd{OUJ^)e zg+c(P@Hw2PSl~Ai>W+s5$0%<}Im{$bKd=H9qIRQhE~8>*6bsHA^`|B<3dbl0ANg{>;Y z-r0;sr(WfEB-NnSrW3g181v$GMDtD!iPF$}9x%zB`4n>tpzNa$*EAs?3VZz-&%%sy zJ5<=WqykFjGIu!ZAsd*z;Zt`X<;K1Wfy1kQ;l5xCI%LlYaQM3&$K5uixfPAr8em8b zCQgFBQOt$X7mlbj9+Hi!(A(69AM)=Jo^(^9iCVFI`Oo->e9PlcU@bjY z{`5XN-y>k(l*~uiKZ7Q3O?tws0-rgi`iHVh284(@y7dBLku;-}ZnpkLZs-X<&?npd)3%8v$dz{wMQMYe!w z5gY2grb`_53An(^_0Sz20UaZ4xmw0(gzwg9uQ3*8)feFJMaJY{p&9MS=;J#Wb6b?9 z!KanSpr_Lvh#P)Xdi*MTzvnBXuF@n}ljh1eVYgvoGt29wUF6G-4dz5^7@KZc12(cu zX~deBkZ$0FqSmpzoQ(uT3o`iw%|#IX+>$wcmSLNkHeIr=2G{JH45L_1cf}nZBxV{k za@8i>qFs&M(bagFaXi#&ozUvlXwdub3x4Y|BD9&E`;&_>b+{QdUD*!fT9wG2DR$)2 z!3EH@%N(7Q&3S>{`-}^+H}H+~H*g-Eg5HcbY#}=y^*?FR(Qen^Z}~BZlaGS_tzsO4 z!_Zz>$?p!eB?9$<;N5!)3U*WI)U&4I&H2z4qC)*MUclD*8YECV7$)buK=smi#`*6M zOEdbRf20M3P4tAyfH81(tR?jveI7hEYvR_oYNY4FUEZUh3U?1;UXTYC^qviy(YxHpcDaCKDR&=YCs2V zsKNqn5~hEon3a?TrEwkDw_2BOV*Ol=3!|{??`^KScK|sZ<&1aaEJ=5%9WD;mBTfb! zZ(rQNtx-~^={qXmz@CLDG!sMEhaGV3ur^8cxq!_~k2Ov*kYve(VZ-r{XgEI!79TVs z8~t-JGI=koFHYbr3Ui@(Qaq#$3xM|{s+n6?fsWj1N)o@H;W}h5^K~aza&we8ykGwv zjy?#5I`=@Zw<+XPJ42v+G4sMQkIG~}70RDt^B(4WbKd=h6W_^#gyqZ=d`zDdOx=yI z%C+cIEfrG9c=AT;O{wtGF+Mpy9LL1o!giN-_-o9bTkTRj(olI_l^EQj;5M;=|m~sDv=?H z&br_~q8gl2qPVvnML2!qTNJi1cZrQ2O}VEDxo|AN%|7RF7k5f2M?ffWr! 
z;M&ey?PU~Bhp_!mWiNQIeu;^i>2BxbsrZj6?Qk@l#lzp1?ogVCGpqDC-w^zYfRL z4V!o`d2_6MJ^*5@(!st)%Aw6_4ALEs&KG*|S*!sS{!ZXLehq`l9mY6xu`=rdvAM~m z1Zl{OY`iqJ4})~tz*6T2eMcH zW1NmRShLg~(u{_}DXK=k<*Ja79@cx^U5$-*mSOG2QE1gKLlgRj;F4k0&~hk_%$;!r zjuKfatXTnmUelngQG;eFy@krdyo!O;}UKt!AQ3ES)^=E^Cl~SKq=_a#VW=i z-`9;kD?%YHT89?B>Bd)EZ=yPX5-TN^Ah0?wp3>0=##+CyFrtp{PCEtm9s0|D{L9 z|6#MvOJC8c*q2Z1^TG8*4wbZ0aYR}Q*5%%YU%zyTc5y2>%*#Wkwf%g*sUme>F%3_A zaAORSxxB#PkMw!PJIq*7%DZnn$?dy512#HXkgl$$czDr0SoMLiI3yeR(1xoZu=~Rq zmo>8tk}4goaRfp<+1>iY1=LM)g`OA@miP%t+&CZ9i83Z`?^)+2d_J$?oCB%FhdE*N zX3k*SRcx`*rU`7vb^7EU{B^7ayGGXHI(F`~`tlnej4&n^J$cNb+Qa9JTP&9T_zZsy z2GC`zCxC0XCDmTniJFhKiPv~t9Q=q!=N|@m@1HC+_#FdIO=Ec7J*p^NrUiAr|D)(k z{9^3dI6kF)Gwq9(DQyVJ^3>edAtBjAb`e5)EZKUpWeN$Q2&E*XRVgYp_jO2;q)3|- z2}za`QYqy6WOs0oDTGy~VL0%p z3j0=k!1zJur9~lYaK~LkYI^$-Tr*K4=Kq+n{dzHfAtVdC#x+3mICjQQwE>TtshD7< ziQf*Wk?NZ>AoD^r@5-2dD$|u{LE#>$@l_Euy3-7f&z|u=dg7r~KZz3sJm3VD!<Ve@a&OpnND_A*^v3kqqVy^uVTCm=V z4@=Rcsay%xhaZMBkIk_)=q)}CsDgrJQ2acl7^z0t>#4C1q3L-6?SJmzzIFp{=~5)0Rac<3e;fGejsb(}f1zVd1Z?UtBY*8R=n0S~TAF=u zZzc0MHk;GP7;T#URh>F`Nug`xU2r_8j)J%l&TDxiuWT}ibTqJxZ|P`$Tt^aYG7m+G z)lhy(wI!MSV z7f}4-GUVp8V&T{qQ1-7O1`hoKUc1e~UFwfz#*UcZuSzr^0WPx)$RDvj%?vyQm*w>6 zS7`;V9J&l5?-@ap=|V0wArn(a+`=ZUYCOEV1uJ~U;aGLV{a-G^n_zQt+$o#6+Ge5r zkPuX4d)YC8Y@SHVgZ(upK=YN* z6Zw=6ozu>_-f4ux>rX(q_bzaDz07&mrl7E36PIz~D2&TH1Rm|PF(8F6;3nZO#Wz7lQCb=CM5@z4&A_KgND!#=DMVj z`EOO(vsNwXI7AKni+W~Km@C^2dzEg$$Tkfkx8*fvzqf#r^xK>?ydEY#Zih?Lhti@5 z2Xw10p zD7g+8`3%*11jlD&3`uH7R>nnE-JUF6Do_2S_ zde)ykG9V%r`jPOvy$%!e*nJ^28)Dmr(59WcA?%_S-N9T;i}NSp@uW@I1OI_**=_t8 zJA|b9E0apkDu_2Ml>S?zM21aa+{{^jdHL^_q~>}I9@k~rXw_NVA$Qin9eWJ?uIaJ- zc_>u)S8?L7(=b+f2x)I*4EtjiRGJeCRS#Idxe+jGSs46Yq(jG!A3`Om>#^wd5VX&T z;yXPIaBgK0ba_9<33@ZI=R+qa6|uYTW>f06C7Uz5HoPCCMqLEu%>8~J zB@v7H=!vqV#U_yD`GW8ldx!a+_|4vJW{$^L-*2Mu1th&UqiU;E!0&=3?%ej+ z^A6+FXK65KNCEnWH$w81IMltNL~dxyf^GA6?sHZkR)p8^CUSa2$vTYds;&WXdM;Rp zR)XMTYyN-Xs&r+gJ)$y`Ft6&^kEKX0k7cx5kr#m(2y*eI*24?%aceIW7<11m+g|} ziRESi{wmUygT4c5l_SCBmHd%vqDk zxotnh8y~B}pKEQ1bC4ldwkZN6Aq&N38puBQDF0fsb+J5*t=09kbM#E{w;u7Y$@OgXyXvM~WBqEga+pv?f%GO}}g>1+?E#d3~ z)G^ohBpCh90~Z@b`YkX8mD1(m{GLj9SbYQqk!t+MAhzE+RxNgjQ)B%0KoHFr^3`lc z2|^?6!f)_i&XlYlZB4v9^r*(SSIleS4T1@8ob*O#LG|KMm|szYxiaRo!$O|dx)%oL zk7-dsYy_8KYfE8p0y{^U(9COXoY1h46RO*B;wgU_|6m*#u)AINl(V>Wk~3)RABk>P zRZzt&7E10mb61E7xo@3=f?j8SmxB)|J?!B$9Suo=o4h#u_7*O0Q92hq*bftzl!L?A zBWU2f1@j(n1FwiK{=jfsN=C{N=V-_gmAG;|xh?W&=0HZV(~KX(%dN zz{#37z?_1?WUAeFTp6hVet-EK_0RZF#fT1yW%G3fJ7+Ghd% zc1??}Vg2P1zNYjhb60e1IKgGA3Ap<73Y?v_6%%Lw!tmj)oX}<&XvFK&kuD}IH>5{| zkvaTH=A0YQs)8`T!BEG1dG%W#g8-K2u>mgmY{Q%#m386;pSwWMO`FDkYD4#Db}-LL zfp&zdbN(%1EU$l_6J6d4!rA4VpOG#du6Pb4s_*&A&TrURIh1CPnFF63m}?c5p(LPQ}dp#O=$!ODIn)0M4@-^pn>nFF;LKow;2E>wNTkv_NiGrts4)*1C$6$zmOPxz1t1yJxqhh8g|BYxSJA@0~bY#BZgN4ne4SZ_}p^pLL4n4JS1xDRE1eUxL7@6Dw>#o&kJj(P~^vzmbQmj}4>T3tnt2KtycDe}!|Mv_JKfVl(Ilpn& zgd}Lv3xSu{%!n7%i7V2Lv31&2ZvC$kbUeEq7vC|aJGMN7cd;*FiGmrG^mvFr%M3u* z1`l-aRcCX^0kOucw`jUYg3D#Tf;YP}=SIrVORh%b#+LDz`cDVU{`ei`j*G$f&L!09 zdj>^cSq9rhk(5|Iz?QprU__P-afn`l;d2*4`~?YkZWa>jh#OG3+!9xOx1eFUi=p@I z5wI^7a}tZTe6Rfvqy<(Zxw{KafuF}}6cxF0>sKg{iZ@#z@7`5D;zkbo6*3QSzZ|Wu z*#aFa-|~&chIEiT%ep_zgrfVJxbE>woEQ)TEvQZ-WBGt0ejAoPiOn@Ck|n_t%=PYe?5iBpQG{MlEGx}=Q5BqD1ppKA-S3#fqv05I8p6Y z@G1$D{uU;oUkBT-Bz188Huunbl{&p|uSQD0j6iEeRhrIn8s819NvF3d>^PMRE+H)A zLE?Ga+l!#qZZ&RO!e(wCzrv&3q15lu4Ng>Wo}bWr0(=JIxcEJ5*^Z@zf617ZhrbAE z^0i~2F_HBij6U&W?PQ2##6vE%4dLa19$aa^2nx%$;uwh>@$x)@w}lF127ed}JDOpl zOfN3^?+Gq?tWMtfR)gU5I%$)S0cUUO#V;LqitE|$4PL({ajD8JaC)>gt$nCKSF>DF zY#j5+)wO3)Vf?F(UD; zFQwseVlG+11Ts5E^BHr`q9Ah+jM|?IqT9#09>sf{t$sO|`d~4Rv19qnhdKOL+a{bC 
zbQG+1{AJz3-w^IH4TBXgptcIIL}$FHzu7M`*299 z4lUz37-P)#I$9Jxn*{_*zoSM{GjxRUTuqN9Nlxuxd~n8XkEr4mk0_DJ6V=I?uzavM zc^M^c@lM9-E%@r$S$sI%oRsa%!+t|e(#sf2(=`%V4~*@hEGscSMv1dOH-ophcpq2u z`50VPf(y}@G?s3{F-eRW;%&^ijeN%$pZSD>6ZT>s<0Jfyt+LSczLzs{`U3MtnA4Vk zR7eAsp$KTlf7l%V-i2VsHJFVrJ7s81DPt@;v)N?w5$OMGMN;k>k$0Yem%rqpd%G(q zXtR{E#0l#KTn9;ao|D(rot&#tI%rLgfdVHVaRCkCwTezcoH!NYhpgpXESsTD6bUV# z%E5c8C0sUBq5D|h-)prx%aGOZTiW$#d)Q4jFS!X@2W4UXwM3L$bcFDYu5iAg10+`; z@LMx-A86jXb9IQUqYvtQxrNS4&hQ`X)6p%dl~;Q&4^CT*N$bW0>^)Wr)@-jkwf`x` zg$99EMn8MsZE#XC=|mN64H`bahL>Y~+)nm>%IQ@j0llBm+*OghVqI+CXM?D977rC8 z3b_r~hJ^-yAhUQqXSDhP42))8xX&qK$9fZ(_w@=YOWxsaGKiF@Z$PoOFWL$o^UboV z_&!OI?qJz}r<-SR&t)|_;o1Obf4YGVZ(hOjP3m+|oGd9_txYTASeK<$M_RK+L_{pt zZX>ZG(tE{NF*K2TTg&dI_+23qxH`WPe%c!n8G9Kr zY4~OQ!m`ee3*&j`eIa~P;Tll0U52LB@6pybieGMV2V6@2fLF99zh(wM~W+60v)EwIPPm>9CI`&1F!??N0FBxd`N*#DNXCUy^|>vUt8|IQiJYBS*HEe)Eyl6f;SL{PFs4s0jN z;5iK~npwYz+y878{u-f02C*5@#Z`5<$R1xM|-|a@FPsly7>3V;b3RV6O@9ve1AIZ$1H`+Y@>JyC1<> zQD5xUd4LPwF$?8vBf-mMBoB#oxJ_S;R+@fteAg+W zqP2rSr-=EfFTBV1wegr@t4adQMd*BIDHqbhT#Z>n$?PZ&em+RSP|YB4U+|anDUrir zH)Y9kt{5bDWAi8OT!f+{|G*a)_Fccf6L%@^1|9u<5EW|=hyObT1?FG4S!LGLJ$My5 z%-@3-^=m=&&vEJYu~Dey#PT7t?@7rwQ)2j3hA2Gz$~x2?Sk|`%`|5OvWXC$VwZ9ua zomL_yGIPy42v%K)4hI}i$?hv38MPcz z10O&gb5zC86mbcuO4MOZEBqXK0`2xOE=gzg&z-y@ zAIeF3+NB+n=JQ6wk3%o1hPV~&u)STEm@Dg%3j$Vh7T5}F(hP{Dl8F?relFTpd zo6r8PYjE2^hh%R5z&A#P;?r&(8hnjN|8!dt_4pU=e5*o|Bieay)=5epZ3S}UX2LUF z#_h=Z$n7^X#(Le?XfenELzX-Pf4fKEF!DM07(GB8wI~?8M44#i9K(-?&tlT^LBz?_ zoIJ8nCW)R!5Ky*+ajD8VwX3o?e}^ftpMH)P$gk!lmCn-4;R@Ka{Tphm{DSV&qfvOJ zl1m#QOLAuGk==8db7k}$zHUhZSMjL;^A?PPm8Z9XL!A%)2zdj;XT4fC_9T(7)A1Kc9D#mvybIu4)Jp@<$)dx z<@e0b=B(4F;TiU%^BrpgNr_MX}5pkR_4!#y)CnG znrs=~x>y5`e%TP;g}Su$W*TRi)`;(9`eCZU3fQ5w6#a^-`JJk&Nu8yqB z@ss88?H{0E|5I`N#&~X~K_{dy_g~H3YCVVh$ zxzY-beJQ-5s|kT}Rq8ZUn|S!?g_F_b(X$k2CUOX}F(!K*x7 zi_vLAX~o-2KGH>zj=Cg;u=xTkSSI4!qgUa?4c)MNp#iCWy9$aQmcd%)_%-NJhR9mh z<<3^4Rf|o@+eYS+a%SBm%^}n)VGqu5QK6x*7>2Q2K;SQyhmJGG{M$Ojt@92)F8&1O z-fRN3v;xL-$uJ_~mMAP-R078eB<-*1Bm>FbR1&(~25@pMmZp ztMQ@+n_IEpTxMc1mae`JP0E@4%GR&A|HnyCsma6kuy)wX@|6D?Xc7B3#vd8VToQKE zxMVjOdUZlCybEszgU`45^3?*;b886-&y;b2c?+@C_$%vO%!0RXgk;f*2N*h&c_r-* zqf6p991=g2cs^7nq2}wM{+JCt#5yLeSC|)LuQ5H9{Tlk%v)|*82=C7dLXl&~k~t2h0;>aAtt(7_kqFo-wxJD{E{rZQ$1l?xNCNS%|$C1kQt8xRqN1At07HjHJ79 zm*pdDxnf3V995&Anv{u`J3z%Q-VmR=0_)kiE6rD%2iAKV)jTGOkSp)^wTD z1*$#p)OZg7~gd5fqKFF4tf9ZMv*Cc%m%n;Amaatpkv&0Ng8o}=biCAv20FbHx# zp>&uPJ;{2)1#atjiRNz{>%o{;6&m#2iZpPv?8HBg#n?3awKUQs1kSY#p<<5+5MI-! 
z2jWh#Jg7O`+4%)yv%WHaE^<4&%!#w#My}_~A^5(^oSyyu3^xzzLl<_ZdcNT*V^W;t zw06Eg+XF}WknKE%%vr$L1}2=dp|4bQzl1yZ^bh7d_=8s#%hHVcDiEF1qiTAappE&y zoyVo}yJFSoj{LWyxj+{l8(5_ntY-H`U>(fQM-CDxCY(ra&`VzM!{HJiIZ`BS(@=sNiFp z)2e&7@u7z$J^5an-qN@Wsoni>D)$}ud&Ps~?HzvE1{3nS>NFJYyfYV_3K z^^6Ib0>aMWpyUlZf`0DUr2jJ<Fg) zf!7dlJx~FivZ~aw-4(;d+1#R76QWXI2@;pcd_8~09(2;C5vq)7l43$PtNMexeIcJ) z%NUH09ARr*5-iIaLM#Hp;ZfZMJk80`$`yw|lHG}>JqEOPUpV-=D^vA7LL$hqpqls0 zNWk$IoJXG;wip$H|Lz#zQZ;F3c`y3kVs|X3LvZs9%L$FOq1Bgt@H_k7dym-#wyQ62 zE34*%y<$3cZ@-1z^Hgbdg#pTTH$uJgGhAx&5$pYh^sY(@Y+cISFE7~r^7}lPbcxmK z)}=sQVG#d3--5JiH}PIk(cH`9+GJgWHZhqYAjuczVdM-E7Q`h;k4pwqXQvD>oMlO3 zOJ!(iUn6FJ4+M|NTFgO~#jPZ%Ai2c4B*mUw&(v5xq?2`5*qkm=yA;Mujefr-k34*<%{czE^@9Uxr}N zRTUz`*pS`FEUDyuqBuZQ$g4`CacxaLY=5jm+)UH>o>w)DTRfHXh%}_bE0e(yuE2+W zGrHk#5UiVFO)}HR@cP#bNVCjnJn=@IzG*a}K9P#FiVhTF;?G=L5g$<7Ld= zq>1*g20`AEubgI(99i`;oAp29A?n3d&dtw~{{E^oA%hHKUpTVU`B66qDfNm1A{S)KB*k3b)nd%~%x5$jvtO!Nnfy-cV#hN<0 z%fRFD^3>G7oH0bJS@)E6e@FeorIqoVP%aGJ1{;B0m<|0?V8Y(VJvcGL6NikxhIyT4 zbXlX2xEgeWbAq`TH;x6trve^)_P_z77r4MFh53Yir8;q&;obUNthuj26Synj)W>#o zdiLBz^KR_FXhwM()&uz=@;~!O6{G9b6hPiTHgJ7&K@KlVIGm^%{AZn7Z3lbb>7%pbhHTaF%ntA$O|pGh^Zgy282 zn)Ji0!`M^i1AF&b)3;X#@XuEZx_fy(cKmeZ_H^peIX49~FzP<^4$1(J&HiYUFn}w= ztF>bdpajYI|A}(b%J) z_Iet;S+7ISv3>P!!zc_{r$XmZ1+x9bP};$AHorHq`>4VvC^#A+*1TpxQs>_Tmo4S+ zBQh60k95O@^I4zn&q1^^JOraN1Y}@?4z-Or%eA^4$IM0JI6+|q7f{CMwY_kkzC_^YrtXuWvgT9+x8t%UNgIb|{T- zR3X9>z`eS75W+RD^5=*#d6I2I;#ZV#aBdNVZQKlYWfMVb@fj#v5Q-O%H(GU`06jkyK?8uekWwGC~|c?%B@s*~ttUAVtg2~u}g z!&D(-Zog0im!2SyzF}FOFL5yUL^8V9=y86klYvg`!A@>{7mbA05^w@namgQ6*cV-~;n`1%GA$VRRqEFAu(x|k3 zVEgY$?t!5-kwaA~`8$ZVcJ*?{(-~V_lPu ze|8&1R#RbWpdz)&tid6v*2LNWmsqkV)5*G+b?xJVxq+T`1Wz4WbjuUZtUCvZ;(VBT z-4s+tupHw&5qP~l!`WB+Vp7#lTwZz>w6aS<$Co{yp8moYZ!@u?%^N=lu|3k+=gb*% z6k}K)cUP4xT`+YimK=}aMtWFL`2ZEND#;pKXDQLS^I9NhOeh9^aK}2wMlLSk3HUG; zu2Jp)O82%v-%Cr{RNW`8Ecpvxmz&@?K7*ZajrrC3DrBUU64}~*pU7hbPgz?$lIT5&SCjmi5zX8 zE~IPg24L>OR=DBEX0w{9c*ok5_$D#_gzy;O63lYJR|C=1?<;EEjs_LCM(kXphy{TS z+z~cIv|`K{e>*c;IjIpOP9f4kj{2m5`Gl(PrNQe7$FR`hC)#ZIg3|+v@a97k5;iFw z^IU)MA^V@hkL7ouvSA(S%o_uv*IE*VuzR?m{sVhH1v*7dk;Qpt%H-*~c<8Y2!`BxW z@5$ca6|%rl_>G0*C(99=51e1`V>g}kMX4wig)%}rSO z8SG@fVtB&}F10QL0uOzM(4Rl~nM*@JX_O@t*B-^HVjH4=(vT2C5lKDp2wsM%l79{> z5@y>4;rLj{-EtOdZ`PvAZwVyE`~gwgJ8|@^AylY1i%%74(}J}!umci75;PL$=D&du zkfTxa9&#@K@EXaQ7h35DWIG^wk1c9yM zSItLYpnMn>MCsF&5vmxT)6OM3vRw45o1BCGd^E6_#+(Je#Q_eRVCH-QU2BvG2WBYK z4Rfx7`}_fJ?{W%$s*8XM<%!T~9kiXih8wJ(!n;%psyD6_@>C~)bi*wCn5jj&93zaJi=hOiRzj%e8>>#A^ zdVZXwP>)_Mw4_O`mP8!T56g?vAvoYG>`~CA@}@!PoY#RR>!!e&rHv3dN1ySbKENOG zGALpB&XOK4P@ATPxx-gt>z_96#z_Hi_f<*ngjmK{eCU&BbmrDN9Ln}Q?l(7bKUlum zSy0LCI97*}R88cc{K5rV-B5N&8H8zzVCo1FE%Gs88QQIUonIH9eM|!#M`mDYzXJ98 zJ0C75TM&`S0oZWqB;J=XrEf;v$LmsEdic&Rurv%n(*)*ujBVpPR3pIqPdYdYd&C8$ zveI!^jM08l8?RgV9>2$ni1h6%uuYkOOGj>nC0SZjMxz&3EH|On%QvA}l`*OF{}0Zy z&n-5|fUYerfOj^jDAK$DURxe;d`KLc8Y$3Mt$nyFJ&@&#E%;k+7)zre78~EU;Sd^Kq(j0;ji+*OS?sz-v+-#TbI@Pyl1 zQVzb%2|vss26uV*!5;Q|>2s>ZwVLNKm-Vznn+Wg4@m$5wV2n0nJ>A{&(IUMRHtuA6 zzWzim=bafjrJN1!zJ9z!`2ah2hrsuq!NlO(9I&i0hmKLVIHg;Ux%5`{|J-avt6X%* z-3M9F^5-Xa`m3)(LKBut16z*QV2}U{ZpjU~9oCrNqsB24i zXe`8S%wrrMUBLe%Z$dpL-vlqOVQ8E-6JiY~;AYhrTz1%qE~aVV{dYg}zx3nqY-<#* z8qVDbHze*G4f#CrcYf2yB$iqJA{ML@!B@RP^z~rRL;W52dWQiKCadvrKTp6P1%0~u zu>~m>@(}aRoQAiBbCTd#XbV>$_GOnqW0f(rf3yipTDy6J+=JjOQh>%P9lG?LHjT49 zh6fqnu&karG|#?(PiN%Gj!hj<$k-DL>}sdk_+}mxKyT1{phboZ3 zwJ+gLPZN%+y#U{>*^KvlqtwHTu?mP4d0Kv$vGw0zs6-b|4vIlbu`8%EzC7NMVeYsO zppnMT4;u5pYmzc2a2%QUn6YtYeYk<#uW~&5$CwIjR&noiKEdRjzcAp+cVu&Fyn6cx zxcyVl?O$mJhZ8qK_Y6y7TlRvN$T@N5eP$#t>INs-dBjOWy@qi)?O^bmp+wSnSGx69 
z9P8or@yX>Ex#Rc|o|o$q!6z-QX~ztx(_$T(qyyldZ^?TJ=5g-#R&(AlTfnxvgxd^3 zV8F81TaGFbuXpLFV$)q1XT&&*0c>vhZ!7FKF`(Y9zwlCMBGx$^z}BtjVY`DF z$xLkI>~>V)WHm#olsLfU4NKsn&L(hm(-%Uwk0Keg(~>kjsNx=X8qhBr4x#){Su)YN z0p1;$fg1LI@#j-z`s%U?t-R)mpSuRp)F1)1y)XjwwkXpPY;R@SPz=6(E%4wFkFkBb zVB|?t^11pO+D}X7-#Rl7Nrx}QvRrlKmMduTHw}yLJ%OM{hUCl}w$It)$-CF=;P<;$ z;DTeHA?ub2Nj%B;7XcBV{vTuPPA|i)^O`~6`i!6OE{XYhGC2pV)hA_^8_>@#nYUkF!+kugO-d!jFuc10D~gLa z(L7g#&8H#kS0sKiOvIU6Sxz}G3j=~5bID)cq9oFTTUnBZ;-=564|E*%waODw+$5Gm zZRetX9_6zu3NUb03}k*_8TBL`zSQR{EO%>%{&UvEZ^>m?mj4(_b`OH*6D{Z~kt_;r z=Wti6-?PtIh5b!O_`b(!n4D%#S9qR9_qS0Hf6)Shx0OOfn;u;~MwU!VQJ`w$+cA8G z5}h#iEB<)-8ula(A@w8l>4=IFoU5GyXeoq5^I*8M&49RFS^^VH4r5`_lbj*oKQ3XkP#wb_v|%XS=&_* z&~@B{LUnR#^f%n{NRcQs520PN+TbzE%6@Ky6o$3Uc*1~Y84&>-SCzb5Do9(*~NN-mt>u9@dz;LEp= zxBDTse0vFvTR%eGwIBTPNrUMbxo0r~VqJ*n2ZwQBj~IV!?qh6dE840xAKD8k+oj#d<~4g^Qhfp9)S>8kK}fiG8Iq7c z9%dGHVTeI8gqmM~s*n;44*d)M-|vC5*L3D#{D59J--};H72YY&Z*( z-Nf$)PYNnJ>di@rTMRzcU_^N6{O0oP`IQA zOCImTpvwyMpI-{(jCY6$gJyZULmiVpK|>%wuZ&x z+=fInw>X9q3N@%d^JGjlz5*WX`x?X;T=ML_GWxJOX?{K#LQ5jJsNA!>hVuYEy}-^t z7I($YDjbBb-^w`z>rh9H6MTljGYIck4)FoIV1zT{J&UZst^OX@pp$`Z?2dS5!c25u z?8TQUEd>{o1W3+bGu#*po|t2K9lMi$yaK1c z7}MvcnHQDy&t$d#;@`WhO*N$oXJ1#Pvt8GKsD?^=XdM^FI>>2DbZGRklQ=a$23*GF z0)9}T?GFBEU+2a>6+Q!=E1AalG!b)_HG=S_d#zKxMgUUS*p{HmHh6nUBcXA8P zoqq~UPU?_=gJ~FFy_O&UN0&6IW=sFew4n zZSxPD1jn!mDfxv9DRAF#6bE=KR+B2WQDPDsJVsxqO({tcD zW*$c5SP{pG93bly`K7MIx5_HSs^5}|+D5??2W>Li)|MC?MegZ{hs!lHk+vKun>ZGRr?rA{jxiJl41n;$Xbc~dfm46$L-?EF{04=dc2Ha=AeHa-V^DKFy1qRDL(Z|;=c~WGBzeDdZTmBbd$JbW^R(&Ls*w=; zT$8Tpet^!Ytu49;CCyp+8k3pX0jsqXDqmv)W&!o0es2fN{7 zzZC611wr-u*^s=pnrqGL&{TH&!56M zU#gahT+7hzw>Pv`G@!=BJkV^vj6R-kc^$nj^fxF+w&+Dy@kf}q*n~LD{|=&OAEcr= zA}nPYqm@Vh0m18~;tw}vh_6!?r0&gz-P$@-vTzk&H&%gqoH-7gbB;sPdSAYyR1Qzt zT9QvP_i!J3Z@MT}LsPv!H}`Z7l*DB6>wV2=+9MrW82$@hJv@pX;a&Jz6og+R@1Vx_ zH&8Ko0)H|7JWhOE16v!WVZuxmbaV(~?ET&1MZcKyDtZ+hW|Iuxf~6?zX=I+b?KpU- z23@$;lnmbSx;F#i*yScwGf!tw({_?>#gvRHjLjuVJ0#Xb|=p^VNF# zn9ugKb!t!f_`pTHd_Ll^nF8Y9_7pUi-^V+Q4=~C$5}*Fphu*z|(Q(;Q=I&IdV_wyu zhnWmqNn`Ub`h;;qb3v%Wd^xhFL^S1rbn3$tbX?em-Jz`SY&jC1D!c)i1bH&s22e68 z2tN6CVpFAvPY7580b}J+7{&58du}+jsAYlhWiGdFk})YrJ;|%>y8xlf{&0em<2c8n zvrus5iTFO-eda7Pqzh#VF>KmOoce*ibF~J8=uEG4x`B{*HB8_tJpRROGc&AE^y6fM z(lKm2fyDSG2*BS z?K*uO8mBbC@#42&G4?miz7-GqZO21}QzFK{c4ZtzZ?Rvo9%s2i69&c~1r4`1I6TV& zKg;!@>yw)huv~}+Tcg1{_8>T1S&cKNDN+M32|ttTgr?6m=$k$o&C@igBj3;W6`a5? 
zFB9;pdo}37MNB?mg_(_axRP;i&~CLUHIUE2cxM6se9{v%me|6A_cmC1q6?g#9l~45 zZ=m1+5ho9Q&Pg(~aQ^NTeAvV|q%uA*_>>LFQ?KM#GRH{use2e;D+Un?sfS!3uG0Ms zp>i{^Xnh~5ZBvHiHLrM8Q(e+hZ%F@W%!MfFC>T3SlWg>Pjm{NE;G*vzkS%BY-Q?e# z%b?rfWB!N}U3!bptPbJ+tMkx(lrdLzi**I^g&05T7~h*0feWWvk%{}Iz-yV3#~LDX zG3zb_PHKkF!)oBbG&aB6>xxhRe@B?7&Mkj(58Q*V^U_-};5SQ%)7ib=wdXSYJKcno z?J#BBty^N}^)vXqq!wP}wv=nS*(J4nr$#H5G3I(Qo2TE@#ahWI92gOem5dqi)%`qN zYKvfd(~+2bMvE@J*~-Q5J1#!8?JSf;9_1FV=)l?c_rbHAp(K0WXUygF>BjBFjAgP6 z{H9va#d%GTJM$}wBsbu}TN`3$XMwqHFQ7t+K=^fkye4l___x1vGq zxgWesDuP~aDdTlDOTGT7;GY@FlFtWQz<43^f-hhUz;Gj4{n-(W!~GyW=c~9XD;oR0 zusiM8L)_PlW^|j;!t0JPCPg1zP&i=_=VSYpTRkBcJ1W#r;F6W^pv%}sBg6O%NebLO zco+I>^oi(HzO;(@U)IPeQO6l`K-{$sUj-{;c?@${$34#<%)BO}=GB1c(_t<`hwZ~8 zm%&W09J9`8)A3i>Jzn_=xXpUO{V4v3*M|o)ABur= zQ@W$P7-w|K5!0mKY%i^W7Tt!_?F8#ujJF|6^=zp7Rv8qSe$MOg{=wOo%mQ2T0i$Br zJ2CjBqgQ+w=Zq_OHA`bGIphc4hv!02^k7=h(l74xS_bai7w(*$fW&RM1r|rLK;2e@ zsC@VfqMb#2N2DTtR(b^AcUzE*ou9zVljj}MlA&_P5BxLI59&4tGX7I7-d{18NZLHP zEEf$b+`XJn{APn&?K466h%uhxPD!uctbxQ+x50k-3f}FLgxfypBpy#r0sSp%RMz_u zB%2M#t51HyuZheL*{MYvm`7pZcVlXB<`iEtB!rI*WO=+1y43o0IaIE_jTTdCAU4$# zRHpWV#w3>4f1|-)ylF^%r>W5G24(2ks!bl}3P}KCzmBxgC;Ok4qd{yh=VxLM-4BM) z8FEU*Ci5=djMJqv`AhKPlRSC8RY1ExR^gtmTnu0GA72{X0hbTykry$Hlhim8U3{)H zP6m$|=N>>PSBqa+hx2Ny2@M^iNiPhsp!H94(0T1Y{OK5Tg6Z+_G~59NbsM3qN=O%e zvm!lLqOr{H9q!+z2|57_K{7lJ+|Dk<;4630h2>#OJVbEas|G`3Z*rUKli?wA8(Jsd z#?SZ5A^usv*zw=fd`Nv1<4!d4IqWVoOv9X3e^G}CvBIS!cM}$;0sY$m#6t9E1zfpFz~9Te!t50pYqfyuMU~nY|xT zPc0bqqElgS##FpLO-SxUv(B4lFg$fjV0n<&-0Dy485cB9+By9(b9S3RdzuTD{p!Y5 zWA5U!6A$p_%UT>YvJG4lQ=jZRqW81Zu1U9G~-4ta-qcrWRzwR?}xtvEv>0Z=n(Oy0wB^T`xtElkz0B>cuq@)W`_Gf+cPubI(;U)8!-2# zrF#C(iFvq;oWZ#V`XTn)2lN$4q1js&w2q!YVb(0Z>-`=S=!>O)W@_Su+A82VO9H1| zVX(O^S@OVyh&MdPX<2M@ja>pWux_L)c7P^&OG@HVa(rAV40oS}1WJto%Ng15^0MDEKG7}BjkUadZYj$2}}U^9nHpBcg; zg&}ldv49L4uq4-A)v3KehyEJSBywBTaJZ2(ir*E3d`cExpJ+n%+Z%z|Azf0+zWY7J zbJ6?#F|cp{#h+|oxvp_{!9Z4#rVss#BJor4(}}l1Q1(SU%3G1fpYM>a31hjMg&Gh& zDF{!GR;SX@N>p+*H~-ja=Jgo2h?^PK2g&6IuvEqv61EKo`Y*v`k&4-^i=b}C>hPB_1LFO{neY`jk6BeBOA4O;4R^!)&;nSSP z)1cBkmnk91dDkX$=7c06#4mFQ$rPF-2~83mNm7PJ>bz?wi3UlMBuNM%Ns>zV_V*9C zE{?O$-fKP2eS?Cm0S(P70O0~PzO{_~9~$lpqaXdjiEM{9WV=35JD&%|)Avc*#`JLq zt-GM5^)tVd@$}x@9705;$I*J;9oRg131lsw%`L960oMiYT-H)kyrusQ`kU@SQ0*ta zeqtR+cI(pu^XF)N|0LkNRB+uimltTU1XbI2R1)-H#9nDy9{+)tz8MbbGnZnS?mw&y z7Gq7;EL04;!8yNErbpHlfM3Eh4Db);(_86rKVT( z`5WurV*l39nAAIx@e8uJ1P@bsY?v0Q?~M>M&}0mGp+dY4 zK0>!_DH;{@1r(*LxSJu0bUVpq9*K03!v1TxIp;Q>|CoWo{qcO@$x1X!)gki*E6{Is zBcE1%71}Z$asP!|(h*6_kNWjEhCXEZuoF74MD7FI#ji%0h-d7*?+mryJE6F?2LG~N z&O*jCPfJ&(Lk6A3hTm5p)T9nNZ@%GDm+wa)y(OX3^JX39@% zaoEx$tox!)WWV*{w1F`2(J`aLvSuTL$%9LGF!T;d;r0-7(mBf#7T-_D6Rpgz_GA;P zX@$V%9ompIW)6<7K7yJS2XTkecTjwF9mj_0(vWc)^vL`IJgv|U&jW{$M%{3TZxfOg zdHQ6y;Te2xs!3|ZSNJEPO2pIn8R~34%H|f)5R`L*`=Mz?YYh&wS=$;gRW8DL%P9tK zzX=}W;<icg7TK zc!wIdS#DzH4s88=26rnpvP^v-M9m36C*f;uWlaG%=Z14hIo7mUuL^R49r3Mi8b0}@ zOAL;fld6MGs4gua=Ea}UaF0Gtk88q8%}{WXlW;rsOhloJGFYzYM@7DZt5DY=UaK{! 
zbXO;A{7{GX5z?qO`zll#{)E%NtmqczX4!D-2^M#(=Ia{Pqq1Wz%J!+kl4eUXepLo$ zek@1r=;7dd(iWXpCv(y7Owgn7D}PJK++v-FxF1^u#AdXRoQ+kcjlmr1JA8nG2@}v8 z6KsI+ASH}O%&jGux z6!06d5^IBf(RH;Q7|n>pm7F4N+xmo$EL2BLEq2x}u%J$pSr<>-Kj9whAlk*fgJ%~E z$>ysy_^VZmIF?Mna*qmr3F9qpY`O~Gp`n~x#zk2Fu?M=HtT87z3odVFeYPKC@yB$= zJ;?q8hhEB(3z`Ms{z3w8hkb^Up;y3Z%u`-*-~?xB8-vUm-<+kCKw z#|w-|-bYiE=nARwBpKR!O%ci=owFUj6ins)`GSex_!FB9NVta;O$_DGKIZ^L3gqZ= zH)DFNQH!WN@B{xD7hw8vT{2?HN_cvr0#B_hK>hsp;P{{mrgt2}zse85YsU(Vz9Y}O z7YnHO9(!=w(ZQ8miHBj59GI|Mi8$V!hiX$Rp?p9KQr=jgV8~%nq{Vn#8S960=S&05 zPhseMR3K?sv;{n}qWGMB&mb~*7K+u9x!TDgu=~m{C=FyzCfdNKTfM|C%W8Bw_?(MY z*~Xj~%O&zntuXRZ2l}lU$Nl&uMSqsXqu#MD{(=X)r=Bxs_m!cbcA^^mrq1Pxj;_Z4 zByyxu!-~c@n$T_9576z~S2&_|7YO-@SthCc#N8Z5HN3)wmxavfA;V@YjeOhrhaC7B z(i^d_V3^~5I9I4jyYe@o?b3}9D>5g)n3H*j`z`PpVoAe?QxrC|Ghap;=kQL78f<01 z3-g*pGg|8)Li&K(ct7lYX0B+TFc4r*Szf@@3~civB)$hc{dDODdJ zwbhO7j)!vA8a=o;@Cbx_H=)*TAE8;$2wvO-DBiP=bIZJgQ`5iW2%#pu!$e=va~49V zX$Rbt(jnzfYFX%*pP>7*zVp>hrcK&K_-$p^$mcp#pOW{Y#bC$#03BBK#`+Vm+OrL9vAI97Ty(wp~A^sIwzEyz0 z5jvFiSkTlH!@!QsSk50)p;>03P_tZ~=ch?gSaKZjbtBrgUbZGzSh7j%6hCz!iY znB7O*Ev~}b!F52Yj7WL>IJT!H%<-3uE#XH+;(}gY)B7o`QGdy@%i7d?y9t+(-OYR< z)A-FkjA1c%>IA{OPX3#RG_9%F!gZDvHK*(H2*{C1$V)_w}^eV z8C;2M2Bc=Q?8BJRlC&w^Ago3dv=!Z6nQKD5S4VR_-`PB5#8~c6fIc-;)d0!ZNH}OE zM?K$kfL`B6{%`+TNU0T4hofG2&CLkUB&$=mB8qB;H^6-K5>(%H5v4D@fiDSGWd1Zm zY8>_dCGt{;sfON=5v)pG z=S}B&-)D1IJJ{V^y@)R^A0+u&Y(%=W-r{HB5PHF<094kkgoVtjZgf~kPPj0Kx%eE& z&P>3JF^}Nj&^H)mW=_{UE(d3W1^l-t#Kh5wc((Bw9^*Aga`83H`V^1tM(*%1^e8%u zZ*xJZpZVX8nk2a9HSQdxN7^>72EokI2}|zk5w-Q#p*U3ohCS~G^^O*F?Vbhu5ri`Sptlo-xU9qBx~Yg?c_DsoynNk3C}>gL5lhxv;~2)UxDM> z%NVn09ts}Ra-K_%LFDPR5Z1sL9IcPI)?=%{{g)-($nG2xi9gOAE1+slX&`%i0;KDT zq5r8eT~wk&_W!4WL9b#8Kg53ptLe6+vmuTXzgo(@H8_d;66EQJ<)>MEPRFJBfVDBt4+u?u*CS0&%@*@k_VMqGe) zC+CtH#xIoBpfW7~dCoflgY8^WC=i_JHTqypshL?WFgZESsmk@i7 z+xRFM@5>vIs?@oRzY&Ilf+bws+21fWMw2v8?}rAj*Vx;2mUpl_hYE*2px8H#fAiui zo|smG%SRd#V}-*|ykI`B6E}z|rG10m&|iFik&qfl8xmNu7xYiQ0o(Q8Ig@G$^P~OqT&0oql<9@k4Ry;<_FHhGzF*c*P!;!`?)i$$G0~^NKV(ihqFUe$cJrz z;Kz3*x;1SG^&NK{Y!B!2&QkIchv$nRvNQo?KM7br%D1q6;UgRx!a5;0H-dBCJJkK7 zPHGY_^M%Grw9}Ni!hVm2t^Ox5JopMP2o9Ja&(09P!>ggB!wmx*3OVDQCqPiRr!Y%s z%xSkyz#H|7RCeSrX!g5__dSQu;wX6(9g?Gh@{5IC54su4=`rVg_zIWsQv|OTy+-{x zpK{!y00@qQ60m5FEiHnmoO@4d>3=HwK8wpeo2d^Ef&1B zruzFUAZRCY8=qf;$V2BKvu6lt{<;s<9th}|_&!`yJ(``(toOE~owKY{M8&mtc_q0- zuv_{UEPm;b1cz_j7*Rgjrr*ZuJ6)LdejLB^p(10E^uROTn(VjT2O{&kkV%Y*T!jj? 
[GIT binary patch data omitted: base85-encoded forward and reverse hunks for a binary file in this commit]
zL4F#r>@w>EiFrPjd0jrao6%eBE?OSMm}*~kgFzVMcWW-j=qbez^Ra-}US`W58+H_L zHd_#7*8PzsZ7}LyGThScM(w13IqAxod_wv%@H5fl!ZxyVb)*USEx#)9&lJ%7U{mTY zTg+9yJ_c?61)SZV53tzTfMhu-(z&nJGgn(LKf>xJx^G%2IX=Oj#IkOH;3wfPe;fco z!xc%+rfyuq&eWR!CygY zw~sG7eHMalZ{pQgFjnUhBgumnd7`<&lrHr#BdH(FsFxra?yt?m)~iy;oW%C^o|}Ix_Q{A^RXw}O&psBq}q@c-ipD>C)U{TstdRFXpsr23N&TL zQ;9#zqZl@Sgxdw>csDGLW&DNYzp1RZRND`uBlLs2XmuOi+SqI`=^BFAh^NC;uARQ^CCz zMOptO!K_$4GG7g0E5k1MI5EbTO*CKfM2>*-RXC|`Ph?ofIydwj`d>o0XnY?7<)&k8 zKp>3K(WI9j1Tug78|H8g;6I5?NQ?Gs#=!SxZl1~POlAx7<~3q zxrNIOKEil5;pics2j0`t!Cg8+aww}13wJ!_Qq}H*vPC>*+MdO?*B`>eb2so*QX)jY zV9!sxr8QsayA>o=I{40D>> z*#)y})oA?WtvILgE=o&2il&m^V0&Uax;!s}xs!q*X5L$FtkrjL-+5AGm%SSMC;x(N zpOkRq@j>)(J$t@=cwMA7+lX|TxPj;725^`3a%vi84PKyl{LS{37A25Wcq?CatMJ5R%qcmtBZ?=aNO$OGvddG2gzIi~ub!O|P=Mf(@7!D2^G zd~rjKhQyRW>mVKitXG2cs|M&A4#%`L3UGtXxaE8IVxyh{gdTc_BR{kG{lP1|(|b9P zp37qSkHw-ek80>DG{d^;XOP%e1;S%J_?mQjSN71RLVrP-p}x(Hr>%Y|pwv z)0}vPZbpZ33t_nKJ&c-l0hS#qV2&*ps1Os_a7T_k&$P(idC4FR@yGe*=P@PvATJ)d zUoyQ-1Ln#a;FjMcm7I_E!QA^{V%azYX#Q#KjlkwPoUUR7F||`L&_E8;gTw( z?0Y21c&`Z0>T*z4;Vz`8bxC@Mv;3N!4h9E(0j0~DH0!_Z;Kg`#0k2j-%fYL-U$z%? z8@_|nA#3Ctk7C&Ed#wL|nPs8g;XCGT*x|-JyY(4ZE9(Wnt=;j|$uhY7>=~>LJ%qwb zbFnS_2Cw{g5?bxDC$;gmv}C*k>Dh6PmosA=;^Xh3RwBSnk+x(=*&uSK^epIpcmxwa z=+OLyY<4+z12^JPEr=NZv@@s}1dAuRX%tCN;Py%6ba4`d7swKCK{z+B{|YM3U&DST z8QP=J%W|B|<+6Xb+xThho&VF0aO`h4@$Fn3?<`Ae&31rgRs*!ke8s0rWAK%faX%lt zaVtodBaUah;2>kwI+Y~zma5CJ(~~j5cg3Ns)djTOv=U;5KINook0p-nmNYFV2*mH& z_%Ij7)t>(pclF7jL;5f72Kj~Y)i;=Hcm}tU-9I&K?a9T9azxqs9CtsV8Uyu?!?tgh zP+4tC9TxZS)4!?VsD(H1Xtgdm*ie81kCCu?@<`OaAcFFQgP8PM2={Ji5o<{~wlqp{ z%h+_Vsrw90Z7aF*l~b|x>`~BPVF=rv|HP0pJTzPxg(FTD;>>zBy9((L-M=1!8>-me zX#X14?Oce0@DY-Y6$Q*Wg8Ef~MQYqP>wF4ONz4%JZ8jc%VwLa1d0_ zHpW9*M{w6AP3nIBrlk6v9_*F9iX(3+(<`e~iI-?6>I}E0&fRg?Sh)%7a&AD^RYgv4 z_OmGF;(RV<#xbt+N)bPKi6PNfOh#APS>P~H9%aWn&^K0!ROPu2^$wE-+fmlo{e@*N zRAtDRzPsqOsEZdrPcNz#unb;U0(Uxr;@2{B>e2WF8ZJMDuFHf26D?TnW{nQ{Z<)U| z8hRfZlT#^gaR11!xI*PT&X;$fI}9su=Z5oGyoNE38S^4#B^6EV>_)kG#+S>SkFhL2 z=J#nU=)7kCSD|v8@|0Zc%nyV4$5_w1_XPCLv8I*xl_6D^WtRV{alc0_#N3?++523P zx7FJV-W|sLk!e~)v*{itd)Uy^bTC~WV@~%O3CIRR0loP^i>9PF;IV%8fBy zo;ip-*$yWD?0gud*~hZdEHis06XLY8aNo~s{i^XrFGA&1~_u`PW$ z-JU$3B~PQqHK6y1)%=#0N{G=)<|~{JbBQO;Vn#~@pJ2KkPCDyRV-0J}w)g<+7%ye) z&^NGTq6vmA{uiU!?l8FZIZ8LD;mK@Odhf3SHE9?`ikG}XL3}EAx##`~hT4uskE6oPeB{!j~zmz-YU`Z>?KXQT#o^BoWqe0AxMV|KhG{Ax7 zn}@`qxH_5h*{neSJcvY(H&!%t<3|w8nFZ2OyCEfb6v(K{k##YQ@i%%0*6tKyzU3#( zzf=V!ZH!B8nF>x=OnKGtfqUl!*2>Q;rSR*(^ z!w8(PtP(qan9<7mx$vu*Wlcrq)Z1?bOl12s=YW4;PLCP&*CLWB=ziJ+%I`dY=z}ses7L|Qj?Y8eeG6fTz6?nz z*GA1wF^bm{c)re>oSN_l_l|Uje6?cycJ&F^aw{OG*cR9JSd&$CY>xT72M({-A;QWq ze%Cg}GT(Za^OJcdsr+*q{2Y2E0P6VCM2)luN}*}I6+PWnjEwbljyT6PwgktH$zx)pVs zjObCJIkmWQ5etpq;5m04>Rh!K{3lF7uRV;zt-T9fCUk@CX=~8@_%Cj2)keu@<~a)g z&P^7ud2En1s^<^E5SBqv&3TQXh3fcr81vVZC4e~P9lv@i^94Q8Ce5Etz==77=zr<@ zWX_PmL{Y66a^f=hq|?mL7hQwmVeS&a1!eSqaSLQ(W6@{)L%6%!n3xQUMdLlcx%QiB zP63kv4s5BTuqdHh{ybc5eUtYv2;s4Bk8Q`3IYs^Q(U-ym=u@JpTEDH_`_a>G1a= z_ZNQ|`$C&H?(O7-$>zKg>n-({ea7|qT4Y&iAKr+fm{O?CO?@O!qpRAPV`aZ+@emoB z^Trfv8X}=*)o>`3&BBgt%b{<91I9l83O8B@k%sT>V7pcm0s|hQULxyVpKriE=3A&1 zZRf{WB3j;y1ba<&;vU+C2N+ZKe4sw*eQrgjc`1?d#Wz3@WK{6O&Krs&eu8f9AxJE! 
zLQ}o{=rn5~SN_WxZ8p3B^AJOfTbP99HyFo8e?Ok&^hi?SFi`co4J$5Q#(b}I=$YjM ztG{QWw#6&nL2fEzF;}|nS=4~XzsZsPHz^KDx1}L-9dV1%6kNzSHkQrZAh@-}?OV|c zmSw7k+P4=Wc=RoB-LV_K?7z)=p6~d=QKnFspbu^RNBOUFFQKkp8`Rhzhwi&$Fzt{8 zZ!T9QJ+n+ekY0x;zo?T1V{C|Hpc0gp9ul<$KI9WqY*9MPjW5(Ppff|PY5intlslh^ z2UBWstFILqEiotYMiJ0uH-i5=#*{cFroob|t#Bol^+z6ribqxL8)i$>|pM2sWhsGm3;7P=^rggJ~^`8S74X7_-Rg^8#(=``{m zZ0J8KT2#VjkX2*+apmq-P)<@{{?X~ot!c~mBoBfhQ#I&3Btx5@m{5m`Us&pyCvsoE zP$Dir#T}iHfME;ms3+})vh)=Yb;=C?1{;xWixVIrE}m0=qd_Mg3rE3PwIWxGRy=uB zho;={0L`{$*lV-^jbkogPzal!wVnZ$%pz?2(85h{dCYiV;h+YlH8%-3+^2MSy+6xE9xP;k#zGRo{73TM_y z{MM%PBHM$|>c$x2!2Vv+J_r~;1DmhdP`wq(M7_!g-P__sIa$M5KIDJQh#vmR3+A7+ zw7?wicl;C?#?ZO;h>O3XPOoHZ(0bL?plbXPbOKM{wF6aX`^5y`o^eFwVp>IhFxxW|vHAq$I3s>hW<0xp(lAXqjlDyZd0|l-7o~p@R&BGTRa3X(>c{GI z=XwK@ufw98w|?+X8{!hVP8_q;lz1KA0()kg(W)cLbnKe{@hz5NLtYn5zn6uj_483} z8uOMc{mDCKtpOY%0u{nC#huz@)EM>|2}3^Kkahnv&oJl5XDs!f0y(*W{?{gBTrKO0 zn=8^%l@2cAvLTI(Xa$7>_QWPulS;0YgK}3N50Q+~Bi)Wuv`ne6`2cJj*9D)P)d}at zn9P>QEqED^ol72pz$ti4?!W+8zRv*vp;usUjUNPmy9Cqcvvb~sdNixEqeC}TF=ixl zR$njT1|ABirnwm2nmmEZy+a^X+<|F2jA=a66x3HmV9J#QK7Zs3a1fVq2^nJ`LU@~t z$qmJW(+tVFF@D%+IuD*C$`HjZC1Cs@hAW?R58a+$LJ}`Sasw|wS1aKjwiiQJwiiF@ zpDG+PR*gs#wnJ&}do&FwXMOi7b|($P=-Yh|IcFbUUBhy#7z^jE)#&G^_B8m+d$7>A zBXJA0sF{}q+kec1A0rt<-FYa43_1_a#jI!9*#|TIZ-Q~%3NUd<#gtLjRJc{2cU5`M z&1Sj5jz4uc^t3Xa8Py2o*~9TSyCeVU&qD#f(`}DAJZ(sPfSRjsLb=5Oj8jsh*0db= z`-Gynm7PW1WI0>i4dCjlj(2|<&|TNx^CL&xX6!~0%Opff#ywZ2gJ!*F+|!A?_N0xR z_J%k7i&7clH#|+$TKE!|>apG1J`O9wukmx_vvK4GO%gQWCqMC+H)aI2;5NhAaPid_ z^jFfPswaEFW$_c3|Fi@ADxdN5+s;7nGsFq({h8so2zqW$f>A6FyXEgA$e=&aH`oK0 zag4#-Hi7ZmR$*{)G3-yd1Yej#+i6z`?|b7ib`L3s=esmW*A->lyr~g$ZeN7z@~_;# z>Yp)2hCS~_o|jxTJ_?_jOsMnkAV^^D-m`tzFzAdLr?qqt326?4iqD_$7cH+6#i4U zrfc@8l09=DpzEwNT(3KGu$cYj-Gb!EDJ#YxOn!y6`QbQWtsHGty$&~y9K>-;ve3NO z4aDo`z_tfJu=k=lnX=BF3T-n*|Gd*7?(21F+Ds+dc_0RL$Ca_Ukug27F%=(ru6m-B&y-`Civ>rp`p;xcYZ?m`d;(njUX9VFPvP-AA<3BY0y7+kfTiqJ zu>Mbj1e`t0i67nOUyjoxj~Rn*AS(nfjkG2AKSW@}ZyyxwHj^}@Mxx*3MA3-o!{~6` z5qmt|pupEyl%%}}b}rK(Z5!8Ph@c9@n~J&U-6aqc=)(2J8d0a+MqDH))3<|~Aza+d zW(_ZR?TBVA(CRtPW+X_e|F(`ZAigxY0f&LD^VVBn@es-`5=^6Qw|M9vH^IN~b z#ROB*ey#>;>yJX?xyksY#*j>zuTO$AKLU)n0-dWB=!mVoj3s*%ZmeKusw=LNKi>*a zJKq_D7BZLiEXFmPYer?5zoYEN;XWh%N9=BHwx+$D@sSB8q} zF&KBzfGpoTn0#2Di516+(bORu11>~BlT$9fTv~)Fe)ZfPqsPomorY)oE!e%;jHY}! zFWR#8f9%TREN^d2W3Mzr;n)(czHlo9Oc@I1IZE(uACFt?e}UhM7|H5^Vhp%*llufF zBym~+21k`cHFFCzh8ck1c>^bCoyP7#PNKqoS16rQ$R)bQV)b|ArF(aQIlmpdQds`r z%Mxy3f+;<=@F;2|o6|YOmM+R=JY?(P>}(w8=G;9C@33d1r_`EyvFFKILm`dHF5m>> z1)>z&yF4&9+~8Cz)(y#nd+hJ;UNKxE9lV>r{;LSZ^roBNhFfS|p-c2;UO@NO0?s}8 z2paa~!k52{@tN}kXUUk6xA&4DCGt35bRiraWP4G(XM{w0Vk^Hb*pA+@eGK!aE70=? zwMo}YJt~%blYAL!NhXYE*}4boK~7GaDBOF9t{TUqGJ4 z;!xJTHsJDJO7P^SOK5oW40PXn1@7ZUgGBroV%#%1+atqa%Rn=%I4!}5Ig6oOv>mH6 zS8^It)yUeH=5&Ua98t5WUm%(G>LG(tEEb(#PiHqgz=-*x=;`}QBgd-Y7cdG@YK6@Sb)%@na zEwZFx6F5#c+1S~@y&;55mfoMyXLVRBeSiDvvmI~T5 z`4a{0vwGmSua9?lGy%rmJOqQ*UBMhv3HOR+g~pbAU^%c4u+vzLWZCQm+pjZGW3nz0 z9NAGcY8!JlMiy}m_RZ}7@;@vzzrbgis^grk0^)8`DEhs64E}1fAXCL};Y0mn^y@Fg zhkD;|>@hvsz4jS2kE?~DRXcG0<_=H|I}9oP1rme6_c(1FVoGW#a}R`bbJR`gfRPaq zxa;$S->`hV)z+ff_wL|vmbHJnNRQ;*91E8>cjBH2nzZfwV4CI}fVQ{_LJpjSQg+Hq zes~8~`3jk1`<=w{M-wEedO@k%SMH&P3Jn}8!lB>4La*cwW*>P7g#{&C+9_GafN91J zLI-mBxGg^Q%fl(ocBEl?G@MzcP6YSYxJd`vC98dRLhtEocy+oe^}YCq7mtrAa@}su z_M*9%EEvFi)wA%A-5>1v*3CJ_Z2)2TV2H>}<-9E0P+IhrE7)-pFW-0ul1Xz=EL_fy zYEYq;k+&f9qBV6HRD&KNM<70`1XreX!>pEjh+HXZca@-ax(ldF9-uh%EFW5@fu(&l zqW0!RSYhskt5OFOucJqB-h?k`e|Zqi@eJfU2fu(1Rj07lUI=A#TToMILH{W)L(_%? 
z%$~vA0+Tm$hq~3se)*dqsQM*wEISOPqn%)h-vq3B>djp3*Kor`3+7B7!u!f;Q|-^o zc(FsLoAz8;{?SBTvIAJZAmSYOP0r`N-%aJR$RPTy;5-^&EPM^bdDJ=cUz6JF!JMMk7SwF=rk`eFPiS2*<3l00}2hZW98c@JqUIv*B* zHq1k%5y>dt5)D%EQxuQ7U)28U53V#!fNe^V*gXC(2vLbYH7*hi!({2S@oIE+g)XiN zwI|afSSNV18PR(fft&6>U`#T763OyKA5~=Nnnoe*pnq{oS{(Dl7}3`CH4wNa5I$Q< zaE9s`+>-Nv`7?`A`Bypb!1}mCvpimBvKYs1=z!8&^+<0pAMQ*EUjDTeBl){fP+y2n z<&LQTWGmizG7YE1$&!Ll6S8c8-H(S~0Z9bwD8(g$P*lw|<~Tv=$xxu><|Nyz4HUXn ziT^~l0~&c5b`^?1tn)!KZ>E6Ut{zP84!@2WGBO}+9FAGLhePQ@H*mhDh{wxh$u3bm zoU~$jgpbdms$Z4%EYIR-gB^)c4#t2nCET`qrs(dRD{AtzB9@aI!S`ku7>Aq&-yRdT z133jlqQ2uy*#oSX9^w|gq8nB+W~uo_7mV7n5ZcCXhbo$mNtrLOe(eu#L5nec6st$? zw1>d_!M9N3t~v=G#b$~-wgFdu2S?TC!^CB#xYJyL2utO7@zx0b#5-*&KGslFeWZw6 zw_S~q)TqLc+?MS_C+p?d zx~~e>gv*oj|LT!a#+XS9Fauqd*}OCR5^UM<2)AxgAuiXR;s7oO3Ct?Vo} z^K2WQxWVSVH)G(HsROC>nF4?R*wWt{et}QaD~OgafShx(I7m;2h?m}#+*))WWlisc zpu5ZM1>M88K*MIEXe(f8_O){VHrb$txwz16qDnsElC%Oe_eS9d@_MH~eE z7tPoFO@Wpe8I=C>hUX{i(KUxtFvGr`6CaVsUDA9mn12L;f5yRymQFOZm7#-GtjXpN z%@}q}3g%0XLFC?xINgTbmF_XWL~k?ibkGW7YFoLh3KanVWJAyse{Sf{+wjrVnoeJT z3U*b+VMJUQ_iU~e@wIK?uD`j6rAxlzbe6|@d#H}}9lpEyxes8{P)+)1v<21991nHr zR#fWy9tBTFi>}IDg@(!du{v)#Ect20IQ&gm+TF^1|7}GQ&YOXPIMOC z+0Wj;(_b*=!*{OZh8Hv!@4@YQ1vtJ@iTXcf=jHLIS@-8F<~kjNxiSh=mocx(o?6pO z4uff4-4Q6&+K4{yZ$pUb0vsz2#^=+O$XSC5oM<)=H(xU#bCeWGk5Lm|^1F)xKVEW= z^=--A;gLX&niKgjFSOfm7DVTsvUz7AIt+Wq^%#tUj{kIENs=3CEKs1A%XZ+(6DHKV zRfVg4u@q`Xz5y@Zi|vrAc;8{k5S(`g5=LC-jYA*to+ET68qyyRsfjpSy^(J{4o>0K3b3ZW8Uf z{*?c5xd^-O=7Q4{53bGWHM+4; zU`>5~CSlS!6L5-t$oK4K4&{lq%sSG5pfd~W#~gqC)g)t=wP9QKB9zXx1Uy!Uz>*RTnJY`>eFiDz1+wCSNNafDO?eJm^ojMLxbxt5G}iaXSy|r zXBuF#8yD_L?_PQ}TBIRa{BuVCkkM{5Wm)y_3IlrgyBUD(n3J`OSDg3v}t~ z^X~9cSb}ZOmSd2;J71^z2ilddz_cy8R2n>o3tYbu=Uz91p1eG+!A6mu{vk`}@it`7 z6-8pYU>*9Z>p<}=cMMp_@@T%vlF6qq6afip6frsEX-XH77rgMj`+mJGuHR$fN0#3I|v3+|y zj8M_0m8^@RpvN+RPT6jPnVy^f=GW#P=pwQx>Rj6uh?a#x4AWA3VLm}uXF z+aw;4xh)B$mi3&&_I7M;>;`9%7nn97Bv{x}%ZN7Er>sbd-rmBEfvHHljLB6$6DsM{ zqTl+IX>40AoExS|#J?NR;4=#ow(kPpI3p@M_$`!P3FX97Z@D$GJm z^HWi&XBuZ#cL)dAGw|+FEs`{0HYCpa1LnTkSZl2T!V^3Edv=X(j{8NJ38FMH`a`?{`WIR_8C!!f9HT`{2bimZ%5DO9>Va=PjFJH zHW9vR6$u=2T$9{PX;S!12za9hLmO;p^T|}mKf~_IO>6*GIRoyLv)PhYF?ZADENWjh zQX(ol#&CysMPowCeeT3^dvem4&6SR>6!n!U(E!8EAhb)7Bvp=u z8tBLN)=XHRT!@!Hl!BaFD89Xr3-Ki$kaP4Ro7Ma*nmbY{D~7&EHq-wSm+ zOYrRo1zJ<^3RkDHJCM^DblC0=`ZJfoZ`H#Pvh^#Rl|6|nU(J|1Z!kREWI}~EW4P$d z5@>$+6*HX}=X3+*g0wesRoe12v8Mq}$-Dwr)~O8H&se{|7lu@U!mBb%k%TFRN<%z|Hv z%uyaBfDN-`$S^kV7~$x~I%zJPso)RF8w`eSw^5i-qzw(-Dd1DdKGW0Rz)o*D^2MK> zu}3ty^-nwmQ8(8@dHpWzy{$yn{`!oH-#Wl%fHCKWS(DcnROl4OPf%?w1iM?E5Fy`y zmvxWBdrvcx;eH*1TMg-2vuf><#8HUxXeSdxIxp*S$~5Nc0Kk+Sj=Up;R&SKWUYR>w5r%q3@WYU^1@ z-q8q|N94#j#v>Y{WkH@SF`}cq5+HMbDV}0kpVFtvU^PpNRJB)vwDue?y*rOzvBRAD z2EF9EGW6N|*AyL#2#B6N0q@S0oUHl<*b(^+?sW=?rrk4CJlzg0_BQl)pA>W6&*KH1 z8LrY`cgP#%hbJTT$ygENJI5zNv(F=Nh|#5s9M7YLQxY!mvWA|@6S(Nr2F$N~1oaU& z`ESf)|LS-uj%Bl^lZiqi-y98pI&8`F71pF;rjS;z5pnI`?eOE$^Ehd*0@+o21}4YJ z6Ze_X;IQ#F8gBap$xG`X{`^}M>K&5gN8f?8ul8s^! zv#QJ}fV*2TXL&FdKFjAbp679GhC4yI;ukmY*OKg;dJxWZ#Aals+P_eHC?60aw3tgZm;+(Uxx$P;mE$M%aeh`(ID?`F`>xWJp%jKS*t zD!kEA6%sr6I-DD$L27c?UPpuF-kv;$U2~b|ea$2O;Oj%^$M_~K&t++dMi>MYtmfsj zlyKwiE*QN{NEVD$Caz(Ly!(XF5^Ikp+Cd%8&H0;E>5 z4&S~ix2x`-LFr#p;!?xIslfwmZoEp;YV#5_Lz^LJ;yNzS=o$#rM)R$6oDZ_LtB^so|ysx8+@xO!3*zj@yx0F|cY3c{? 
za-IZXb7iRVG9P%i%9xyIXU}8zjA@PDCtP{08fK>{6U$LAL9D)+YuaK*Ue7Qip>>0( zC()t@87nR6j{&MLu%e#l*p9(Kgo=Sxu=4kL$cfT~+Uf&P`|cBt9H&V70zFvAdgvJA zTwPo?`zsip9Dr|4XJPO9>6r5}j0@^-z`VeHQ14RA`MvH!{Nw{gp*A#R2V;;G>(E&D zNH8u7fiR;k5W++*$Dxgvz8M65UnwzKo@ zxH=OO6zR%~mmHNy{`JMF+0Vg8Q$Ss2K4P3(sYJS=R}x^{%$>7l{>)+8qNW@jYOi~h zorAkk_1tL?OwQm+D^7_vzBQ!gn}dLAm7t8dF)3{-z>>2p|2X*&*RL`P8xNFV%2k$k zRX2egG7jrabGV1g**PFU5n{8`VM($f+YeZB4hM4iuOV@mn^+5mtlyKeL=)chG3Qy- z1IYi|1`|Jz#6V#*-p!MtPS0#O;R)9LL}z5e8l|;pV%y#ak!$5$x=_| zT$;jWcxLRLtD;U9{rdxUx%Y6gYujPYQhnn6XFV6a<|)hDpB1HSQ{(?VX+kb|B;(LG zOA+df;O;$R;^3Id$N0x^Rs&kJ%T=G=+42jjA6?~h#`*DW=O%;CscyWp)DI2GPdpj> z72TPi)~O|(-#6|J9(bFM0@wfCV)@SuaMp^eY$~v2XE0or>%gd;mKgc_4+s+Mi&`S~ z|tZy*O%!;TlQ^TRPid1WvH9e~;Bols^k>DLk zIOBW`Hd-b_8GBz=o*N9K{_BRlZ3{t=FbA*f2T) z2KC>@DxKe$OAP3*V{+tvEz3IIGvfRX{}uTy`HzoiG(zLW|KLH^Pi_oZia`xpFsMnJ z6n8!V;SqT{rLqlOMJ>FsuNpZ0HbnEYnGo%6K>ZzFV30>5XBvJLc1YjD>irvGubm7w zzh*qj+;46%rn5lVIT1Yebb-k59<=wohKez9oYO}i{I=}{*q@Xk?PapGeufvcwF1@{ zyo2x+GQ`VU1VKe)LC{LZFJlzH3G>mE3 zxmO^VxPjL>{|omjSVQs~#uoOU!rKcih^)yBd~{2Oblv{QclA$3@z_(6=}VV``|)E~ zAKuIQW2-SBzzQV?Dp6%aDK5I-g4J(la9e~&z%uwVK2KMp$|_?~BiNMaUrT{sFHYhK zPv%5;T?Y?i3GSS#Mc~L$P(3^wt(R&MVb=hv_DUh<$qsO3Ss7Eji-L(eM4cnMp%@T) zk{)5M+e`G^XHD}i*+Jgb<)Ha174)5raor&Hvy8e0!iBT=i(~(w;Qj_KZ~jrZzTTdc zHfu_9zN(>EDMR#okQhs%Ov$1PVwn8Ul!kV)Oot_70@Rt0)qI3Zmq{UQ0RXcI#*>uRcr;eEllF$ zrC%}rPawk*sFLW%cVOIjInwl5mlR7yXabKg`S>GPlzIyU#bVbCQwcAoUXrgZ>NMy` zIKNlUl4c!_X+nnQg;=C*)co9-_j%!mFKutM~yh# zAA#;`fAiicO`K^g;~Y0Ca1MRPxt^pJ=4RQz-3+%Or2~hs-Btp%cpii~VsKi%2Ru!s z;J92BH8&d2?gO8|(;)+T6yNh{ZMJljjv;j}IR$S%TF`VGZCbx|1FYI)Pjiirqg17d zbH*mD-lE5~`kB#!nf7E4n|1jQcgE1GGLRrG;C`=51y#i{Xxj*&>upUnn~#Ch_d0%s zeHz+mCV|2x0oCPxp=jU|?s`>&e=W_(dS0EBEnSF1-qzyK+;9l3lff6N=45}Xkd`bG z66@(IwAecrI%zZNE6AbapS|#EM;1P8cn_k3&v98Dn}iQ^wa2aJbZc7haFP~@J3oj>J6$-TuPmMkE5ZC2 zmgmjrmH5r?;_Rx#;Qb^WGPL9{QZF71TS_2RdK;w0H$*mLJMiK9H*oh`4F3JkfI^r$ z2Hkjr@dstmPijKTd{*MSTWVCk;3NoEB};^{@41-hE4;VmYHmH7FImf9MVXlov6a>5 zEG(GEGn{cv)EB|$|Bm449vzb9^9f}Yb!hEkV;Jyc_Yxglh!nEi-$#ka(F76P{2{qS zk1kls7>g`xBtI$!j*fkZUA2yoB0mE!i)P{Rp%&D(u@>E<7NX8!_Ric~J|Rjpq7mY|yvhEq~Zwft;yi9PexWyyQnfoADG)#o8-)WhdoZR)bQ9tP=04dtJCv2^N&;U=$q#_X^jkN zS;=-pdUv?;FPCtx-5Ru&)G=?A7nc#}k16YOB@c(EK)G85YNInaGk)!K)4kYuY9VYZ zW-Qmt1nk_*JiV^-v3lSW3WvVHgy8R7V0Z`q>QW~8HzwiGun?7Bg^)`e$nK9qyOUZ=5&vgsKd+tJ4O$nT5r zod(2xFd%m$tcf&QfyOXiW~JLiG+loft~Ke92ulqRpA{gQ>ga zVab^;B_e+9hMC7qY3k!!FhceToL_YU2ac%{!2m_^>Ptm;{*R*baHR74EvR3FMk`ucC1H_k%{~%hnvhl zJOa8lG~xmT?Hm6M)X1jx+iaZcr9p=NItgcsvr+g% z!hWZr!m*6$v#(8yxa8mB(pG-q_GP`tk%G_gT^fw0=5i$baygukXFRU#pYZKn4hVzH z>9`^4B==zzyJxiF(B)d#AdsN#91cb|=+eqpCLlRc&npbfz^4g@)Thu9Bz=qr2#2|# zm(Cddu^+UO?0Ks*XSjQ93gqJFqcC{%0&Lv89{*0$qV1dS@j8qFdvQ)aPP*6y>w5&G zHOm%sf^I^yp#tf4QlVK3*gSozK0Q)vPhA8)jGv=`r>`Nl*;FtVy*=m1@_KD2e`2v; zBfo4=KHhz#Og?S+jk-~~B_e-ZqRexq*YJ1I@d(1%p;M@sYKDFzKBQ?yCBY zw}Lt14pep~VvpRa1ik!P%FZB#TQjMSr9GkbZ-?^Modd^HqpSqP(owaHVF z4w-py5vr7^kr*|Wm8uyFy+aIX<$e=LsQnG|^46hN)OlWdB7ql24?@+o=OC)tfjBeP zq~{+!ny9svtC@2JD&Ez=_2&=R^Y#m;WjTxAPPXCP%@0s%k^*%?dG(lQr~ie1aygzQgY3vzRhQ1I6-n(0zF)PWE|*wMT!# zTDx25&K<;8I|WoNZ8AjfNX7EfALv%Okmn-p=+XKDj2Kml+fS6hw?VepUhc?AIzw4^ zbvfo)r-GGrmC(RC1O*|l#exO_U1&Ohh-7Sp-FJK7i{K+hx{n8^BcGwZuot`)j&p9A z#@rIKY#hJ+GyLQ) zmY4sy8tfPMFea`)jM$-09t?9Nfjd3%lanG%X1wqlUPk1OM+P3PsmA2v#$>p3ASr5G zfy*V&fQ$y#%|=&mO5k?sJy;bx>VZx^_A7Kufdt_ughI*eh~`TT!BQn9D~ z1~+4$BJs6o5k|&8;ad(}gglw925bm+O{DS>A;q(l3B6^gZuc~Q>qn($+-G{O>|uy(`8U z)$&&9x?E5vo0%;v>i73%>cvg-=?u^E@1Rx)Y@ zuj9s(9#}Tgn9jJXNu^U}bBhn&gTdL%b^q`s$XwB}l zmgBva=R;chXxzdW+-f7#sq(<9y!81xUSek~9N8dGqgOnI4OhS8@2&z|yEGHyBN+F8 
zQWebHk_ZcooUwn2B~>oF3T{bB;=F>h-1^OiBwDu~+C=U+c3uq5^R~k^yK+%uc_@56 z*Nu-ql%ju%Hgn9oLgxz`+Ek-N_y16VYyJjQtwabLOYDi;x)kxU2h}L;YZ9O7lBZ=& zs#H7S5x$BXNJOj<9#-^&>*bo@Kek8wXXr>KY%RmG+qF>Rbd2*wHxRjv=0rpGiCbOI z@#SB)z>UWa)H}uqzbv+ZzTrDjLIb$H3v%HbXGodihOw%|eSTf5Q5NL*GH1ry*ohgyZkBv9{$2#FY7hr%yaaPSoHM zy>l^n+fj7%)}nn*$H8~hTNGVFK1t;rJX?7b=KR&AY5!jGS#j4mO?Pb)a7P19e>El5 z<6mQOoX-Dw64XQfCOlro*et60bjsUvQ2%KtmN`*)ttUr|Vn1TQ3TJ+SA#;)5vxXTu zj%4!XPZ&S=C8Rmo!!oG~=^k8z%92ptYUU1Zy&~d9_IL9oTpLd*4n&Mzj=O#p-BJSOE$zI?13O&#BOV2|ZTX)+)u2^xGk5f3E{Ii_pX{?5z1UKL z?XNQMaHa$4ZpZ~^#-i}P{DWInqfW?TOES_w9mYB}z`>fIch z^&gfmsZ1AVxG*2&vD2{G>NXZF-+-;kW!$>C8q_4#mX2N}OC@uq;tdBs0#1y=^xAyT zx;mSiXU+Da;*oG|6LUEX-2_Qj*}XV&8ch4ba_qy*usp1vAH1Rt915@Dx87H9L$ek| zqrVFirMfsNRf@)MRY_^h2Iz>iAzw@^>Bzt%knrLUn6Mph+uv(Yc(03jx*y;qSqIu} z+zu^OJX{pb!^pqOFhk1^eMe-AB`G%I$nlJ`uy>o#VptHmi895l?w`2wMO)zF2xnNe zK!Lq4jY-%Sq6q=UUxdi zE%y5V?h70bMDb6(Y^nHU9FCczL0&TNm-3No{NPQUV3pa-_n2)4VL>>Q@8x-a#$$LI zpiPvOV);*p{+T$1#83d4*$4~R&As3;+4?Mc3gExQJ(Yn3m0>?aVEpa;E}+sj;*49bLW_wxW;|!=TBmlO8%n z>?HpS{fz^l{h}^7ZdXNT=Mm^)UWQHdBT7a&aW2UAHzt?zzjSnA)=C$!FZ;vx9S)d( z`#hfTe+7@SlxUDsGR_j1kYS7k;5_CR_xFS$IT9>SwsSnZTB$&NEKk7ZXSSsB!WH=N zRe@$)TLz*b@5C+D58$Dy2I)x9CP{I>AZ_L)5Kk_IVux_9oO{7(Jo*5wTbQH#qe%F7 zl_KfWKLU$yHDE<>2Q*)^qCHnGfDU&Zf)wO2a?(~V+ftX-vAz9#mSgulJcg5;PUPmE z)*;F|_872f4!^c}F6&WUM2(y}klAQSt(vq^a|h#vzK?~AlMFyhe;ha@AI5|C&w+EC zDj)i?5Dk;f$-aq${W?}GU{0=&jl6X9 zFLY43i>=F(a7$ATEM2II$I4@{|3?JupS2V=MJ@x+;6IR1c^mw4kAUsd;h6PqHE*vP ziWY(0_^i1B5B$?0EkP_VKC7A&NK41HpIwKZzst~N%rD;K-%AV~I2>Xb>mzXUyo{%wA>h8y}^*Ud=-F2RT#|mvLQh+`#^8)MTl#Oty3hD#?LgatD zP+wg@tb1d?gt(zq_8BfM2f48~`f$+Ub{x9z0%krdN9P|EczLb@v7XcdnFn)Wal>U; z`*Z<{A_wrcV|L;qHJ0%m|5lu?#4@RSCVjM3l6;i2=&RJ4CKKYf!ib#{J37|D+|WLt1Lh9AWrCe-vjI-Pow+4F7PY2pz60u zaJ9!Mh%s}6_A(W&)r#et(qu{Gw^F_)qZcRNv><|(Q+)2VVhDPB7Tul-xbg=YT+lKp zR}Kp}?F>b7WwI5yQ+F8VUtsRWm?unA{2w=oF;CM=&$CRE7f3$;5*{1&1D}nL1Hbe3 zG|PW6*c8hW%DVnS{Z|-1f%)oAwIhu2z-?ZZR93GRCV!5_Ht#l^m~Bd2ZlojI@M6)* zG!U)pLc!vnD0}rc2v$yXZJm>fJ9U`D%4P!Yo%tF{zgR$aKf>+XWr%ggbn-Jz+WOuR zrrcY`Tm$#9eV04D-4TZ~eyS1CAZ?n|)eGX|IS^2%%cYiVMnQCrD~iwI!L{YA=Ufe2 zpRmrJktRKJ&6sY~*P|Kz%)79!9P@-87$-2HO)T@;{_`5HU^m~-pw}qe*xhV5&fU2t{D4#LdWte-WC&4jP}fYti-Fn-Eod^260 zwx&*pilCq1KQxAGSje2^VU~3CI$he(q)qGm4Kete7Cl&f2PBKC#j8zKFgErNx-#C0 z)ssN(V37^hoVmlLL|=t|!?)o0-yoq%HS)TZlD5ZO=K8vP&IG_O7H*|Hd!7^LoG3cH=&Lr zogvo6icVqv!hz2}Ve!T*ob$&RPMrS_MsD7Y8m^YK;BgHUsYbHZNOh%vnC7o-`E1>7VBX*eFqEW zIe+G#PAsezwtraxy6)CQS)xf}wy<5S8NsaRGOny@2>xR8d%@AUeAv^um{(H5OOiK> zrM}m=V{A|N@vtiK*6rfkE}epaICswPvJ@AGH$%a(3ovSPI^I%Zd4~Oap>xR>pdHH8 zc(^jj`gjxfMd;BDYTXc7OdzXN6-|7yKxF$s9G7;6BL#XyuOZb>Ws$3t*Yp8 zTf{w|X+w6!i%~Rvnoy(@2K*jN;#sUs^Y&cl!q&uq&!A3jFx|FE^e}L|@pEFkOBrHGH%*Ea5=T^tm;K7U{=!s3noPmmD zw1Fl~`>T(qp5B1VzS-Cxdl|-wv`DRf87fpWMqvLXh=>Wsyr)yKXOABB5#;mVWh{vA z;77vnB5gXV(2_)Ioq!ml3XEs}<}kxM7+N$^AnZPu0~bR?SPqx4$YTjMt^bN?L)y9Q1z$jPNdv9#*;22LGN|rNL1lX!k$^Q`K(!mNK?yt36i@Y@ps)**lJ zQZ8fJ#dfp4*#Xqhx&antQplZf5@i?lKzw!w>tnnYhx=(z$v+QH*(`&*!6}l^=tW>~ zy#Ty4&p@P%3}3(hHa>c7K>UxM;(q1V;gdF|ZvB7ibkm~au-bkt=v0=$;aN5$uk$2W-M_*4 z^)WZ0q%cZi2D~Fz*%ipRA~D58r<1P@$9ixIK=oo8dbgKls7%*PnIfBN#0iYnajS<^Os-?wWM3B zeq!ybBKVqQKn2;?alvm@YDP=3)9xR%HxN!jo(rd)HK+D-5929MEu8s`gQT*zpmFF0 zbR~I$PsM&PXY+(x!Ro~C@JTdx%fjyc-@$%v4r~yVKy@LT#j`u9|IZ6hdgusfv%bUB z6h+eR{D5COkmbT>L_m^f3CQobh<6_EfxyN3uw3RdK761`hQ0mAT$S;>3F|{!J+=nv zcgjsxu%+2-uQz;$CJ|b(o_KGb*fD1^1ZXbde9yO|G+D}}NLUswHw@~p{Q%bnWtup5 z0=KQ1Wv%5?nfqlo*Y#mD^!!VP_!B&QmrKT9tgqNH&wwm-j)JqtYS=tT5$fV@!cql} zc{~}9DEA9|X}tkowkJYl!EG+rF9GIlEyklK2jh-0cTsmI%YHQ0g2$5uxb&C`hFG%m zg0~~JDb=Bp5EU3Y$`wR*=39ZxkpEaf!Q=ox>L!DU>_GP?>cTiX 
z9g-FHoUdG)ijHTb-196mBAjy=MJx6S7k58lvr0!Axo9+BQTz`otM`Lo>|t@$jkoZ^ zP?PGc+J*s_r*P+<=@PAbbGf*n4o)(0C_l%s9zvFg(SK?jxGuepMd~N<{2DXjx#}rM zS}qH{&;8~C+>`kHDsA#aA$kk0@Xnu(b1MpTsA#(mCz;d8 z_h(p8YnKw(JF5g*3Lio4Bx`oo&c~SvBOvv39-5t(LCLbI9OtJ==^bOTeR3{H_b=wO z^DW7mb~Vzo`#LmD+Qt~F!{FW-)-h&xK<~E0&>hl^)v^W%nZ<^p-fC5qMd;v9n z1dYsV_-ls*#Phr@opk6P>{X3M=a}`V*AWGNt}1l8jKbA9F1Hwah0c3~c(s|#pJl>6S+^`h<`wTH7nz5a<2H`Ev zlJsd50T*awhG!__oxV1Pb7|^Cl3FiJOgzYC_&R}+ zgE~DpB9Hk6)p*|pW1-#71U!eOfquLKam!Oh?}~f;#owQ?DWw!8rv2jdR0#+spTj_w z0UfYik@(DOz)>ti>F+ZEGGz5pGA~)I&Ny1z+!){U^~3z8d;x9U{ht4GLYDlD&?X*c z7vN^09pwaCSlnz!*E80@_fjiTUOA9^r)Nd7I!^P-+dFW|8uojh zcpT&IM#AmoTI89b58DebfP%x*@z*F*5}{*G`c4bLIr9}C`9PjK&i1PXE53sD*f59= zv8VO%64>}cmH4Esf?cbsQ8cJ9UwZi#;&VHwnah07@@}|m=OY{^VoZhpt#G#UC|c(w zF!0O?@EXf0kO9TvI*?hQI3&Kd#dl+^h}Ntr{NC;Y5H6X61)??BGRTCs?@Qr*uiA4GB{}f`IdihD zW&jys*oRYIY{bi6PuY3x3SV_*AP#lf0#6GRNrqq)9zU-{BhL$Yt6M$1v&nTBTI>tF zt^xH4sX)K^Vo+-wNQZ7%3)zgzQQJ}i&)lDYg)_VVd2Gg*#trbQOoprrlp`}{%ac&! zB#3VBghjKJ=#L)uGy1;a<235|@s)WH*QpMtjNTz>NW_D^_h5@<3KYfdfQsM!AglWh z9ge6`!HkKZy)YU_ZBirG`V2_1j0tLaE8?sMEpml%AoTBRkhhy07&ql6f5>(Kb=zHu zPX~X+Va1xH;94#4N=NYf6vjVKe#yU;vb|txgLw5@=9t<&0?TckF@C`_2^BgS%`z2Aiqv^yGVeQa3cq&wXQ-=EfW&9k)I%=@_OD{@ zLG~_p`{xQp)8es$?fLv<9O@ak(UiTJs%vKZS2-vhWYI?s>-v zB*v~e+Q-;V5;3z)n+|Xr;IA2KR{g*f32*%0dO{{7A3w8WDL+{I-v(By!Y17|&+XF^#Us@3x_liBE7wZVGngFqd{*GCtbGI$VF7+iB`7U=u^cs=My6>y`F%}INUO5(r3{)l}xxai&P%AoS3$Zdl5rO$6V}_{U zeHk4x^ot4xcZOrgMqljMo{fTq%Y=f!4zcg}W*(bcQQ5DO+xaIC1J91dA4$5zC70&~ zZZ57t5wAI?nsN{J2{Kp`C!&HA8#44u5V~5Hg5>aK?vZ@42G<00{AZ2Ugr}kR};|+Pbgk7AxR?^V(#3MiOfMP6TPuHctC$4^~=D z!n%`rxI!dD{3}Cw9mgbC`rZ@Du6MyZ<`_3zaUE+;YhdM=bgU~eV83f?dN9@xzf7{G z@-Jqg)=m@7Doe;Iug>A)6W)Tyn@oJ&AV<_%*$%{Sl(5&N3#RQfC!b#pAdfcdk|}4* zpuENkLdDV0zR!Sj`=rAiT*tEOaSO1n{3Q;a+XcS!pNjL^YWaQ-0j=%*2nFrE7>e(4 zQ?n|5T-$<|dKtGa^&;PH$TPk@`*}q5u)zHx_{QSH=V^v9KZ=2imBy&R~(LK7n07jn)O4%BOVDyoEP6L)zD z9+|?p|LQ||$Mjb2;GH$78D4_nTjRjlVh3O5dj-xX#p3WccH~>cC#+;V?))kpl2-SR z3v)YxDQA*k{~=rY)l!c*PJP3b*EDlx7gysPwV`Mf_?CBDp}{S1m7}ej|L~8l^d%6mLCtsj2HEGgaPf`VoI|=k{kLcQuU1nqLAiS~dVXnjhkoPZ5~CM1zi2Z^nB@vLs-`1qfmN^!l@pV6>q- zguHqVF3%ote_q^1=e}pWgB{B(_{(D1(j16n4ymFG10Y~tDEB@15lWuS6+U}a1=FW0 zko_tLAx`l-&cAX42aRDo7lRP-mhxz*X)flSAMXW;D&r;}K7t$W6(Mfbq9djH)M4jE z-064;t1Pr>=>nEzYK@YTPzI9nS{7!|11}*!nXGmpx&* z@5mF}9K+q{{3wz;$Y&wFZ$T zH`b&Lq$(G=p{c<*_<<(Py2*H!7iZ+9zZ!;r?FF=A>QjhUf5AHHZ;|uVBepN)*|S>? 
zw2L)}_puZFWz`%Uw_BFDP50tl7A=Qb(>PGxWluvqEa>E*!44t{=dsPr3qbhI0IJ*@w`!?KrF(_W?he zJwnIzvQ$fUBA?rs5Bs*4q2qHuSWfDpq+N&FO3h$xR1|OW`OV1f!yT|0SR#038oVri1)x9xJdsR-dkrx z+|4gTrQa|G$IXx(us)EvROg4H>W}1Sh7L6Vx{b zP0G$b{9mrCQJ(Q%%eL)c$fG=#6>~j_KRsDwxwaO%dmFS;Bo~8UN3P0^*x4 z5pNQ@!g)^%x~e7-m6-QbdjAl2PE(nj@|S}Y+qr1!5snpGP3WCDHsCTlpU+I%g*Vrz zlCT@GoOHW3v=muTmjhnVW7N$xUC^fAS+3yj#giC5zZq%qPMDH;U@^XEQkkf3P=7|wcx>h}6XpK<&I zk0y)L{w)W7s~Mrz?Em|yn!mnTK#E4#!{wrXXdTfCjl)Vnab6$vMijx(%3E;0E*g)r zneVScGwQfPhR&F!M6RAwCAH_$z+Y37hMh8iwpn6~bCrj3o5g(7H^wK5`YXJ%)eX+u z-b1}34%oh}ojZ~H0EC0?f#1@9&^`1LNWIQM?>GrWT<%5#dJ!h0VSh@)K-*JRv> zz4@P^Aw+=+G&i`Gt{DzSw+^Dwpa`5^`T+g@rNM;=0g+0Qg<5C*P|^|tJDGPXC*6i_ zoTo&FACjRiPBIXGya`fjd+~yg0qtfy(oW_Glie(VuCYhJ>S-Y7tNWgt=RY47XMcb* z+m*Xa6tdIVVzPg&2Ci#V_8yK@*Y(&HLyV1V0 z7__fiki|bsu-)q-_V0gyOBS;J_T6icct48w7;^{r?g)l{wrfc?5|H_0jEOWh620~0 zxW2tCzq;-wDDUY)-=^bY=f$u2N@p`vTXr8SPa46}-z*b&$`o%j8q@Uq^{mU_$me=} zfuFXF^Y(^gasXD^&Jy$dqIHx zAO6-tWnvU~3yZoL+rrTrx39j4<3Um4Hl=I=T6;aS7Gf@C+`HQX3T%=ZwjATCnx&r1F&Sg z5Ra&2{uebTE~}2gDzgHt?dZddVSB;th>f^qOcvBk(Wg$Wl_BLk^g|wxO+Zk({-1FG%@y{4x96=rFnjk4(~`u3-ad z#Q+|B5?}DSmW9k462;sZ`B3k{*b*Q1p!bRU+}`aH*xPa!{2nlVR`5c6w!edU=Gd(F zxCZ?;?-OjjElVutsuRH{Uo?Gi2M5AW+!CCE(J2+Uw&NLSq;^AHaTG|B>bV!qc7*R1 zWA@$WpnUc{=V$#6&MvJ1=8fZXSqEqC1~Vc%stwfU9f2(;Oz7fkAE9O*%h&&lg4gQi zWJSp2_cIf zfI;pD=)V^NZll(t_+t*tEBTC_<)v`{%5^+g)eD?4o9C6WUe<@V@TlxLmadrtvx6jf z@!WFOy^IBkj9evO>PSRrbib^*l_mj4>$#Q9~ufJblbNPF1_-hZ_weED<$0u@;2^QsgA&RNpK zk-C)IBp_u$JQz%_XAIk9as0VF2$`1&fqHWw`bq{?{?Y`&z<~UP(sbOMs6-;qokr2i z-$H7lN7Niy*W=?QZl?Pn*s`qv1cDM?6dNS`{Br;iZndI`GOvYhbe6DxdM8ADFF@~= zw|URR+aP#sMCXib28)efAmX1M{Ss(~K8r)K%A<}kQ#W%nZ}BK?o5(BmDAFh+8+zx_AjE%4Us1*0^ErF{_)+G9F7AT(Tg#GW`VEnTNC||T44q7rk+NL7j z$@e9==}5#0ZFVT0o(_E%^r_2Z#tSKn!71xH(Bk!bcq?CfBUh1x+V_$8Zfdy!=zby#+IwGq*ooC$;NUgEBb zG>~k!#GPYo$-?DE#174f*Z03rKm85v4oJbyjV)-oLxY}M(+)l{Mo2}_z@vN$wO_J zdft`BsJ|Cu` zQPO!#5uXJ66}MrP=T$V!k7k{$B)H7(@R{bRVAHNngN}a1bIRrz((4Kw2Zrqe5Jw>0mAiKZZ)pR*x)meb(Xh5}g9_NEr$f38aAGhY+ zPqckLf%RMSaPOCoDEj)F_u!Qg7TSYckrsWL_6ZGCY9RYn2Yg*oig&G_BfOGfUG0b9 z?wt;SuBV_XKNA*J24m&-0Z<&Hgua7JaE|>eD4X~Tteg=SuRei60b(xefHL~c`GyTn zndm#)o!hfmiOzd=9WtCOFnVDoYBk02j{}mh`kMy%^qQhqcm}MyV@#*rS0R_;FJfU} z1-#tP97EyT$8@jj6GgF3FxHg|n~g(0PR- z7H)YD``>y&#AI_C^dp+(Qm+Un9I>E32Gro;&`{J-&w;aMC6L+u0E@$Iq3^Q}`gw|> zP45`GH(Y`dGtKGt@6RBn){KgNISFO&zrcNi+59ks=cM-|#f3q+C>z)VOaE*Ew^^s4 z+3gw1`ZU4XQI^!HJP=Pr$Wv`KN1~E)8KrtVVV0;EC9CIhJ+%%Htj2n#6Px)VH&n^@ z5_PhQok`pRs)c8_FF_B@Bk-hIh4?4R()@G&H`CV=g~ zD{#IsV|y+)27Q4!k)HX6zSale!$ui$c>rTzbo@n)!)IVr1IrTsR-~d0UxiUjgyNN6bJA|JfVW;;jd=%I zuUKqPCo@)~;J9V}@SnD1bf7g<-?1ed9pz}@&0dVG`Hde9%*m@&%pJ9PGQO8lCEMg2 z$XK6#JaPX8d}i~ZuuMC8up$%ks*mt0Jj==ck)_+e-2}-@8?OIK2}X~Wr*2-ILPKMH zBHF-q+t&`mOB#k+SF8C0F1kc&8^>EuOl0{>#=vt;5?ht}a%)<($o(=^;<;cmE~~Vo z4bzX~(5V9v9vFaL>pgV*br>W^KJbl7V zPNCXLKr6c%PWf>#S7G`TWS=y{`Ps3UXp}3Sc8qoHUFFF3s5WpM_=cOvxHigZExdu- ze(Zkm76*N2Uizzt;Xpg<)7xp$$I~9bkx_EguPO)MGIpX?kUPTt0kH9`DVe%di8@Bd z@?U;Ik&EpI!(Wb{4oF<{+~sV*RGJjO*BsS{h)p{+W&4xQZo;fk4-v`gp@HNrBS5N=L{ z$GnjA=+l7cD6sPQ#{Z~@M@efrXJJ|k*|KM#=~Ev{rvK(@O!TPxr`ur1JStUPnslob z%ND&n4UO+Epx)2w~y6AU!`Fii9)3`T1AA>Cs&b)4BjI3%+uZ6E~pMVIdbxbxHmzQ<7fU z1sLxPddYIs(Rdyt`Y+@sz9`3U=UzkS*oFA`bv>9Q%F|Jk|DkZ~FW9@Y3dXBdgZSEf z9R2GbCi(*gmDwx!QQF~=XVLH?jzv+49lV%bz`Voee4qCobe4SK z>&%+5zo-v&4l%~z$0BGP^$EtWsfV9p_P4j%3kLtnhD|T4u5D}n2&(;H54YXb46NtFYh}_oAQ&@N#Rph>{$E=Jp&NJPJY9xRegZ!$DrRJ z4|581h(e7DamX1+hTqS_6J_7ACT~5s{dpuj=3z*^_gZs7k_%80XGfkcF(H~Z{W#(* zn`w>y0Rn}5@!|=Z+!9D zv5>Up55V~#_;Xs8B(|OBMle>c8$0tYI%iCJq;JvccQ$?=$pr$a!lfPDICSIO{Vm6X)M;{zr~OSCBlE(T45~AU 
zxrQBRBT&$p{~DZakMmkz%ed}-B|7aaW6-}d!Qf*>c>VN1l6};UnpOp)EcXrG?9(HW z%WbIE)-~LgS6gt%AOo_+su!jV$w2CG0f(PbCk88}pdR%d!_V_jdT$So>A8zXYIUjF z-`$XJD?@MfsS@KE0&@JeA{k%S5BXs#)M(cls9|pLrUNR}me&A@Rf0J3LM6AC@dZRX zcM30lXLqfLbR6>Af*5T*#@)Ph6&HN`2UAx6MgL#jymZDDvB>E{zAW=FZc=u|4-SaM z6_tF$Rb#TIM2A`xRtY2Z|L}s%+WBsoQT&{hH70}&%5<=T+Uui@|xXk zo*T>3(xJ>7vUi8rZAdA%H}5|zsEULavlK{6bTy>8SM%F_?&0udYQ&^{6xQZBBB2*B z*4~OpF9WB{JHoSTY{xHR96dJotx-A6_mV_xR4Rhm*ObUN*9SOaCd(ETMe<`J5^%0q zfvmnX3T|F8Co@m!0&E(BnfKb@7Be=+? z@A&cwcRBex(_qr=|Iq7h5L)q<#ebycWbczU)_;1A$)j1<=wT`xt$PEXhB7~FX(}(> zVnNHbiuopcM?7;wh8)#&q@Jh#!O&rh_0@5nKT%l=q7_=g&!MJt?d)z0IIx&&zdN6M zXL6FcRV?Y^>d&Z=R*LWb-M}%I9OxT=9b)y9vB@SdkH_JYob;ascLtP+$?waswB8(A zeQt2aYcPe8(ShSohP3W%OU zVR)_uaTdNq)Ujjv#zdIEI~V2oxiIX9A_+9p2iu%$AW$`8%m#nqvzJA%a#b7lvRuXB zyRY!CRGn}jOBTOj{N)>-_;9%$IdbJJ%GO-MZD3A%ekAfaQA$KKWFm5NL-4=BrbNm< zry9mfZ|yQf$&o+2?}vd9B>Nwnt|-Kw_+0Mu>=1mqUyelmkR!=HW~6gPDt6w>fOAuA zN&D+SFmOBuKGm6g;WKt#KKul{j!{T^r3T)Nv9P7)BWMU3z`rzw^Zf5QScm!GoC)L6 z^K&cIbr{oMPmD>a$v8}%$Yz^v?{WS}b{;p&fT3q5fU<2WN`3e88?O3bXrD4>{pZZ* zwAZ3w_c-Vqp8{i_jl!sX{j85>$Y-ov526fb^!nM#GGkWxM&c9bG2H~E>PLB5*4FAPG8O&=`d>R(#2xwhG0%Po+ zL9c|*u>RCQ8q{Y3V;=>>oTE$8|3f`rqpSgiaUD=wpAMGnUC}f8J8yLLB*Zuw<6_fx z{P{tf?yep{13#_rcX*p9@h(?gY419Mt9)X&vly_S?l$UdrAztBHR4{o|K92SS$;NHGi~0vn{kIa! zaGLSHcU@ppy9OrK)roz^#Dn=Q1>);r$FFiUA!4MXLV!gLuX;BY{@B`3G0P9k9CsAsgb8T<=siZuegZS8CAR!y ze9o?G;OzH5iq6EZ#;yy)r+L;q4^DFk@uE7<+NsQBo+3l$5G7=ILnO5dmIL72!e(5z z8eoO7Dk&^V_~vcS8= z2+OBy!83UkI;s6Rcp9(dO~%LIxXuXNxv&Q6qFeA+Un~xuzXV%cElHeV7L*5Vfv&n> znD=iO7Fs29Lka(|?(UfUJ-Opi@b@}b6Ty6a_o}d|a!|Z8G7kdRpFyRoPteODj(`2E z8HMWwxLM{goc?S#1pp`*lfo>v+CO=^JmPNKoEN3XAzUAZ(D~-G4D3rg9o2 z*<151<_h3>PKFnpI?dO4PsehWPkb3~MjPVou)5_dbXWU8d+HO|ZeE4A!Hk@U6GQj? zHIU2hiZ%*{)OK+qbfW|}qPH0BdIGTf>tjwyPM_s-vI?%7o6;J!anMs!gz4FrFi>`! 
zNGvrS+wZ5qDE9n3D<@BeZ#1WNcBlD+=I3DJ&T+6bT1Z}PPQ=?jjNJNNgL*zm5_LZt z&q?li57`~7_|P-0Fri!?3iqzyb{Rxs(%Krar%t5k+&dM~lGMYSu@1d!-PXl;yYMD3Y7Q(q>LAc+a<&YKwq^Ze+Ry<=LD(pq4 z?S0IfdRg>Jb~x$tPQ;(dD&*InK(yRsL6Sxp(n&*Q=={+ede>@Ddpli{baXzZKih;h zt8JN=VLvR;wLsS$&EQlKTI(rb^kT;wqKYVwLKDRnG;}Y8OwpQ z`MdpBNm@141q6xP9Gfe1Kz_9+ zeEt}w8OCxCJ}6N4)x+>`@fRoy`3wQ_KA7J03dVGWV8tdLl8tXe{LKb_|Dt@(Z}|YM zf3HU0f7c+MOQZSd?ab}0yd8=iEwN58#8<^>as%%^fbHyXEH*R7OY^S4yH%Q0@a9Xw zuW{i;*D+ON zO(@Qkod~siI7->atFanJFE;J`2-JU&CRkQgE(5$~FBe z#h!o8L#Ky6QQoo!Y9H5w=c&2SFOUMUq6bFnKZnj9);oB2Ow2zsCVst%5ID{eD*iTN z-LRc-De@>NKAeH$70MyNWjvf!IEiUG5!^H@A>CZ5Mz6iTf%4-6z-IPSP9PocI7zz+ z>aT{uf`lAiIAF@}7au`o$+7UejrDg&e!{1(O^E{4CNa%pxy|zTp)h)dSpMuYPI9>% zy|K!IHss0C^Xsy(TyhKa9zF`w&t>81olWqihvh0FzoWZtB>2s$LE)KXk(cv*E^usY zL2bAJZLR6Tq&2GimCL$x$mS320S^FBpuQee>!j zvOd zaxZq7lI|3BE_LZ3d}Mx+Q+F?+t2N70q&)%$mQ6}AR>bS<-&Tn7bpLoqtXF=8yOr4< zM$Hi6pe?L9co{~`)gUEOETgnl5mTfuVtTFLLc{{L_nnh%NEHjd;9PeTlKJ*1h%|e^`I)aMa!xEBjZ-4iQ-8vlrSjC7yD#nvW7%}u z>!OKM<%sXwDBhmPkbRR?Xnb@H=e}$wI=`{zB_8K92T!K>4`KB?IwhlBqpfKJ&d_;FQ>=x}K;z4aMP{-Qxz_PIgn`edjd*8pnc z#$e-l=FSyeg%F!r;QLUPZ$JG2)D#c$O-s|c><5c@r>FNJE!TxHaF&TZRBoW>^#;zY z={J;nc3@6kFXUM=KJ8z7e!6-W>{u9y)gR6RvDSGcp9eM>& z^xWmj6+R4t_+QOjtNI`mJ=CG4nol^ha&|WL?t#8PZs?iaBN{bLi5$7BOcpUG@hd8z zC$iM3;_G5?+x!|mA4cnS%}NnH~uh`Q^~k zu0wl`*ys7e9~f{#$o;a1c}^;EJL8e+rc|L(m-5z9zrhd?#kZAIpIqNigr_)B#`C}T&RjF+Q( zuIXXex^RAGOdLO^vkGTSX6d@}+Pg>Kgyi@YC%9^EN*aboqTX9}SG-)sTUEK>wuOn9`I%*sC#>aiT_a&nVj46VT@?AvsDTyo z$1t<69}7<}5%c*e=qsH8H5-)h9^!4uR1k!#JF0c3bN54z z;@h-rFyAB1T;Xij@;3+b|CXXwv?@4noi5sc(uf`^$;7EUrhqz8qZ4!lG*2`Hqg>Bo zn@$;5X~;ew?<@GwDaT>N9cz+(x}UN8k7K$*H!M-Kq-y5Mbiw>qw)_7o&OfYy0WtqV zOxkM5-DCz1w$)oKRBK{uE$I#I&dC&}FFW2$6+tbneYZ%BS#QXxIhnJ+9> z6~Vzo1E$^O)Cz4pWVA)>lJfw{{Mflz3F!biIHJ);6ZS!Rp|ZFb`|r zw?pwd8T2ym;4&R^F{mvLUNkjhY^W5?-ZqQ3I{6XhyMsAFw3%aC<{4gR?j6{fegi(6 zm{7q>8P0jtEOAwdBPYDH8|=;`@D=}wu}CT$LK-9>nvaD-(_JuTRtrv>Ux|9>ji{jI zrKoQ%g6HmDKCNLs)NH>9xx-Il)TacD++awfoQ=r>xi)kjJx**QV$L1?1{m422%8?h z;0othiS^P~Q+MTYW~5vVK;E4%cLsDb{3; zZqXiHn%nOU&V7ufY`7QKt;j?L_IJ5)(TvvB{6OK1C{Cbt*fG9L4xV*Cz(7}qKwP5B zEn>YS-$@hr@!==Ya8EjR2Uqf^t(Rfb_lewn-+DaMB1?{c*QZJEqhW!0F3$%^kh_@@ zM7YtJD_T~IPb^q&&oh~qoVX2bBJS~tk!G;}fii4mvrNy4Zv0y1AskzjiQSq`T+D8D zepu#ru=8%=TSUuX=Il}~XI(Yu+G~@M&pTj#I6DtHzJZV)KX8x==Ov_v(Q^CYFysA0 zbPZ=Syut}s=Jfz9)=mO5^T#0GE=x_OcVOPtNXYW(huX8HApOJ?dT+_VTej<-^zABA zkRqZimEuwcmhu34-3(xWrY%chYPmLAnonynvR2Nm6b#pT6)hOVD)0X7)vX@YN z`VhFTP@`e5t=KtpJmb{1an8ZDST}<8YWI}zSJs+POJ~IBRjjL0Afzi7*Fv%82i)^u zE&f(3z+Nv0i0{13*#sVjk{##Zj%5=1|MwKGOxL9jQ{s5{u?wN@&Q>h`G#2_Lov`&{ z0XTJ;)3m`+EOWjGkEt-npvhf0F~5Ov7!QKyb3^D^(*l7ZHy}Cj8w@NH!-gYL#QEKJ zzVNC#C;$F0I%;NNV#RB=OCJV{rQf4-@mhZW-lKf+Ee+c9AqEG_w6J(?0Lbgz!Tv|? 
z(D+k_emG%4v);XdRa<+|ZoCBFXOjslWYx&Qm7$n?|7)r%GYnk4>Pq@ht0SyoM?E^5i08FA1;w=2n?l(KQ#EAv%}&&lWj@f|??2tBt~t zRXfmG!V&h4*Q070nSA!rMUWIfhp#et$t{yA#LdejX}3*1|C|`prG-K=&C;4KKQo+^ ze>#uvetu-n)KSoD6a_sxt*}qigq&g?d!gcMZu@S1;yBHm)QzjdKhsO__qpL@uB;(( zU+)2S4T^m3=an$Yr2*p8fT*0 zCmkvrnX?>Y^TD%1H z(~Q7w>tp3+kNYHFd$ zO6I}LybB}#6_OT(3y`0AA5UjkqldQ^^`3AI(#t0g*wNV{CbJ=_~KPJ z1{w-Pm8&vv`-XC~w@@Nj_8A3&$rJ6=Od;N&n+M6!sGqJw{#)9Lt#O^`?^A|J^7q8r zJ?2DpmJK-(`vCSoe#U(oXH303%t6=a4o3fc0AJLtXt|dL`dA~D84O_aq70VRN`VRM zP4Rp_MX}05jOoq*_b0))piiG~mZ$=pWd#*o6`(gagfHX=MJqybu;9N37}H+KZ);=y ztqn4y?u!9+w!0{@NYpIL(_upY(I_*#HYCCYu=dVG{CQPc?iel zi&1>H2~5szMzvHAK9J55`_BA@vsvzX<2+RyeJc((MrGm1QYYMTNt^lq)}ig@`EXI% zlxQTs2JZkJSnvH9mX+yI<%9-|>XD%N)Ei2J19=nSHxT^ta~w}|p~$%jiq${hrDaVp z@^(KqtaXK_OI4_(=K)l^<-yH3U_}HKD@C?)O0=gk8-883AX;aK!=I}usCU&I^zQWX zu`BeUE|#*M-WyIcoOLf;EGeC@N(DHF>)tYwOB#~m1><(`O{IId{T;P9fymO7qpS}h z?BU+N<{@>}H^#0vrf+3mLVdgiZH!@#*9>VIxc|MQ?ertiJ8m4h%^3xk7B$1)8-{e_ zquDrn=}zo_c>&fucmYX|zl(jR&gJ8m9p`^CPH*@{)^mE&i9P0lAT;;mSgM#dnF^`Nksdf*(*-ZDNt4NPHblo< zj-HD=hjueP;dse0blIXzyNiEfq2Fxrqe~X_g2G=AWSz>lDPlZ_r4jIVw>1sdQfK=X zKP(mtASoP0#iL~(T)-kPjKl5)$q`OPxK zE=QL0dvhM_RkX=*4|Z=adLdfx`WO?k%t+s>2O#+DT3|N!6L>x?5qo>2f$6O#^qfAx z=5sH(Nk<;Sxwb;=S)U3Q@?L|J**9?6d>#GlZ^5{B#!_&UBifC*IMXbbo4!_&z8}VV zNS3C!u*{s~dH;n{8wJc??1I$Sh<1S-b!5+yL4h`YVEZSFZH&hPu+r0;Hf?5w4#qz+@BFxFWT|6ip(;>x2 zL!deSCJZI1)0Zw9H1WVhOgZ8M|MZv=-*;cp>a7CRx)cJ!dN&wjq)WRehU2Kp2n_Rb z1Bn9>c;ewIOtjLZkr}BF`*}PB>WmVPizvrETdv}%C==o|_ZohEPO&Mun&n)r=rT5c z?{TdIpAHqe(x#s6)qT11eU+HMVjtES4dB2MGpxU!2#bT|$aw7%^sV-18Byl&_<4p4 zn;gmqiu1)A!`)#?#Vd^OJ;QgXT?QdmO)4YJ@jYpA285Mm)E=ar+ZbK;Jk!yh4qs_#AsivE54drdyoY_$8j{ zOT#-~6iLvDxhM!IDlj_2xG0H-^Odg~V&V(NCVbt5>!<#|N0Gs06@A65$85G!V?|Z{ z?%`bz3z`x37wevBg0e^-cOH9-wqgmIPniEu9_3rZ`19wC2~n3}d5vS-*>CsJX5lM7to#@M@~{;Nwb!S|OA_&2 zgCPM=eSFX?pd(dlAo)Tye0P?i*}|jnSV58gn|&AOJXa^VnhSB!i1%>w6=SkpeC>GB zOq(q6v4kr36yDW^?eT``i3Q@3j#Hy8@%InL8dl$n26>&xDQeN{_dZ~vw+*CbR-l^x zDt_gzpXkfDW|FF}kdfUn_f<36C0s|<0VAsRcruiBeBeipt%H)ptKhuPmJ^L(Y|9N1 zjHRTB8RryfeFggtyRq3*t1Ik&lZ3IuheOL#GrFy@1JGI@hsNH5frW?I_ri+YT+@lc z5sl~&)y9Q77eEmI3KsmJ=;gE>Vu$8|{CySLvbTVFoIJ&bN=^8fu_O1)9Yl+j%-O{5 zKJgdyvGlhSrmRs!iRI>`N%xz`ZniPp`_={73lq48aW>R(17nnFm=m9Knsg05gW9|2 z@Ya_|l-Y9)+rS)eCuxy;Y?fWPEI?0QAdz;C*@`F#Fnc_hr4wT!ldmX%ju%6?pXO z8uL3l@~MLzaNxZO8Pmjg-KiTe==NtA)H)2KShgt2kG*4_$`dcc49?e824xZB@Y8wBQth@3aQ6jeRj0pIqYM2(7OY?@xdJ&}$>yO*-urq++>o4Shc z>HNc7i6xwJni4AR55y(o-Jo424+XB93d}t++3YU}Y9Fk`c%LU+vQ8VEUR?!pR)(}N zR|V(I7eg4bj?zRo*rjhwi&Ztyy}^J6|0%$M)puE^;jBm^N0(F#b;01O0BEgv3G2%$ zP>{7n94jpcvNfylc!D+c)Q=GT9x@<5Of6`~jaAHHU_-iXm2m4$1NwcnK5@&Hr&Xeb zyx>;`-VIVDn`06|Aa6!xIwNt1RvX^-*CkrZ`*7j$i>Nv)2q$nSAaGV7_ek~)#(%a1 z$y0CeUIv>r1?e)rox$B)Blf*rvl!g})4(%ZSO+>t7B8GQ2VSLD;nSNObne?A^7xpC z{_09pGCvjqZ%;v$(MAN?>o8EUuAn&LAe>}(zlKv+Fv?n*3>&c;iz_r?BJ;8PDlXz9 zQ&_I-fE*3PM+I%mFY$o~rt_0DBcbS23OFli;~vI#nYFSE^QT|u&dF(#+1|I{$2DtG zeW4DYuhJ*KWrk5WqE5B8vbIF72^o=Ff>oKf`6cB9zO0PHLh)Adtc6xoaX}L3Io;*6 zjz>($Yc2YK|89+ z&VdMPwpYBzVcEnE5IB{I9*C95<|{v8($5X}eWDo=oIT|@sbLpBG_|HxPKU9$atE9` zq(e(oLJ$}iNnoDJm5mGrm#OSI(tcZHF)kL{{zk*fw^DT5^gCd+^(|C=v;jeZRl%g| z%*}9k0K$fi1o`lz=f~9AlP^q{9h>0n2JJ9tsxeyo`*wVi?Z15hd!)o4ueTo9d(I33Zm}1<#trF~J>(FN_hEwGR7qA*ykNF3#@`5P_km9CBo9Yy~_G%Ma zk^Piq$1d~HEEi*SpZ%SVsnPwjs`g};<>);f#%tD6h3AJX`=GlQJ1 z_6@KL8BS09RiG0(wqs#KFmwzkgR^oNbH9Y5$DbrT>Y+zI>@^`*{H)080Sz+As1!mT zsnMC%S3&)g6-|o#DfYkd78Byl$RRs<5^!V-R5*3QP9I;E>og*2j6s#7@ES6oEXJ@o zS|FWr0j;!|pZW4JSjSwSIayLP{?*Az2>HM-T_AJq5ZjF!YAmaHsRD7F?0_hzE z0u!|Y&zW6fn;Q@Lw%J#?t`EyFdF&>vTOR~b6{;jY_Zp|+_6@$BY(QCi8Cq{gfhA?I 
zP*^J}RF%W`jLk0CwcWAm$VYDUYfU1y;34p2v)JP3a5{|T^^Nyr;(@XtT$PWwK@I*bMyFe+Gjq8dS35CM1O&=i=qVxqw~mIQotSRr__Hb5s&i_YKoP zC|2g*rzBv=RW}TZ)}zV#H=wHAg)5cM#WBrAD5&dkEY$uXDr1?e>^KdsXHp~{UUdrX zST7^l|2a0z=;Y_lQ(_J?L%4CE150}nxFlYOm%VR6@8%d#vV}Rbe#)?ZO9{Chb?)&U&F|MCmSUCbeIgW`(?i=o)h>-1J1`kQUE9yZ#sj zmg$hbAqyAZ`h%Yh3`mFU49N9(fMv~9=o>f(MLq@4@Guc3pHZQ176EWzqc%Ca(}YOq zX^^W9rleMI5T)zb-C88a#|-#!ZZ3>Xdeezhn0XeRI|@acRZOT1;}!n6FHH*zb!b=Z zLkyg>5aK5-hF4Xpl&e)FzjC#x@J5X&a7GO1?o%We%-%!(p-z792}P2_vWMgA)aZhF z|A5AhRp@y-pHKT?hR?T1k|%;;)UUc6R+{X>S*LjPoS4BUeS9JQwV(hCdst`O?Jopx zRHGwfl5xq%SU7l>bMlBU1 z=q|s<`*roh?>lDnO}jO1TwqCFe`9kW4aU<_Qi4OwlU#Qu3LNB$_-gxpcy*h7UX0&y z`tvo(+BB9QDo=(7*I7>cO&#BDElZahOVReV>F6-Of-jUvg!MMv;49q6xyMP;6^t!l z)U*wYB~9_{Vm<17dbMateJBdw4(Db2+Sp9tKaov+KE7f(Sivm4f+dTBAv?(mMl)~h zGH*7oaeByguQ|upO%CVb+brDaQ4QbTpU3fkN8;4XK%71L9IWi9g4?FL6yKf27{3Ax z&K^z{41MB5yFb7Z>r|-g$i?j5*s!5aX z&pO5z8BWA8&!4~*Ro3ZI4#wTS>eP#m;H&~3qIOLJ+DL_gZFV(2KVnR>?X&sKA#Cof z05IJo39??rL$<~S&cE;*8Y~L|=N$*c6VxnFe!mj-<$Z>i9vURodJyam&cMP+diaUu zNizIZNz&jHUX)wR?jlxjWJ(zhZLfhMKQXjU_ymG5=YrX>V(4$^mFEQ0y`tWx<070g9bodJE4D%50iCqR}dmHxB`wtf2ueedtU zeOFZ)c+g6`;ie>M_8Xd>ZvgsNQdA z`do^6p;~lfS~O-=q{HD~*HCozCltSy#$~2Y0pDt3h4mCvEIfiGjEUUDVwTS2viS70 z=_qfT0)7vYK%sGnal+*=I{!Je2tJ|{$pdXaC9-mNB|MxiLFzh=!K%%w;OR&>EPRP+ zhm!d{_GLKr$_}u38pj9brxXZQ4B?fNLel!}7AA(C0e>@Pdd^v${ABjuDsN9dE;$8) zzCVQ~i(Jv~@yF5s!+oe&pATz-KY;TiO%A;z-~sbG+VS?B^VddEah(p-1)IZ2%~#;( z^AaY7Wnm55y+yt;LV=k83O^nNyF1~W#-$>-yHk@)xE2B?Bidow1$7d1X8~@kKZD~& z%aHY(zN4DgUamBx9xo>Jpq$EZ;&Ny>(Jw#G=C~)I`r6b+ zG6!$6UW%Pm04JHz04x7lf$rD6Kt1d%{#eC);6{wC7I+O-U9f`Im%%Xea0LI~jWCpM zY(U95n$*Tf2~2MYX!^85XyL7CS>GyrMAV7d$U)FpTn_TO$6)>L5)e#VD{9{*M(-_g z(0{a)Pw@MUdK-=DgvG5W{r)oiaMUH|vzR+2stU5hckm%05-`?f%8WttBv63}?V>p+b18a$4P%aPxx%H{pXX~&vuFL|;k0njly4n(1KQfoVr-2Y z#Bc7#u#y;VGRv;&tA?Y`1|#Arxt)tT`4MO6n39!ei=ZIw3XUnR!O3OXjsvAM@iJ*XOzgZROp{Pi+Dk`y%=?ngYr1M|;e zS5iHcoe6@swhqi6aFKWZAkC*re}s3{LNaifhru$I3p2UQd3|Fu@;MKyMTJ#p2!)|g_jtYG7;w+Fn(F(Gf>a_n}EI&}3i$j_}A#h}kXtfUGNcgO(lpmjl{wAzXol`djLbEsS*E)%)`RWnx4yx` znPDQEgBhH`oltc1*QVJQ_j0y}OrZZ(Anegrq?ev`F$PN|s+h?TovR-qzRew*dJpiP zS)E*m=0;Fr-O_%L@O@VoDso>?Z@V=OiH?R(K9$%x(}LXYmL>AJ72Nt^22}0i z32x+dYdToL-uVU%5V5rzXHL*Yo8@{C&}&BLB{X9m%bc9Ks7Iw|b)y^WR=sT-jYYLe zG>T?m`%p0Wc6IQtli5719+F&eDq=6f3#2tbt%#Ik2cYEcC!!#q9l=KHe zC$2)FeI*x|oWQ%--$L*{1oF#%@~Mv*FRyhCFBnsa=TodnYaMeET+tDaPzb}yi~2-< z`T+OfUIos+wg&InJ%E&h-FTvnWzjr8p?QXo%KG*|{2%rmC?14By)TZ+3p?@I8VNGz z`Ea^*vlgk|ph~ZNVjjg2!Thr#b}yEYauhsIFSuJ2gK6K~VZjUoT9j!?3%9F5%(W>H zo?z(GhA@{C6?HN`K*(yTTm-y`Pu2YdD z7Z!s2QZG35h;`J`(qMDv4cz-a2K~mf&ehxo&e!iPx3@-#xLt{cWm3x2ds8JIAEipv z?Avg`-6VGIjo=;ZIXJxjFEr^_i)?+JTe@Ou0nWNlycK>!7{Rl!WI$+OcWB6zu zjpOkHg!R<%In{q)ks)(+SAOH&WE6151S>Lo@l7Z={}z7@S&_P#5;S?rNhthVg|j!5 zK+b4yd}kkydG2+PKJW_^haH2O-o>z3kd12cHTWZ3kx1Q&MZwLxq9wYc!2B8Gg$G~6 z*I7^Sxr7`!tz5=(4GJvF69>-!#fu%}Q#ec4GgvtK3{)-8;|A6jga3+iQ2ppK#Qhr$ z4l>1@&7r;U=a4kHI6fDXHmu+rE+lb*Zbt#5aiPj(m$i9d~94vi4j znS%$uNsx&uMkI#yyz<4ATXO0u#Oi25m)j>ut8Cy71_WYQ#y5WOs1F3To8it8Rbcc^ z7uTk+e9Nb|T+*_qs54TZW-xDj-m=5ECg~S6ExgZnYBNXATXpmmMDf?tnX8-q%n4P| zV3uQtW*v9o)mn2hKkgPgH~e63fpUJ%jYN$1eZogiHKaiqjnF?<0}P^uQ`;AYD5t4L z{FQFQ%7i}H*RM>LYUvWUnmQcTp98UnCZk~b1M%$NtsrPq#-JJX@K*9ZoMcYxMTK=R zt}+YLN9dzN?wwqUmibYGhN0& zF^a&tRYAPp$t2un)e14T6)2SUgk*yTOrG@%1-_M>pg#}`=T!5}Gt$5%SenEwXM48| zt>WdMx^Tg}Nu1}1ZP4U!L)<>83)XzU3Pl+#=kVhLXPfK=Mgg-?P%Ovx>q%I3(S|qK zPzS>A%~OguzvmzZ?XI4bo}|U0;f1jkzy8eVg469q;5khr%92fWrmn!WebNjzQY{vY;0cp3!W{JBl~X}V8jv`sdq1BVyt5rQC!$HgxR_A@Oe9geAW|LfANen5zX;u{>%n$p56cyMmUO3`>}rNPR8TWDHqvn zxCFW?LgJMx0srljC*|!sV8Zr9P?C@(3t}C?S<+Cfma&7g3^pdmrgdTwo8^t#Sc6kb 
zR^#~K0=RNVkEnf&hWzwD+~@@wwB^7PoZj(_&7(ILYeI(PRE#VYM#a>nX=D z4B-L>7ULGUhx=l@vySc{4ulItDGpE9FC2LY>5G)pGtSwk12#1rp!$^$05iC&F zgGJ|FV1|z|*|x_N`=3T|*LNxq{i=5;$Y|##vYhs+^IKq}Yy}?fXVXse*$}TEfQrXk z;N&+$y4zEkq;JT^ON*3gpq-0IqrD8P9ANU3vu9aC;{65(GoH;XOVkdOIH({R8VH#=!RWlX&W-9&x=dNrjc4IjzwOkbb@e z6n@H*uj7BCK*^V{`ZN=>_nFdlp;pv4hv&=(|Dwt7YzzyQ0b9RwSpIJU^L7-0&hRk! zxZ*2L-_4kPvm4Q{igA)Xm+)S0Rb1h{ciiW)AM7rlz|GAyB(ai=!F8fKXb)YjS}op(4b9? zE})jN1%f8=Fk8erBJZ_vVuT8*+y8^xK@>^gMh|h&?kvdJaUY!XI(W4|6Zoooz?FLC zVxlAae|;|I%ZWQQ_uPikQ4)B;*U>za*X@j=$Hv3;aLT{Bg_v%n@bWjX8#P{eZ2~J9!%uY^WVUv;#b%nlLP@r zHJB?jo9%LM@R!}x$n3bs;8k>nGniKbmk!^7>#WD!BNdgeQsu10p^#{96-*B?s zztF}fob&2pJFNdc;m;0rGC^M#-^?6F2U!lZNIe@4FJf-ExTEl}b~t%)L!FFWq)J@B z|AwTLI&q%qE4cDak_IeTkNgr#l9HbQT4PynSMMYzOiPB1B47KJ{QUV6M4))wu}F&DdDrZNHNWEEPR%VGQ(psxEPL{A^>fs| zYDhx*cQWSQd@%Yv8-?fQLEq8ap!dcCN7vDZ4U;7u19&cgYlAPdm%SV$djEu7Z5LX=I)!ajP;xT<(#Byp|&aj1s!`p z@VbQabadyc{GOwgq&I%uqCk?IF7V?un17QwJp?IwT+xAeFnAq+fxZ(kWbzFR9KBdP z!N&&7h84oPOl`W##E3RTPr(;=HA(f;WW4m^1*~aof}!4{aIQBJX9i?&8hI_?chwrb zx7$LQzb&2|8-t!#uW%c^tzf2IDhL~IiXunaflZGdy**Ni$|}~t`#N1ZPs)a_dOZUi zudC93JJ~GqeFE-X$C#;4BxqgJY;62hff3#&Wce0j>Y8{Fj$AUMrPeoL!L-->?nO4_ z*bhshK3$8R*{el1`9DC5M3yyI*T<6v_wjGDG`Rx9h}-=x^qMKlW|1KfCp!k)f@=7q z=6XaZYUGx@k%HKDy||C@g0`_t){9@7B<7<$-=wC;3wFOP@U0Bs)dacR`gk#{3f7<} zQa^&s4VE4IBFo!#`0%=qvheKTQ&{*aS?s)4Mw~6a!0&KoP8jx?%u^UbS=)F#{!xx3 z1zzNaqY>3hO5)qcMS!i|S$MfnhImac;YCGFIQ_pRxch@O+b5dgvep#hrUK% zgL%AL5o3+!p2d~HXCO)A9hY=^1e{Tg#RF>kbkC9h_wS5O}tIGjMpuT#vi zr;fdLhFD$r6||hjL9qNO9Mt#+Ll=}|`{H9TTx?16S;s^4uoa94ElImp8RG%4eBliN zY;>QAl2#osa`y{pD-Pssi-fc{Wi|Mp{|*ahmtme~8=TiYioW{{@$KR^SkU#HcmAu! z73Znb#2s(3%4!WPJZDNYMTRu|Qv;q@{S9L3+PIyu<52n71w3-rjI=#;h8OGQ=shic zy1>Ss6FD}*GFN*vd@o0G?mmU?fpD&Jh6UL;KNk&|Yb9Yp2a3Mt!^W>>*b%o4@<(37 zX>W{4oRbO7Nm8Jzlomnv^7&Ag=>ywP3~8HM`TXyhtb>~d!(SN?(_sqa@u{nC?s_Oq>i2ZPNRO%b z=x+obR!E1~r%s@6Cr3XWkfKk1n=;PeB>w0ODbjr`ofpWM6gVVmu&lH+4f$pWo|9K{ z*(Xa_PVO93-T2M9^=smRU1r2?2FDEw=AwPO6pL<; ze${No(N(3XWm%~8uNfYTQ6Y1Gn9+frr!dHPwE|@`M?v6qZVvOpc)l^>LX)g$&#_|A zjVQvuyb%cr9u1Sy2Ox93EOz%8V^6msEj9bgSMwb(GqVei-IXPQkB3CI{%V*wXbOHO zUqGwv9UQQGjTI;BvEiTzeP;(0E|A*AwO^zXU~d&V+uL*zqI zVJhPboh(Cl2{&wdlFiLBvmj5u7?JQ%EZ>uIkMq6i0(XDOknIL?^zh$Vcp@bVI$n;# z@f|BrL#rDC7LEtI8?KPD?JibKzJksd^Od}w6Qf1MH>TQU9l4Fp5n>{KHJL2e_9U16sq74VV?G+ z9BlONK##}P#JFY{`5Uc37o0cd{d`OC3Cq1W*N^7w3)0b7<{)>?@jZ&Kr(@ng4*V{V zq+<56)`25hTXgPu2zgQ|j% zWTnNS^RE)VKQR&J*BFxhMu%Z}qYTNM$ufz-&(ZgeD(`AE#2B#hIP~WlnEmdBmBM$h zyU&z3swSY|@FQNYaR=;Xy{IPkY>w|^?`fU0;Nz-Kq%;0tS*;jC4E3nr#y9NlY!52} zbm))po4DeE0kMhbD^c`V+lQ~;q|0E^IZZ>C~qpz@Ng+4uz(20LGu(_qP z3~@SG4(_RjbZF@!TxmWCV;_mJ&BOx_EHR|3lM$EB3`M>`e zi^9RRaOi*x+214wml;E9jLZPszi=?o`ngS$ z=y`nP1^GU3yHk~1B+OZO?ku0%IR-?em+_*MXv?2jP$K&Uowb&Ulbrl<(%Glr&xb-_ zl`5|$e$M}tS0?U5@mze#Uu^4&=NblTu-j0cOIqXyW&v&JJuML&b~%B-a2q%8ehu1X zujSKAJ7I@Ao84^`qMi9Id=jNftKKsfc<(Ser09ayHAgY0@d@0jH6by2|8Y;QvRyP| zk(pe14$k(=8T-}-%WoV7`L=eh%_o`v+dZ6uhYU`)EriUWtvEaU8nll5fS&KuVdLE( zxZr`HeOpKZtA=xBn?69~O9NOPW7+Wb z`R5RtQVANKe*qikK%QqEq^`&SLH63a?zQ3YBv(kf1*~7NYz9|T^AE_qW9+m>!nuAh zp@lyGyU0SD|tR&QVUpGn8$4FKZl8}gGC4{6hGAbcak|ZQaQs;f$ z$x28vB1t7}mF$%9yT8BxJswVHyzg(T1MeZ9pC+vAceU02p?VaVb({`1r}e{4Z%Cu`fN%xs5vmF0%}wlX>ob zvps#QlNvQ>xQm8%B=X&(FOz0*&v>(oaJxus6YsiwHdcTd(}Yg0TWXC|_n>SEl{{@4(PH{I`KT)2RgDPWLm4?5OI9FNI6UGm4+`=U6`=vq}sBz#wwK=?ZT7kNKmbPC-%a3*K-15Zq*6fNP-`BLveR?UFp@W%uHluwrc2E<`{1 z4ybJS2f6c0;o(k0k~d;7t$5@H2SAISlhPt{>a@tPU3LoO!1J5z|hCGR%u^QbYLNHsh2tJ%rA(ei1 zcwBt}25$ZXIiEwoeY^^FD?SR1FHE@lT-K{#tkV}m<;k-kB`Wc^K&VoMf}BqnsMU(D z=iJft%U9^4!y);nE6D47!b{zUV635~-|Ek6JYzOqcwtiSMbwLYj@pN7AQC2+eb0d3D5#_nWe^7$Fdhucc?@z!?SEo5H1 
znd`a!@%d0%bPl{jzQK9H32gUFKq+5kQePI<(>xBVG-2M#q?|hF{fpWC#s1KN#8PVg%Zh^A(FBrvK z%Xv!*FyfCAtjWKH@y8<}GTv13>Qgo-oBV@AB?{z9un`GlGtAp!5s{8*f}ZNH&}^7`(_TqU$Xji#Pz?>p5c#;+W(>9SOeNz)`L%-b%@6bHC(@4hLm)q!M>lSwDJzi z$!HHE$4=~Hy*D`=HvS6wc^lAn*9Lw%%Lw%qW^wuY?5<*c8oPgPg#)ktfv?~Zq=uT% z#*RSFU&!G^FJ;>MWdT8?sBykgOYd2=jO5pnba+xJDg<_RD*r z_mVHnDOMwyuWfNgupUWr(gpu<-}$C1-!ZSz71~dA@YTeOW}RM(BDdcNSts!d{e)iY z%ekZ|Azf(6y2I87xbW-6{EC@Uv~OY(-{{aUQ8cUNdUsi|PQqXD(LaO9A%{vV9CinW z+-yS(F+xqojsC`Z_SQ>EA?(-|JmCEgl*=z+Y~)L*s!V5mmIQI{-19KL?kY68Y~zhC z=+S`KG%Pz-i2HV%;BAy5eX`~J%lq>|QB#2NOU$?swaegPCxDFjM#eHxBDPb_=!ScJ zsJQPDmwUPwhEx~fpr^O6F!B)Wp3Ru`S#O~zelg^K4n;?kB~boU4aU!Y4Iy8QsK=2C z%(4qd(+YF4JHUi?{9_&QAu`D8OOfnRm-!n@*`C_FNt|EEvK}5rxV-fub3E+k{gd`G zKfo`nsac64Ly7p!@&~BIT$-gd4(PLD8k9R9=IhTr0yn*_IMdLK-ekY6nt89eezgHC z60Bn1Pacgv2AvVi>`CIS#CU+`9*y#u&0^^FK*S@D~U*(4yDx zs}W$`-I}9pz3Q0>jj6xH5t6CkZmK~y)IEgUrL5zyY6Grr*bl=4Eol0oPm+@krbLsz z0sm@s=wW@_vFxrbAhH32Lb@>g?mF1-cLNh5waB)0KcLj#lXH=F6#M+prM*hC@r=G9 zaeJo<>CP2=sL^;{<*+8{abxq>Ia9!?=pe|hIf@_5WytMW`t z;ptnzYuH@)HQJ0O%#9G5b}g@$u%49PS7EOJ1@CW zQ*JU&Q;c(HYZT=5m~t-WDtu%`KJ$m&DC+ZehM47FQN5`e-Ns4L$mElJ`=c<3*EgWA zjxfHC?*<+wr9t7RE2#D?6~Ae*{LO3^-uFT-xHap6OW9V=WA8lpUZPCY8SiS%${`Sb zua{R}_8oBQZZOC!Lhp=CSaW(T;~PhS-8sZ#4J8|ovYA`Y7@gQ3K`zyc5t4^$9W96}26wJxsyI=NtImb5vcq5T`&Lgx1r=Ljdz&$JC0Aix;yKofljgsK-+|Dl zH#xDe6JkUE;UeU4wFT>at4;y;BV`bI;a71$++Dc;)RJEM&#*jxvE4uXRaiWFx;mn0Z%a-=Xy+Uku%} z45f$f1^*nD<(`y*ia~ez)Dy*Mseb@oyC_l5O?SW#Y*^>39!HQTaQHo!!@BDV&As`6T7&|9h=?f?RtU+G|e1Yc%sZil@A9balLaU1zy*GLSH2(DF zMHX{F_whfxYNAH+7i2)R(hnRwEZ`3w_KgL7$zjOo$wSYh%*A*u1EPEup#OkA&K(^97mvTj{P6^v zGxTw+`T#zNDaYCXed_0P46fzt(<>`>V`FU+*B6)#7NX-A^L#f7($7gAA5^9O>$Ly@ zanBn~x^u_?IMvC}-c9GYKIYDNt7bv`LUUm(o&ne2Cs1^$m#g$hh5euP$&$AUT>g0w>mN~gRgj4H&dSl>ImOt>_O_3*dLa6kGe})woXPl)(8}^M56+sB z!4rxgG;T3yj`|OuIq1;%cQFvSHwtR5f91vT`S7@IKf>W|ym!+Dv{sa%-{)M257gib zw#I>X`2g4q8cd&uRY2rHmOW93l9*)vgOtatGt#tMeCO?ERA={5c^@tK4CW+o9rg~o z;!-iS`XG8vJq0hD!n)q0HU+ZD>4toZP{(MK0pVGcP3b zmdMg>kS9Zr$`Swb!?>-7HAs=g2xw2z!9M2km^9pq3XU{zQ}0EiNSG!5olu8zU-qKQ z-`QfB%_5>>l7(LXmN73?Giq%;fZ4M;*!PgJZS4MX5$Tq=W<)dE4yoZ957di8{U^ik zl_s>m;U2h168N8ntf!flhdQkLL$&cH_F1Oa8htvrWG;|cV?#YKd|1M73 zVIhW`6{Dl!JeWP`F+3ZhMl>Wg1ekZte{m+?_#qYiI)YJXxs5;Hd=~#(WJdBHM(~Zj zF%nC60tMblFgIui4nJLtzwL~{W%4>c6mcMbnF2s0@97Sd$t&sl_IBZEDz}PM=LTBcdgt-0!=(IKZ+Do=@2ROE?z} ztY_Z)KqeNTF#L{W z-O7E~H*P07KF~nRBNpr)?;gK`F|JlQS&+TzW~7qwuUhA~!9+C$lBMz!hFK`nhCmr& zGtZKETsg^H9H%8!^AuR`GnqSh{0fu@p5*=5$IopjF-*if*Zq8Exh37k9H8oDk71DNB{b`MiKCQdNzYgl8o)J!#|;_SJ?bX< ze~smOHeZB*<7v>CtHcW|E;Gwwx2;*fpszT42Sz9y)ZxPk9<* za0i9==krI;{=q8S1zhBXo6f#!tQ$OCg~#{G)V!nt?zt`o`Ev-SmsN!ariLyF*GdkcYO9B|b`tQBR%z3;R;TYo&>n!88xn2gfowrac zQ;uv~ewcYYh3IC#1iXhHW}O@fUlY>Er}1aN>4-EvD3uF_^0LG?h(qBH#&gP%>jX294nPTWV&sT&|X1 z?N$k83f3sv*duQDJH+>9#$tFb%VpQ?hw$Kjj!Tgum!}M1-ZouoKKnV$Utmmz|2Cns z?=j|z_#{5oW1SN79)5X|4$i8+jL+>pz$%r4Sn$*uRUOJu@ZlO)Xm*)->Rq_>?2mkP z)+H=z=*K>@(|mc!WYozpr4E7lFuSq?J98wkeXk0c^qxJ-dM)`Yu2MuO1a9L{IZ_jI z47Qbrp#B&$^q$&|c3BBv-K;==I$6_KD(~RGAIy)&avrX(Z02YD4;~Ck#j2G&C)yeY zKh7W&nLoyxpIM*iiUkpMJmD*gyP;rv40NW{qQKYCS@8NO=vU|PLK`c(ag#CQ8XUx> zvlDTR+-CHf{gmyMnNM{3Z_a0?7HtU?5&a1=bd;F_Q5`fLCWq9c!97Fr)lHH3vR+&I zxN=_P%zEUEU)j4zkqQ~#)9=nh==}K%97h`C-^>%R#z>aFVqUTl3oWpps%hsS_Idft!@PGqX&VxeX zUQGV77+mBxqkZ5d{?W2h^!&3MbY`(EhR!+&nv}$U@~=esX?IXG-~i3Wsx)c79O}Er zaPt-x;L|X3vV2G|Wc>Y&S4Ig)X=ejxyJtCGUs8(t6Q1yv`BR{>vKBVJQl>pGu0iFT zND#erfxFJfFnN*`4YGAW@xNmD)czHDG(57+>u=Oz9x!w5wu+xXJVnOG%f$bKg^{4YaUYTnTgQnzp6(($U)d>rdoe-GkE zTbUBfOTbOyWT@XMq>Av0t6nT1<-?9~LhlqVNwOLa`Cdki2=;GM2T{+Y8|?p_#7}P! 
zf*|gwB#dPWW!Ox@Ubde%G*P1Um0{R<;W-S86QeG>gDo1X0L_?=zOo}x(?kOO-###x zhZ?=enD#-ga?ImoM5^p<`N&s`xUi!~AxCKvoOTESH`X`RNY){>LJ>{08i3Hp8fbE% zALjb&VP5Ee+&&{6+Pk!$8(6?tBUauZdcTer2-k{twq1mMi(YXx+b=_&?r*LtE{w}o znE^Go#=-c|DoAxOBzIcaoJw;qj(13fZCg{ZWYi02h_oTGkIh&g-5OtAIswzK{Rb14 z_2aC$D%4%i3De3fsKDW@b8VXj71(6*nKXQP%H z%X+JR!G9&XbQjA?m2arVUYWnV&Io1J_niaUm78FRohp&5J&OKudn6i+g|z5S9WI$I zVJxft;;e81EuG=PdD+c?g)5Co)7NHDeItTL+y<5bj>1CgGx#Gp51d97@|P6)@#K&c zb{1tls6Ug$?egA?zpx1ImspZlq5Hr)g55V5Hp8P4jLCd=Y_V{NK6n*MxFZ`2(A*gz z;-NZvy;{M0?G0kQzK>Xv^cNEM#;`q7D?g37AEKQV(9h>1MDKow@hkpuZ?5Z74^RZv zwJgVdbU3GORK&9XdwG}Yb1=Kjj5cZO)4qqAc*IP~+*U-*!e{VgrY$XPRzg9GBHpGVTK9~= z(REGeZ)3)9@-T!ScVuWt+653#K7e;#|AT!)Tey!!e{g7v8r`?R8IIXXP*DI_Iinv7 zB4oinS)VsL=ZPtga-rTqn{Jq>M$uv;Bqz$z8^Nmdu7Nf=ussn*489F2d%H1OdM>o? zNr1AjSJ1b;ox9C=vda?6aAZO)zIbs87n&LnSHq83x_v1>Bf^MYx?6`uk)_-fJcOzz zjHroc0Q%1V1ooi?sM~!X1XdQ30{vas8TyRzl^OHF-h!CvmSgKDO*%r{39dwydi*-i z-5+d3wo8dfovSO}8fHzFuhmCQEJp9qcc5U<6i7XzKEE}~79#u|2 zf5m^2QEui$uoayLw>N{%!9k>C;TL=wT8T%y6^Y$+9^;iwaMGR%tdc&)`U5PRwky*O>6$cvW$s0F zxt!aw!5BVZ3kAdUnb%s86n%cp+lKAI#DE`|WzFskwiWQV%H+uN9cfUuv{i@z8BU`euq131NAbzH{lE*SmM9+0S^lYcBKgjr&15{sC+f z|1Y~Fp!0IoiSz;~`lel-Tzb)jn=VUJZMFt9CHf)$y^G`SJVfto6PkV15F7Y7T+SK5 zrK!bmZM-TGR_Jhv7sjG)$VfD6W(@Fx@4S1g0^O*37E^o8$iAOPc&F3>(ki6KVx)!#VS{ZT}03@wOy#%-4E3gQ;5Gox3JUBlu(0&-wdHB>%& zhUtuZKvP!{Vk%6vF?`hf)`bxM}udIDKj> zcrnL8gSZ59j@m*?vJCktMl2Qh@bPU?aKw?#N&Ti_g>f);jdDX*Zy751`bq3v_8$J- zH6ZpEbUE*`-7v?^l5E$vVcZctnleh3MvBLB;Rga?*Wly0lP`s=;6mK-HVR92H=_gJ z0RF;V{Hu=vc*0gdR-bgmb;dTdaOP)R%v@Qq?+UO$;SCaAigYSh!@(^s%#X7dpGs6{ z-GCwLBlEQG`ron>!QX$!az6Fe^a*43-D@3zmn-CH+{b^|ds@s-ASZEWY7MqVrhx6% zW?qN4pn-dnL3i{M46JGanKLC=b<77;MIB73VaM)TKk#zXgmh=k1Bm=?%j* z7b)+D3HkT2-Tn@SCTjAr%U)pKN*}(toUt=cv)-riUask0IXu)cqB*-VpmdRtzY-!% zXZMwf_H1`}-J(TOy4XF6iX+mkvZOCd21>@>!Nc`$AaC?6UbF2y>}#mT@Wwj+ zz@&c=^lvGfG3(N*TQ%sKC{NQ1k@J)%cup=24^RAwkMos@?$~3X=r$Kel}XU^b~)rZ z@?1olFYJ4xOTB(K@WNLkvFUL%sGMcqhRVBe@pUC`t3nvFin-YqFz?%hGZ>x0=4Eof z*}AEQk|8Y^3(**_uFUCPHKCeQyfF0fJ$~Q#w|HyZ0W@AFplvzl@sBg})L75rzva(^ zIjIR4uI9+z2W{#jx0}~~z!=wu^5Mtu*P!m#0R3NjK+t4_ID0b=x-2BIL4)XtrBM)LZ)gm>l`H+_gs-wSP-a{?kHtjN>cB>Zpl52SXU@MN?QKsH82+NPG zhIZ4jj3)xT{7E0opV12XQ>KEp!XNx?a||PYtMjg(8INX8E#E8M&wJeKNAorB@YV45 zATDIgT0a%8uXI1Q9b>t2kH@^wPaTtd0M0dNlG06X2vNu3{+B51T^YnnPS%6}&CmS8 z)JPOL|K^S;T9UF@#x_uQmqf17;2w4Ng7~sB4Ol%BLkCUakGS5)OQ&1VaqUOuyFGujCIz^`>pa}^Ex%&q5K{mNQeQe zRuv*y!|t^fjlw&5*YWT<<}A!Kr`f7ye8|aeu)A^}BHixe!~R5c-`)52L>cXx|W3niBdJZ!VIdjc)(APRkca^RHlv?`4SC zwFXyt3?{WIH^3xOM3wX!nSf@FfSYOALx%#$A0Tx z2vPGv{f3L&rBer?o_V+fw=l<%s{D+^TX+`p^n5(CA0X|_vg3^?Hv1ex)gjE&8!9w<2)RRY(myDBUJ_8c7 zv!KG!8GLU4#4zDUl=nS`Ztpc3xsr^Un4SdciUfd69SwiY^%M$G8?cx2?8u>`2 z54>RL$zpZ;R%oBT2^37UNz^4r{Po0~D83JaJ`vBCeH?&{xZxOaWitvKc8kk#fcF;3 zQop7P5Sr->=Zq|=^#^_WU(jGuI=dRxzmtt^3y zn{~+RNe8g_y#@KwJOBw_%h5nXidu3*Kzo}MemkOyZSuM3UfBvCQq8EB`5M0TOQ9rF z)PUzbt*MF9OSnO;i3Q^W&WkvUA7jl49y6w`(;IPZHREZ62F?H8in$(TkfS0C$*+I# zEW=hN0o;r;KfQ1f+AiLI3hJh`cpM z?9Xx`ax<=?qvJ7nb-WW*3e8A{YCeQ#t5C=ELGXEn0WqjorY~nV;0T$6(EqXtC&?=l zj~I6_yemUbXC6SGFHcZ$bt=4adk_9SS)AyF1&sA&Ik68cgJ#^ya;R*r_p8T_Srlb* zPvDBKc04b!CN7UY@$nU>U@hbK^*XM=wBK()xcfX8_HO{%{Sv_Mq$Oj(Hse9|zS$%g zk+7{n80nBx+`+mG<~7wgIfwNtw(ViPqI+Daq8YdNg$bGNmkgq}2H@qwpfvUSaN25Z z^3C8T-qzD1p>oT3%|t!w^ZFitpD<6$Bn@h=)eoXSQp{Cf2C0TDf3hIoxoYHS?o691 zxi{d6^{zFjU19{5%6)jq#Dt_iV6XM#8D6M6h5Pqjj#RkYfequgM+Rp}8mHDtdb@0> z%k~4@o(4sFTy7cuP-JI?@OP*kmks)7mq7TFJZ^*eW0tehB!%PK;dn?7^!F&ygPLrA zsaA{c->T7Xk94s0S2J{49|7fp*AVsPK4!+dqk~x&xLlhdF{SeGTgLj_ zUOSOw?8d5#KlsBhb1>bi3oHJ3K;1zb5RlvW#gRSNzV`7F*46BisL=?bL+w7gq02~H 
[GIT binary patch data omitted: base85-encoded literal payload for the new binary file model/model_parms_from_mlops (Bin 0 -> 32188 bytes) listed in the diffstat; not human-readable and reproduced here only as a placeholder.]
z{K_B%Hg#|sk{yus$Or@{DqRHj!(ANzP2hU2{f2??rKqLl0&1@(bMe-S7$);m18HUzV`m{U2{SN{D~j+pJ9%dfxt5Ar@7gAo&0j^K_FW67~h zoLoJdnLmQVtCfkbkpaI-T8?bin}{~!rr?;R7s1Gbbxd#QlksyjNlb7almEp~C+!=?>iZ|Ju#asaQFz{9(@A`B+*I{uH2h#4rL@jBWcI!Xp_-NZvEA>JpD>KE%eE_>K#e=M!TUxlK5Yp_xpsXL?Gee@@i+O`jZ(z3g7v?381mal zf>yKphkb}FRkGL%e!G@%l8gUAd;NUqT^57Rnaa51Zz{V(PQ;ttS5V5vgq)t*0P}*5 zL)YiO=(yqx?*D5@)7k82e;C_8$JTZkaAk z3Y}E2j?E1Pqt>x`p#g16H%Cu1B^31MI7e%x^4qV3LurL7ukw)boK+s6@8wutT*thz z_H1qy!)Lc-RKko2x#%c2loM>dVIML02w3it<5wFqfVA7JvPXqPa7LV73oS zD@;%^_AlBe%7Eud8yYHUMT2K4V?$IbL_`wKwlEeS|B zYnXk5?aIGh%g*$~TW^P>aCi?a`P&M|x7I_oZ8wHpH>YmqGhlC`3E5hq zNmKHkg1Aqe`n+`p-|zQ1Co&%|wP=utv3o#GaR!)Wh&a!Q9P3a^(N0kx7S8y}*Uz>i zW0{|$uF3`KEZm^2YBqkX8xO5-dbx@d11O`%V>P>@n|yx&%j;WE>FP8%Zaoo|6r^bA z-xhG%y#wXnaOiWdiE|9CMSoETc^*3I9pMc3cei5&YgUc3ynR8?JKP@GT0E7N+N2^#(O8q2B=*&5ooAgt>hkbruuQMm^*GEA-XNV%7^-yr- zBFNsp!(6$poa*2SoPXb%_0t!No&JX4)QOtZ_rrbhuC0OWjNLEpX0Ece-%bJl>>0X0 zJq)b}*204?ov`?&BrVx4Pd1Kp0hZ?$7X}Zajqgiv^j*X|UwO~1e%cFhqdQQl(U8WQ z)Cdn4r{aVZ4?Hz8jhzu^alRJ2prEZDRL0K2+?mn*o=YN_g~s$pZY{1#79l<`!PeOuG3AlUiB6v&ILTl3& zJT~2gSYKyZyM}Qt?RCRp%u5f9f4BqmzB3lL$5KAN_=4EJL>-0qqOlZqLX3M1JQ7^T zU-`k9dO#bMPhG*_&)sMlph<#7Y))s}19i>OP<-BvuL{o3EjqxpRx&4M5T9 z1biE+LY@`q(A>Z#lyJ&MgB87Sr8JLyegZL`KgBKDAxXl+EWkT*fcbLIV9n3~n0i2h z{40?luFp3KM+aHc(jV$#rd5O~-K4H<_%}7~q+HA2TUAN&O3+O$N$@27nFK0qtVCB5I^$|M}Di3puLR!#+X9> zI&4PnHWCHzB(SkDlpD4ph|LNda7E~2IQN3}U=|vYwmHW1sF@^LE!m4~4?!Kr4DeSE z$(Yd? z8E7AM0|c6V+*%QVbe7%EJFiW>%q>BF$1h&{lO?(OQH+k3^Z9Ut5zzXF`KW4?A-d%b zA8~XY1}^9MIak85YONyuGD4aL4g~SnjOD1HmU*fxZwhmlC1bV8F4mL12>%#YOvSMm z&aTiUM}{lXD@WDoR-2{JImH@puT`VLn}$Nr&J1)-Zxv5oaS84p)+K|B85R~k|3txP&Yo^pMck@vM>ozUzB=)J86G|YT4dSe&gXZ@J_J9i&y z3Ww0HSp!%b{R+H8?!lqGuRxGf<1%i31stAiMf!U_{F^ismIWzOzqd2F zsL}GsvA^S0QzcZo#iFn*8Y@C8VN|cY@dK8ve zm%!`C66|Lt{QAx^Xnu7FM+WHAzWzEsAkCH>7!m`OWn-Z;Y&6Key@+}HMbJ3h1vP5& zFmNq9A8kD8?Ds{)e>!>{XV+C>TjLwpXf+aCO9Qxr;YOtTO*VKlR^fV>Apq-G|}3m|wQh3WmC!z}zYWOpd(E*=sMsO=T=AQ+WK5#_Q)$ zQ_#$tjlB*Y-;!Z=$V4==ZR9#1?ciK{q=i=|t%R$&=ESLcCjOpnN)~MDWL}LiXr!k_ zl^#md-WDZlGxiURx@Sq8KNj%u!8JlR2U|MKG#zdxe`M#f1LEgvw29wub6#!!XYNsn z89A4%Lqvbg#QT|h2H%ds(%cMQen%+!pPP>>;_BgFh#~oF%QDbYQ=qWz8o%x)`}6ef zFiuQV_QNJw`my{FimJuD#^G<^`kL_$_IrbY`xi*Oy#!YNX0!384zb>;R_^*T1u_lI zvGw*5@G!_>bC{*v{_Q#>=H74g3ETD;(~UqJArgWH!-j)BwX zFs`2fcE3^~9fs);Yt{~~^eq>4Hv!$wuui7SKTf{qI=esGif36!k(PlX@G47N^JHai@3|#a~$m&glVDpJFUaa!5M(G;wk=YB! z+X_&=GY^kP{l$_1mftw?v;lroyu5`GiQaO&rima3%vn3Mp2Nmm3d^01a#Q0L1;e0`V=XTd8bVW zaie7#?=OE1MD0IZ1gncsG-(HKQBnXGI~9oHcqyu*aSC4ONs?)wcG3&wX z_v&O%V;8tCo5(d3-iO>NiNe5}jkwEKo%Jn_3R@+4uCii2RBL}mH)9RT^N1&ejC*{) z2PPhsrhR7dD78zBrK#pr6n#V-e`qFFsoum9%-1nHu?q97dSUsQ$M`7DoCe>xitf8? 
z!FO~6pV%pcuI1lBsjCS)kL(7ir7tioD}#S{?S2dm-jfg8xVDUD?lvqh@B%N~to^EfHVLVOjRL?GS1hfWvxULVf`20xSInFDorH)7p$VWh(Sc zj67MNmIg!gRY}XtXCSa^!bWo)urm9Gf&+zF&(a34efV~?6Xo;&Z89ZZH`D>AMuPBE zD*V_WPd+f`yZZwv92cJo?z8=1b3io2E~$ppzBq`#{EzchKF=2`oPdP15aw|2LXF~A z=(j?i6PUYkQ;HM_`yvOmUrk)~RB8I*whCEx)eP1Qd`Fwk0Cd0o2``iugXt(8;(YHA z#;*6FI$rrDOpG0$tMY&Q3|LW%~DlY+OOUSa=%jo6B-K{UTt zIHCA5x(qWUKFe7?Y(*jXvVIn4qD~zY_hGx+W$5pIi^s;F!l=a2;AK4@mOZY+;fI-P zXvkO)Y&Z&<&vj_hs0{eLrUP9DKB3x*rF>)LHu$SDAL{OFW2nk&EV6tHw!hSfZ^%mU zimpQsnKO)Q`V-aWO#{bal-n*JjwRcyiCVn|JSt~1J5DF<1%b5f6U!rd#_oS8LND=h~GD< z1VqBqT+)oU7{1z=9;$i+g`(roJhdOLKW@iIsYYafqYRnC_S$2-lkh?RW<0tw3VqL) zF#c(|&}+3Os`cq|wha{s<yS0LqO}I($DL=4?mm8k z#!)POV-KqirLz9m4^Dm(@}9}pAk*v-oZk5Y^!9{cXVfk%V42+-`7eBapEiZXHW0t; z8di_`3s&iC@aJ8|3Vl4t`JYy#S6{0Ux$DNnGmY&GgdTj>&>Y^`MjH;cGX7qxHa5Pt z0BIE$aJ=o!7}>ExRqLlX=7bMcZ+Qj^R|fI!)x(%q*MVtsP53KcPveH~dX(Q5jUG!| zp`Yag0#sNxKJP5wzqc3EOxEF^E1zM_C25k7eFkplu)9vQ3@zAR&9a!z;ww_IaNx*) z=%aXxOP>3OYdv@a9ZwEN4U=x@XfUHwLQZ4$M3!}nE)&{Zyb6bI0D4aLfIr<91dkfh z)D4VFbmkiDE4c`V3|K#R(>oA&IJgwbyhQui^0+eJghn5|&1+pZM1!dNkTxuqGw)EO zJ#pjFkNKB&%rC=$Q=RB_fO(lKEYaEEI4lW3ESx*QHI#dxyn`Jy)_md{oMSO_4a>ca z_JW@+Rz%0U0zziofMZpbfL-fHL!XQnI1b;z@yAT)1r-I_T08<0mMYN6tBN7HegfN-RRL*NBa$W; z;m#>T5|Vj}yEsIW`h5uI^cyXxtNmJ`Xy{W;ek=&4jZXXmtQs`xfyA^MRFcYJ)=wGZw&Izv#-K1vszFMU|F;F4x#7cv3OTY zggNy!ICrB0b+@OS-CSAlthB_x^f2)K)+A6hgUi|X2i}}!S(kV%a1U7v(X&eV%X*QR z>nQ;-U;7~QUkkVz9KfLmF5vjrzp*)>10wwP@;+=%q_^`9pSmFz*=Yt!=g44P?hZ(s z?9RuZJt~YV`hY(l$&g-Y#z}n8jSsdk$Bl^<)p_y-2IQh(9-IFUE4|71@3zpFd<%a` z7}4$AZ>Bg>i> zaSofAXW`9vUNS5YqyHlBNWEZv=vjDi&62>x)o^mEJ`p7>fu9n+8a-9 zwIo^}*5T^rbjZ4rh8JdjgpVBCrCr*`xkq2(UC;mE9Ygiesg&izGrNSj?J?L|wH$Mn z55mUSGJdwF9d#IMMP2SlkoMiI+dPCq&#tGC_V@zhRes{HS16H6EqCm%t^u1c=G&Cf zp?)DT=-P3c3w9D?X~E?_tiZP}0`k*AlMJ@79*ogcZdOqN zrcNCLYM8@IvHo1$*k?E_U?|#eHUS3PCDL|B&_bme?3#*M-&%)Cx3b;vCXvgZK7As8 zu9_3jZCuksmY1lz1UP>hI3()R+3)_rWbgIp8~#n)=fc>s6^p@S!&KH68pA22C{Xn~ zx~yBlnB;rgV6g_viWG5RmmUC{S(o!?nK^x*qCjddf5XWM*I>kNQ}RE~>$DIiK`K>% z1Vzf2%L7nbdk)(YK0~H1d#)^%feFhg-l>rzp>vvW+ZquzUOvdx4He+WIl8!f?sKfY zcM=4(X2NHug7K|_Jc*i8hmMC8>FVu&@J+!hEIoJ==duiXLfcSO>r@1@b616@^It)M z$2lk(rc4W1x5#z7lra9REU#9^codfHINl+jWkpkk^V0u74zafJcp>$ zq?RbOneYmZywW8_RZ`SdcfGhS(iOWloPcR}Y9V>T3^e(13UfC&2&4SSk(b9qMG6P>U0(AUeBV{7%D+I)6P0B97TpgJWS#z~8IwcVNx)={3AzSDnjj-1s5|x%4 z(8hHvzqV0_BUf2a zy?tM}kVrGUIw2b^o;`-GJH5bZ{R3$K`~V&IzvCOX)S}F9J@TU0h?KqzgPuio7+tyq z=1x#0W)no>vj&)dSq^;2W40t;h*eAIhx((s#BtIr-mKv#ZoIb&Qr5o($s>25#q$otTWM2Q|8&lGR-~{~ zwu`TIF9v(TT4;F|26h=?yhA`5JXvHxMb;xZdw*jTb;r5vJg7xNZWnUyfuH!??g75_ z$Uolf{u55^_#`kttVniFD#rse6mfNLIq0o40a@luF5E4L26w8!{oO6t6?hBg?P-G) z^%^i*;RlVod&DztXb`7^%$>e;HWv2;!`75lAh(If+)b%i!_r zAGmg(Yhvtj}ExYfggmO$t*I|A)ZmUt_IF$%dd=$o<@ z(;4?obu-(&jI$&sstfSKxoy}W(}z~R3E2H}GUiHZV^aKE7|ZTN0z2ibw5$Q{*2-Q? 
zHvY#i2r-4HbFGP}R2nKz>r;X8g=~e1&#>>AIt_MN4%S92!;zy;%^w)kmnV!!_B314 zoo5b?$478xb1v~=2^ysKUlV59hQYfZCX9R6!@DJ2!znKcFi)UNm51t++=xTs#vdnn z_YwPWf?GL?G*bC4VI1lyo6^Rmv0PD)9_{SAhm#W@!P6BcWV^5lN9*8=$qjz*YvC$yb@#hEHDI{`;TzPei^cNxefIm zUWKQd?!zIYczEq;NhAW^;IN;s5hbD^-d>767?7g>P1h$4cg|yRtv<}}XAEa!Y4O>f zZ`f6-MSX@V;m6^(AS|;5ed5NVoy}B~sb}mC!9=*qb|#XCK0@EsGpislU^S8$1Y)W^Q>V<+N<9hcG0KM3MuBtibrXuQ;` zM2hzG!0jnoq%!R@K0gwPO9VNPzT`5D)L=QKE)PDMIsD#v8j(;n_Ulc@FyzBke$8_M zDO%fyKX*oB>iPYUnOuj1j6GuWUjZzr6GCS22rOM<17k*`}CxF$+J+(8RGb;J}+2v=`VweTEY4(-{s?oW9_Ju7!qh z#pg=@&5ODVuTLaf1qh9En}Qc7hXR8FzOsX3j{#zVPwj>M9Qc)?qCV_Ce_Z zWsvk@eA0a>LIbiMXU^9oepBYa=SQkEcZaR;L1--OwJOFXZ*svvB8=rqq*2=IF!Zl^ z3bHLFSlf68hVEy~ntU5N*CUg8Hq6Onu7WX2_HyBkmN2a>3;yR~U;p|Pgq(T9uUK#c z+@p*C*LUWBL|M@0NNXBDk8<@oYNRghHuR6q#nCdb>Hy)YI@n&7#CpB`sA_0TEOOZ1&+|Lz zxdiiORu9EphigFe?>}+5#0$8%R)*YV%*4i2RoJt37baY1-HCF2h?V;Zp})QW#z|wA zZ8$e?SSn0jElFEV-g84&E0Bm9D~NsZ9zKj>+;ZKOaBKoQYm2^NZhR@1wj_yP5%(YF zu-T*5-h*J~S<0)NScAfytcR1KL+>Q!u-B_5yGyzWr1d0V*_JFUS*%Kunls?ZRz;%Z z`v}FmJD~R6J_MFWJn5)NrfOIc?^HJ|W#6;?$F`%+s`cpjOM>%Nv*CQN?Pss)ZLA%4 z1>&oe!EC%HuYOyB+|!mNdG{!^T(F>WZCccFPBSiO%EGiA#~_K_BNLvQ<95bMx$DMU zgW6~L9$OFedtS-Kf4MJIJj>kQ6IY_1S0LE#HmCCCN4TN)6iL%3b0RvXBMz;(1jldo zf&V^5+Inp`NdCMFKEb!StRq>d;Ms@!W{V)Fya2owtOX0>8j!v|5@T27L+)oq?%90} zQvP%)ep{D^^_OB$rS3K6%FY+N-ZTcijBz+8*M!E$Rq>q$Qe0ZW5KhnFBcC!V792i5 z0+CT3FA-%)26ns$y{j%TX^$jzTXzv}4TPh^kO9n>XFH-Z%dp7wA%wH<;m)?V2oFv{ z?48H(YtdKR=?#Jc^4&5$-*hU!nzq{q!nsN+RXFuC{{cFt5L;`sIWbgu@v zHdTjMZBeEE4cB2}lQElPr-0h%jbLeW1`mI+B)Qit;lVP-x@!;OJEgaA&SRH@)H_)! zGO@$O)-3CGSVd`^Ce(tR9wo!pIkXX%r^g|2K4Q7!H@ zd5A@0pMm;8U2;7^n)=zL@$ODrK=Jf8Y&D4E?T&2#`^hUo&0z!|vGX!-Rv*qiPmpCC z`5XKUTT7BUTM{qKi38#4Vo=g7!s3B$oTFEXP9DA}WLbdMU!};7@ntx=s2)wu#G%i+ zwNRb(6_+RLk*RXGuqE>%HXJ?!sk-+d_+&ILki3s}>P4L1hO7M6BxxwkOyLeQr?Ov~ z4_~X%j`!Fz!^nzp)Ep%#G@WE#SvS}<#*ev&6uBR)-<(x-5c>zqV1oO@eFL{0)<`?qkk{}3YRTkQO3tQ8SmO%j_~4e}A=|*vp65rJkjC|MLFIHi*k^wJ(_P1jDnvriX=Yihwww68d;!RkK?*anJXk0^r1~{ur7g+i5@LRX zXy5N_wWGRVDbgjoms$}OA0F!c|6<3MDsYrF=6t1>ay8PYIh(Lr=pNpI-{ybCd@pa9 zofrkdntEu%*xIsZL&1~ntph{fa>9{C5Ij)?qUBNiit=^XF#0?QuCT=B$#Yoz_7YeX z-e+Ctm0bIl`H+?=K{wWG;l4xl@LSrLz7jJ>ePlY@|0$r!SrKMtc7fuTW)M9H2TCqr zU-esDral5ck2;4kH&jV$T^q0QIUdSW3~7XQ43;KZ)6jdh;AgY~d=Cxd>(U-$P*Dp! zA7V_?QzdEbkeN8`m@l@)Y0;Q&=0ke=hL0XOj?EdZpnBX{D2OeCn)J!2b$=bKfBOqP zbZ>(|7qG>=sMAgx7+rall1z4MH^Z{ zBjG-DMV7&YFLSW8oH>G`TOf7VW*o;dAFjRk#qPzW{Qq*Xt_yXzkKh1Llpo={ibt6B zf#;6SQD!~kE#RG800qltMA({Wh-5eSwJ zVGiN%V7lcrmL?SQou)nf$g2-=@{R#09PSN!6vQydGIYlGp5wr(Drj^+FC2_9CJM}d zvhBP&xicyQ^}hW;uhji~AXBB}-VupwtCnFy^&(VVQ;&sK%UPG}QkK1gKdi{;fUA4; z$*-=HC|VGJt6Aniul6kWc!dEOQ)~pvd!?AmcLqq-zJx!~(dZZcoOA6z3EQPAPA~tV{gXE)rw$sDsay4lGvm$%hCk)kKDK16218Oj z!;ogU8<5<1Eg^($1^54W&iThK7;IoO>dSk<>f#0nO<}zKCt9fHVhZX16li7i1$cdo zJv%inLG6qP$eSxk6>sN3@R^Yq{Y3y-N)6naj(ALeZA??9@vwE|2ACXt3t;KKInIoER$+_={*zI9odP{t&USsJX?$*?6=+*n(9V1NIp26)IGz0kJ8cU1m_~mr zlT{*j4%?8JTlb-F2<18@r8u`;0q}(7=lldi_-hlD>Ar<*uXfTB&(BXrd70z9k}w=% zQxoAh%RZ;cI`dbDYm(Ru7j!S$h>qtM^U1EkoUMv2k+xU>v-TU4JQK#5Pg@LIKFJZ+ zg+6G@?m*Y4#o(v8jI;7zj?mG2Gv}H=kGp#(4dl)KaQ2m3A;zQwHcx$zcGF@x6=!RF z$6UpYb6R=9#-oq?CaF-5%g5luPL|I($1)7-@3CxhAH-Te1<|u>F2QfgvCW_DNt(JC*6Y^F@&MZ#*Y*>TwxrZAV=9Jr(zUlO&yB$>olkDGc8vL#1CXfmHwh zAnb|^%USh9tLzvU?mUEyD{4lOiyjby&2+F?TZr5HHlS}vCi^U3gn4=rRLWdHyxwo;?MJOc z(Vcv}@NXM#+;)@8Tg7r$?{c9}l!Jl2sgR^)N*P2Nm)*0afwKm=F?Yv+$zn0wUYLm? 
z`p>uq1v|RHc>r^@|Do+9SsM6o0po~W1*Jl_tl$r9l&8PW$dWB1WXSYZ19I=oM;te!65OUrL0Lr_M)*oV|6w_5xA_Wek9rFJ zRf&-PXEydvegMlRNzx-Sj2q-zhcDSo_vg*?82i2y9*3#Zz^SV6gI6HcTNw`^s1-`m zq-ktfAle;dd#=N;U>0*PUEXd<@^2)8Qr%b7nUDs)>)5^jpA1j+89!!1Be&9%u}TBS zL5NQwZ#{U6bu4G_g00K5oW6d>m)fetVd``HKj_mguX@JYHphsG&b&zSsmlz; zP~SfKH@sM;N^6uCfbIgew|`jydTFV=VCrnheb&M$M%BUHvUu={`^$BlKw1=WUIa(i08!hyy*4C%VcEzf4}f6bf1 zFg6RG(XC8Q+>FK*vG<_$qBPBYKAXE1rcRtreTTg0I!Id)#Eq9v#$i42ux#Ra7#IB# z9JmhX%rk}{mxp-ASeLFn?ho>o%;Uw*A7(jGtdCdDm5JC{X`U=Rde5@nFN3mOy^n}H zi%xNY35-j-y$YwUG^bB9O=((lEa$aXlbc~>N_Jn+rPJO%$JRJUc)b5A>e!zI)w4>p zIyeKSy-P$HNpoWT{1{5)nUJE1YtVGMB=NFT<-$+p;}|Oy>gNd{#ax?vP9-rG+a2tB z^$o``zK$*Hc?Tz6gO<5YC_T3d=j$`~z#T)z4i6W1ZrH~8?D@+tei@3bLvQkGhSeDT z_%_3U!m)LHlc==CEo6*MS{@}o;xs0Fi{uPGKFM-^VmfS(s zeaIT83=N7~@QIQnQJpJCMlpZhx+$tu%{3aFcYo%+YR2$d>{&6e`z54wyu&4PkHN+a z4T!kDk-IheEk=HS!<@1d*PXMbYt|VP^Q)RP@X=+ysNw-6XMf=n)AoapT|YmTF}Ne$ zdvN4ZP2#0v%HK+~B$aEIVg8$WI3!$yI_d^7Pwh-D{izk~YafH@oHwkjEJZa#T{v(* z4E=N1ch6CtoBZ=1>a+Rf${uO5Rec$_e(S)$wmu*|{}}L{Y@cN`A5@gPK>kf6XVB3H zI!33NPgsMlz8#G#9B5#Q7_|>+xgz(V_cthn z>eE6QA2_|?6+|8=f)oDoL|hXF_760u&hT6aIdBH*=yYsnnWw30nk0~p2D?dpXg4AU z=bz2Q=n>)Ed&6s(6j=(53lhQWkQ^VARtjoEXF-X8@s_^5=RD?kVcG|Ee!Ig2T<~Ba zypgmfyDOexn#C&qLP0g;tnY&9ofRw>p8!7NhJnCqlZ)TH$9zP)HVxELqTw5~F@L)O zz3wARFRf7{>t(&M*`^lU1Il<`@gn~8+j^*+bscPCuR?4}-QNfZ*IocEhHEiNsuS2omYw0O;cj*ptjN2@9G$lzbNnEzxoJUi29B~B zqAu8(IHJDN5i}}44>9{cupZHGmtH+t@}i^~XEKLx`Cu#f{?_Mlt_l0S($va`y$>aa z!m@}4^qszp7fn4c{2ri8wtp)}oj4V`GoJahSB2w)^+}MoI0oX_dwAO(Ws+2M7tgMc zAOj;i@XMs*IN`sqY|o-aB-UJEOerHsb8_Gw7E6)9>B@}tG{xm0%ldZf%*7^qBQj=^ z3eJ4`0&9+h^MU3?oL}-UbTFESH;xP;Ud1AQRF)C7e#cz)hwEW_gECP7Il{^O#MWCq z{Qk~he9>Y}R+0~x9P5gQ&b z()#-d@AyF$4d(76)K;WwU-c-1u2RvMZ+v4)v@q9fGzgZ6vj6t`gRjA3-nX|- z++|V>-DTRep!OL&qJ|`EMLcsP`Qh`)%*nR%4Mxtsh4qtd$iGYzlKg_r7(Y!D?opVA z4Uy)UCp!fTB|G>ldA-=-81Fkm+4qShX%0yFiPa zi7_Q%6J<$vml5u3WVw~rZQL8?#Zdmi?%#>$(62?C4{5iBYWxM>M?S;lHw%&(uecQd|KsRP{A&EZHhh`~&6>y49A85c>O5;FnH7>L2_YmQb4ZSaBuS=Jl8_`x zLY-&rBvC>Vnj}do6iJd$@BY1i03YR?=h=I$`@XJU@U)$s-}T3HyV)MtIdd>MS(ye5 z)&ahwmO$gaRb0xr3a(F$d0Bq+gZN7T_f&rj_-V)R!q==Hr@Nicb{WTY$$tXTHrChd zo{F2=^~sLJLFBquIHr6&E^&osURalnUp)+IlhXtg6n(?S)K>1&;koFt^*#hIw4(Q} zpTIv|@p7ydK=>LDhnhL6EVB{ zJHND{0o8h$r)laDtb5>q!`UXVSEuU<>pWeh)@P9s>0vmSmT#94+&G0xM?SgGXB#OX-0WU)T8;jAqG` z6&KFH=-`9c{5%dOFJxK69z9Z2-3njNJOgj*SQt%Dqxgy@N~W5y@6aAeL;Xom`FsMt z_h}Q?fjY(zOyWQ8O2M1v<^<-;;OEO9K)N5WY}7|^Nw=au?;6snC(dARuNSVXQ6=m1 z6-au(I_9xE3AQLpF8Y|${FN3&|LJG`bK+XGm(J$WkI7=-mEWMrW|YR>ER!r-hD+qS zFyPl=u0TzP#xDKL3ssZ2?YH{bbFmf59~x1`UuXHxe+$4z&Wx_e=?0zMwj^Aw4+qkX z$%{M{8d?$pp>MB&>!{PBmc=nJ%y}KW?ocBF!O6S5529I5&Wg@itxU8sTcGPyCF{~y zGS9m*Z49*n-))xgg>g(%1RY#`+9ufTszkm%KaCBt#n^dcIM?Mo5idG3_MGGmYTQuZ z{wz$xSkt}y(@o6D!1!2kH(%mz)TSe4tcb?BWqeG%2@TP2<~`d^g7nDiu&qX#NG>eK zh3qr7w6+{(on?M;-{;^rv{cd_F9(yCN8z@wHnh@{-Tm9vi-LcRfxHEEeCdx=zHN3U zx8`snHaZ+&9wR-fKlC47x^)QK1b29e)L-;FBSyW6W>nA=hN>nX?f~z(ryU>UDU)z^e%;ma8*m)!*)BYeea+oyd`tw{uXpn2WEE+9rXJqc z%S5kNEo_fF4Ye|MV!N>h%uqf9u9@>C5$mmRnqo7zgRyvr4ylH3Yn4dNAja)~SkE}K zQ4)}V!@b0S= z*?-4^L@-z9cPrMpEVG4{*HW}jx(KpQx$}2|BXEoTAo4&-m7e=xNn$qo;yn>#uQe{= z%k*m?;8Z-s`BpMFk0bm^&qMG3^7)$jIGn4YN1py1j4D^JgQD_Rev#)je8h4xvwG{< zJs=&Y&R3x|cIGhHUqI8jH2BhKO5IuB%&F-(WH3)d{D?Z<>hlF$9&ALKY;S{nem10d z4C0(#vAxqE|N;*$Jtp(pF=D7SA!ec3mBaN=*2*=NF>Q|crqN0C-quEVjl z2LP%^f_Rv#Yp5dC_@qgVSBJA6)2qDBgz;eg$BHCJUfqXJ`S#rC^u4QO;=a3&d$u?yoGEn zu*V6ApWf!SzEOY?aRbZ+?8?28KMn@*EHl?~Mijd0Cn_~1VQB~JkYvly6}|T%gPlEM z6YjC>$sUM|{Elv$cc5USI{5B%f_=Mv@?FeF|EEa* zn_r85`y08U^^34(xG`7`bV;7^JUEv64g;p@z_L~Cs9T#0iL7Jf^qz7hDHc@k zwt((>&K!9zyZN3kdeo^~1t$HJg`jF}dhoz~C?32G#(t}0I};z2Za)i7QeU~nI~k`- 
zxe2o;_i!&lQ*p5bah9zF`m=w56FW^Qn#)6`mVm~Qd+>amF1eF=AKkR3VAaS(X!$5Z zW!rVZE}pq26w1JEl@83gXT-Q6?zrhC?7_QQUyTU^2R_0Y`iFVwYVHeo?H)@DEp^ zb*Bv}npg!zCpTiSM>ZFE@+A(7Vg16b;gYTytyn2t4J25V_66>PA;$#da*quyds6}b z?pxBp_9W)gXy;~`%2Nekv(B*jXjQr#hF)P#S9f2WS)xE1%|GzG?P*k;P|HU~U*&~c zcY^TdF(}-2h%3BX!4+#T2d%?|{J44>EF69jy{fN+rt)1BcrLyhX|Mrebc<4 zSRsk=APPa$l?B#=BGE!ifp(Zkla%+LdBNP<`Po$_n6Icyn7IJfu(I8!6RfAuk;K2v zV0#BUFOa^`1;&j6cCR17?aD}iWB&P!A5g^JSDBC!{$A2;wigeM>BY$Nqru$4kO-=e z}DN~0v@1bvj5mo(YMpV~W zljTpfXh8H&PJ6~+Xe^Esl}8R`_dHd)L4olhGYIT@nSoE)-DCMLX%cB22S;lRY4e{y z*mtcRg2*NyQ?yAN8Q?|Vd$4b8DcCos@ZleiL4n>iv>%A%Q(jC!_cRNV<-vSi8W=S%nCkh#h zcod{O?&qUd45E+MYLm4OwTS2Lm2i4U2K=4bjd^F!!|DfYH!!*wal8q*?RyWV%YTB_ zZUx%8;6DGToG~AbcZ#%*bwOW45RRIg!utG^ctL-xq^hb28#kGAc0ui2g?~L4GVdKa zwO!}xQnuqlm9Lnt^oIAV@#6)*RQbSaQ<|9^4$GhSQ00?=jYdVNFDT}xEuD{LGa4}5^C=wBw4>EWF5uI3A;p1{*1+1_Yv5p*WZxxDRF9nJ?670NngPfn|dQSsRvxEoBIQuS>AcT9$6tIs;k7d8n_M#Roo1<0 z33E&oo?RhQesmks1sW(0cN4{yE(X8)(|qq$eVlc)9E!tNqw1klbQGkqy?`VCKISx9 zU7ZYZ#hSFU^90|>_!nink3z|RvH0x+n+dbMeb{ZwN=q1cF~j2`$`(OZO=cnu<}LIx<=@k z^#pr1j=<(#TiO}Bhj(|iBh8Y2w7$l=j0;OJYJVBFEid8fgMXrDr2wxMC==SGO+1IW zV(Bqen$ON?H#J{l&X-RpSh+oanX3`;xMKo^_Z9i?W*Mlg@(EHLUr9z3et_RcQrW#o znl5)xrk(3XbDJ8?>21d9DE+Sr*SE6WellaEh1Y>?pbk+S`<=U05{+RN%r&n(SERIA zj`Wltfxnxb(0UvD**!ZX7GwIchB@fw%`=9&L6^Y7;D3A7V-UCL9_(MDPdaxx^B-7N z#5Hj~W?KcqCUO)ze1z1snB`8d%?5{`RnW%n_Q9{C@M48I@zv9V#;=(o)md7k+Uyw$ zy(i(oab2?7oU!OMWnjib_IEuvj*2O!VDd(WJdIUFai9$KDe1;?v!_t#tH7(&%F&oz zCNw*-h&x%E3LYEZq2tSWFimA41gJ!zP5E8uVtKmAgR&TV)s`>1$l+|p$}Btd5sZyl z<{>(q^ZcsDWn|}Lr@0x}Y%-<-`M;7~y?3yT9EPI4msomhGQ?KQgk1IMU|XX`g8Wv) zxV1J!#z&WY9U`O^J5TTvc?A+@+7DiBQRp+$1s7!(V6qIGJ?7rzueGopFV1$}?t2h| zX3Ar6ybcY_TZhtBJJ50a5{P?S1Kx5yoS|nc&eqZ(cBkgUX*uSeZCBuSjeUb~SP5UZ z=Hu5Xt>DLaM&hJSoRwAwe~%|aZ^>k!^Hk}q`P=X@%jLIQ?!|xcT4d<3xoEL(0DegZ zQxEeM=$g&4bHh~mX6r2QR3FT_WR}1PdzJ_4>EqA8uqG-x$02Pl^QTlI@8p#LexH@G zPbLrc{gonOfrg~e!%Xz#g%J^if>ZX45!?pn53r%9j5%Z8peu|RsuAw+0L!#oXgUeso(BQlt9C~CqiiPT| zLuo<%QtY|dk?(lHJ#CO<=NOGMCpoh|E5-sgg@Or;w=JIo*VNg(XxUXr(&eDB#Frn( zy5)_2OZa1_RcIm0$BMrU=hW9(l7q9V!TpRTEkAG#+*X}{+$~2TJfsJbCoreQ=O$eH z(ujb<9&mjpMfK0z<(?F45!rdnImohMm)D)d?_fO0qLwvJa7>{x)n)UB_m7on|8pxW2(E51$xnf7Iv9@6pQPf;cqNkaX9;%R7V;;LnP75E587Jlvgh*& zjGg?CcMlv)EHZLHJl0y&@WKn}igUQ_njB5N^BI)WZsXF272w8ivKL~F-vvIUvKi-@4#&1>Pk8&;aoklUmTT{28D7@m4hdv};?vSp z@Mb0DoG#wF|%N%@nF~GH(`}RbRHoqAG(bY}t{Qm^n)?MdU z7=6R7?>r$mSc>M@Y{A>v)~%r#P>p1l<)x66x;vK zOKwB6w?6e&$>U~RMf}3DR3eA35W`p-!S4e3u*V#JWBkQ#H5(|OJOM3!MnK_`1tP(! 
zz0SQ}Ied!01s{+;1ScID27WbS(SS!d9=6ZH@gsN?Cft;~xhhAxMf%jQIvB<;WsJd= zg@AS4*w|Od%ilZ*;g?Fl`(qc5UvEoVU9V!IT|Ry$MUW$K$C>Gx%v%uC12RF zsspXh=U{}Y0sP{bUsQDv5kE}lW$RaCK|(dVV-Lq-sn;mVkfxq;E>ORNu`dl7FQC?n zTeFJIRFmhy#diWyv0RNyX)xzprwx$+ zn4q9f%bqpEomN#ExJ`wgZ(tnUv&t~XQsK!)++EcjE;IWO%2>G;ZUe!Q^1|GMsm2H&8fF*NSR zD5>3;euy$weFw_K35js38*jHqneMW>0*5|G)1hxaf>ZJ}RPtiZ=<_4_W1rZrB~zZ} z_A&4KGAVlgwLVp;QKp+yK0@2(c<${)Ez)@96hAG~ng+ym@ClX=pxN~dHeLz@mnkQ} z_gxfp{b3xWjf$9k-5veN7k<$Yd2*mtp8Tv95Y?0H9ca`88gk4RQFejxlH|1 z{j0!vaS-u-Jvy>%*_$f|8NYgfu?6Kh3wI5gvF!x7I#l!arXp_7v{G~)cO1uji$HO~ z6v^kA8*rN2RqoCF=@giC*;yj67V=rR;C#{d5Ln3de`)UG@Lu?yDoSp#>$!TKWx$1UPY;{w22{u}?d&k&Z{jz)6%7EaW- zfO(8FzVXBjtlE1BHL}igcO8$R=$kf`J7Ys!C+qPmGQNRi@lkl*cMrFxvpvc5E=YcG z5CVTCv5bKXd{@^e|4zw~Ef1L|tl~B9d#p@EV;;cK_5g>nupo9 zYq@vCjyPH!#n9*oxY=Y)>ZC2PEoB*alnsR^V^}s~Qw#`3NDy&2%AF8 zX{Nt5*r}bzo%)QuyKfi<&-dgW3{fKX7Ng)*)mwBs{|uVazT$)Xs)Vf2Ccdp!sQr2} z3MC!OXTh1YbXl zFSKol?9EXy=#>=B7^+9L9%jQd+f+WQ_!_2krgEc=Z5glq4>q1r#KQG{{OTZUJl)Gf z)Qw1h7S_9R9nY&>RHGr)`{32t7JS0?VolSGs4L@OO*-oj+w&M#DCoP$X}Ss2CFMZH z$}Q-9{1ZP(=MaijmvGlYS`lY!FsFqgx>R<}{qKPTR^oRans`&sD&#-RPTWuO*HxIPKFG6Eu4X=Mj0OuC7nQoLO z&Gaw;!)JeB_x9gt-0KYcRn4jK(lDuQ_|LpxUY5pI2|PNxQ_68{~?FsW|~>h5bq z|E1bs!Wb!mGk!qqq5U|5<+s#2zCe!JF$g}}h*8lJD2s@K?B^G`?+ur*bUfQnP58$7 zD6PeVz4?$mj_r@mj^p2y>yTfm3go0qAOx4VbN5^a6Y*)9pAv5e1PNGq-8omVZ$_I*XD%kofeQetQ}x(-Y=f*J=Va$fOw zV65aFsIN36N-1VUGL!Z14W@~D)*OIrk9f{CQbr^^G7|cCO3?M4H}BNh%KuoSO72R@ zkYu?pV6kc+l&TBC_m%Y(C-P*Q6&dr+oal7CM~$P* zkJUQ_6YtxS^h*kKya#h*YO9mTo7Oaa`x8E-CV}OEZ1C~2!PNWLc~qJ#O@!(nxM`c~ zIKS+vTtufOq(4Xg(o!~;`<4bh0k7ba@>$4thrvsW((|`@sfi1do@xpDGz)UdLTB-hHHMe z2unFL$cPz>o_ET51$}cu^Q`IiaT095aUSy5Skj*kdZd$etSU|?qG0|@&PYRzgcaR_ z!qOm#pm)Py;y>4!tOP zIZloiZn+}Td@D^;{tW_SyU~z$>?>+ z;k`v5bS@VaF8qa_8z%DMf-WfalcBEdU$}&W<*+NI3u7}n(V*6rROHz4e&e=)%_=jx z;a3ZalWz0rAIxc?w}&V-;5_U6q>F?@_wYZ;Wr#c+N5(y5VS7DlTxU&>GIolqVui%> z^9+9YcLUOXLlxg|6_Bsf_2{#WhGd%fI_LetnpOsCQ9P@U0=XUeV!0KP+uBOx($N(V zlsFwEJ#Qg9aTT`p4{*Z9EBxQ)abUPWlfEu0LEjmwFqCy?lq+t-j@j}=*yX~94D|#1 z&xDI*T#)@S0m=o`nJooW1cZF6!$>o^K;`NPe9R#Zdb z9k1_^!xgHzh-NKng4qj|$np;|)a_(8SkUj_kW>YIpDshmzf4R!R?65^cO_9vdLh^O zER^^jL67jQcx}8ssmPtkd9FLkZS*lD_5;tkM#lm^<+UAenVO4}hiMV5!(U-Pn{h5_ zP$I+XzhQAW>m7J}2X7};s0vSI&Pa1i8F`i4=w(QYM_1#E0abeP5p(eFd<`kG2Sr2V z*c>|`84~_GffDN+v>BOlILV<%Z}+GH$6*7q7(UYyL#YUqOO0q?1ml9|vE)$JS!_MD52rA;Lxt&0 zF6oK}MUxn4$Q?wlA7f6px|Lk@rf@uU-JFE3P{f}>N+c}qC+=EPhJxGdY%{D=q;YLK ztVcr<_U$EfT2JRwTF-LL-cw;rfgWk|kK|+Tin&V;yCA_}KANyDsw!jW4suc=OGSC$ zl%9$*p~fU)Ir|4)+yXNltY|jb4DFLc@r3?;tpCM$V>@giY;pJya;W2({4JdD3SaZO8I(1`KnmhRn%jcnghAy{9d-{qGSZRSyM&3}&{Bo{pgTUNCNGjr-xF|oy$gDbq$K5VqqIhqkN6Wh-8vUAlw2w0~Fd7ACKeP<`O)o1Yq62?7p z^hDzahA48irXe4)xg(i84pJUOo=#wW>@8CHBORZhIPbEg@NbtyaIZw-*_Mk5DZOAS z?Ton>c0q9TbqLnf!E^uQ$>iWOcxz%Mjux7efjv6pqo)B`lg>-F0*u{~7eP zq-e_c0?uRpEEw%LnC#gnLk=Fk1)}sQe6;>2jN7h8{tog3i=ZSpwm_e_tc=2~qXVIL zb~q>eWWra_c3!TV@ex@zO;gpB%o~`F<0eTH@s9$2!MidHSKo}g=9i(9uQ66!zKfn_ zo?!7-mTFJ3#GpexSoEkLet$oQKV4*qi(wg@|IYGM_r756;fo*`F_d?m9V2=lBu_@p zO~tKIOVR#A1WcO|2^E?g{&iyvj)!m=*E49hhE2zF$Q?*o5dKv;U2fvS&eK_lp&{o)S$W_29~-hT{F=gZNeAx+RU;xg;bzu?Q==OPZ<2@`enN!2znC>6<(o0HT? 
zrkVzIIPQh-U93sPkJY?iRWj@C0=O;#k>9!lBJr(#l6AeNB;NB7uejh6-#yI~dVQut zk@7A0K3Q zgZ{8Q+>Ab2^A!r_HG=EgIU<3r2e+c@9Nf8hmvO0%a1~a-HQxvZSFg>YX?K%2y=pD$ zIruPMWAiWJ?z802E&9?_g$jJ4V3@HoJ`3+hk83I52lBKrbtbxwoyQNma}W$46@XpDJ1h>H z059Ed;;YamtTNe+l(}A4w~a%UwiG;KE>HTrYhlWqpP2HF;9AoIte=_$z5)RTdi}<7 zt+%MaxJnH+rMOx02?EL^p#C{yKSd*?$o-bsW>>OamL6;t51`=DcIT$rQ*dUt1*yIE z6HTP#$n;~(MYn7w7;~T$7h3tokLttZRTC{A2BE5IA|TN!um@|*uUTb z#A&o(tI9aB&8xQDzC=IhHt!iaC+HpdPhc$t#aM;mdan>?)=`3+7--9UFW zJ5nrez|Fc!v_{gxFNsnhtBpT{t(gwlZKO|^CfKmu^8j?r)*;=h1Neb;$qM&O;6$fN zz$N@D#@zY>FHP9}Zc9H`kzfk?CpF>6mE*YhhCB&q@r3qM!&pZD8T^cAGpPPnX5U&1 zHLWUWKRp-cFO#G7VftV+gn8nm=E9mk%Cx!qEGU-5am^~87!zkpJr~{NwU7A18 za;kv1u6@UU|ENnOml3``I0p8-3^srK55+r1z!axy6rYF@6*;tmYu6>%e7p$yhIT=* zRvfr~nkRA{?Z~$!Zv#(DDc(1%4Qm22IpwFdpff!VU6mt&92e63WkM=w_?)M&n#diF zxQPPi`J%hq2ayQIFwfI^geNjBXxXwuFmU=L%b?7Lkho_UdrFC$ERm&mJ|yCg;nLLp z_jOQh*C7Y)$depVJgR)Xg^LAAn6<%*ZfH3O0T%4cD*OkM6EzUN{|`L=`5OIbJ4!#R z1o<#Ex^&A6Y`%B}93{gbQZ*3z8&1NoUngPnk#g`n>CQXqZGvP!16mf852aF*;Z1ZJ zYK&;dGRYLIFl*xuSPDo>@IQz&QGk||T1eSAfSL-|&~^R*_h!2iX>*YNXHDB|Bp1qBz{QN#v zUT=W?8Oo$#dkQ3(CBlEKmst8U2KNM=z@md}p1=7ypLM4a%9nV8&=2+PTFC;-vtRXBV<=sVz5N>nz5mp5nxU2Uu#R zK!ZCixY#lc47C0Rt^e{kt=w88Jt zEZ*)z4F6Kkj=Y&@N$QKD;h^L$*toxh@`ZWulySMIEzL#2S6!4|c^Uk5RlwC|9=Djy zb$j05fYyX#m|@a@4d*Js<-A;vQ5sKUPIeTY*MPdFV>`sZm!Uc z0V+g=bp^L+en)|?9Dk@6vFEWZrj(oW&68^J>Et0G2wlj<{$^c*B@u8fzX26@k3)xp zZ^36-D;#F;?XJRJEV3}AVRwFlQnEZ1uG#^u2aTxE#}t$GT0o@G0MmpN<))vn5GV!Zw!93ikO@;i?Z=|=u0S*3{&gQF2aO{NvkPdtUX_XzAu+5sfF3FN4B<@Cmj=Mz2I83oG zWqFP0Vf<=`?WiWpczIj?VA4u${Cw>h*mHOJ+Y)W6vsysX*nVj>nx}`4k2&Lgnq7@JKL# zcWOG&o@LSSRXFsAn$hODuh4vy4bfsO{H1Suz`CtL;&WpiNw?NpV zFGsu$j<8&Y2y|65Q8dmUEe@679k0nO2lx^H-U&qIzBZ^8Kga$?A8hQsC{jvMCq5HC zgRnOlHqZP9x+5E*Q0|{Z?cYI&vs{LX5s&%)si!e=dEbk~S^J*TdnzPR z=jtFYz8eD~v!Jj&n)jUi4c8F`Dmb@-KRn8eUY?qc8k_#`s%Le`lS>vPuj?oGY-1Zn zbuy27fhHAgZcT!55-NK88p zC~>2ZcS?))CH)3iXijql;~;s=dl+$`1pFqw<~rXR!hvCzaqCDiI(ZqPpZPj|Td*Dp zPDE4_@-O5pi0f1@FjE*{ER$5ccg2dJXgUa5 z*Euhn~@YHqq=W9s|Gbi&x$pPMV zlc!`?d?su#{{VN8La3fIbFuL_t&Mf0f2ngp<1^q^3wxg??BjnZWMN_PC_dxJKdAci z16}SlfoHBXM8B*>HL35g{cI})B+SP7vslkDzL_ii_kbJXra(h}>|r#jbZ2-i27%MK z{NQ?BKIQLlE_1LJKwuc_$s^}!z6hjL*bcVT*;$hmKJ35>w|87d zQ6r4I&ic_unvibNBZZURaAjUBZ@5 zmvGX9tg&$*AN~F`qZI28X@|SRX^TB*AGMnkJXz|jG5t3_oU2E~k5@}-{C9zVaU?G^ zt`=GCTE+TEeS`o= zQvknaZV$%3odvP%45l@r7*%Fz;R#Nc2(Av`uGy8C9XpbHn8s!y?Aai`7nT3%&N)0$ znvQcKZ=sqO%R=>B=hNqCz=}!1*upZ?(XD3GZelGex5lBiiV}DqQ^Iw#29Z%BJreJl z!A;q1LI+$^&}swYY)weRVVxehaI_88__Y#dd^r#O$@}3?ygKopSdI67E0fsmCV2m6 z76uJk1V5hL#>Tm?Vr{Iz%UjOAA zzS+ed(pO#P^?C(#_H$dhj&;%3`3xe3dW^32LyC5>JY$TUA5I!^9!!_E!XKqU#Pi4+ z&gSAKRAo7ZR|dnuS%t9~e~x0jL=&#lB?k1b^>V$|8W>^{$E{({cOQAyIh#C8WUr^k zoo1h}%{kTBDR0g_Y%a&{8!7Ph7GpEc@4(N|7hvAqy(n5QqX0vZ!mafWV<(78pK_k3$c4HqyD{^rlH zZU*C)9QJ^6s!y9-*5c#hXBcKY12bJeA$9nH^8{m%og&cppd04;ZwB#|Y7CiejE@eQ zQc%72k5cvLFnjP=F3u+rj9AX> zt3d^9TxU*hM4A$@`#DkJDNF9s&1krFTZ{Vjz7Uy&=n==CBVfpSmScC-1gGA=yuJG< zUZN~vbFWb7CBNPWa+3B2+3HDxw6bx<7dga zJ~Cz5nO?~o3kBjIuLX6Bm8oTk3=x)=iy}}R8k(%}K#3BOTcJ#D*OXylcNy2%s?A5p z>_GnyzA&+u&5pf8^6b~^;^5^47*hL#bG+pUZa-_GER3BG-)m4YJ0DLO`w~utvuFA^ zO~wxkhj0sRx-P_;M#i1xS6>`VZOYp4RwMJSuV-h@!5O^2WI7n`L=Z|nkPNf8r4@Zf z5P#k3HS?suU3&|i@Qv%2b_Gx!nj>5_mKI}J&VCE&CR`dH{>2C?&_AR||ezS_MXdZ#Ax$G$R# z{PR8h#6U~d2|5aoZr{VgQF374Z49pRN?5C5Ow$tt*m!CrH)}!%ya=@>RlBR8=Y2P1 z^ErIY&LsdgDkumy7p+|=Mb1f7XoS8BT7A^Sl}}B`AJ&^~eAmy%zgUW!%Rj+rT}{&M zz69@_xDBV?Ggsi)IoO!#3!QV9fj^D{fl58PS|y8K9AeJ$-Ai#n4BJO-wQ&}yG9>R%34O{(jW|mQ@&2 zJWOMITi5<^B4KY3?>Qxij~w}#GuGC?ca_ia7vqg?nA?eceHCa#>e6w2y{Mj% zf=_3e(Zb#L@so!(?Od|~R`=WnS)FM1Jq2i6CX1?USMpOuiiXvsg0DL3$!nT%_9j|< 
z=CcZ{u&u;{OF8!QghB3^eQ3xRzzCTvI2pJVhfN!fg*kR2-PQSUb)Gi8majz)@3@0Q z*}c0l`zl|tc@1a!F9I8HD&W`oHdNI~nvChyrV@5m5iZ;h&(zEa^J{bEJ7s9F6bEGw z|3kO*e#qB*jO}69;M#u`SXFWn3X6w|hPAqZ*iX!|w#L+FdN1qU&f%2%SjNYjam1#r zgavHo>VEtz*0mW@-GU-G5GhS$ZH3sgl<};d>Cxb+r_gi$VXp4iO;8DafYmdiP~QCs z+_ICS;*Ky7R&|K7t;=|wjVz10{x}x?Xco2Iz0aLaU^(`V&D@H=0{}h($T{>6KBTG< zyJL}DQ-cKL**ivv?A*Ralc*2XB!*Vgar4GLs2bgh?%F2AYs4r#MZcoJs?~XhIfZXZ z7cf5U6&L4+`1MI2WA`z>wU-$eP@T;S!#;BM@;#i_^BHhv(Rk-6z@q^L$G(OOOKED>bP7fINQi(IQ-(4QX+C01Ej^v})}i=$u{5 z4V6z|j5TfEez`2~TIq_{9~hE?cZ>zwHx$gY@-Wxs1Z+P28r<9;LgAD~E+D#%4_sf$ zMAwYve6^hy&sT-5>BHdM1`X0ZX&($NI1Bzy!r*7;6C69*kOr$+@Lwi}WBj&R=ofB} z^qv-(!x)KSb#36yK4U3UZ=&1Asc7Rg1#7pb;{-Elk~w%clo11}NzdT!=QTKEcrsq0 zTBO{v1%4$wLDy+}B*BLhp#4ZJeDGJI{!WX~HSr*)Wq%9YMm&MpL4VMos}Mu?-$3yc zUr}D(LTL0?3C-k<<=lp7v_#>I8@han7_{G1+ z0)=uQY~NXC{0CD)E@K(nJLdmk%=rg}(9@p|;!QW5#Wvj{^$>HS;4Vey8|lzRFEVjN z2xD`7XN>+APAJa)<=p6Fz-N{TF!fR|5)Tzpv)6}t22(`Y0u{bm%cnpT&mP z&%r_QEGk}=V!7L#JnIk{qH{)_gj{an=4Z>2UC!T_bKMGc>#l&oI6JCevK}^?oWhs< zJ&Y;n17U23Wcy)rxFh?@w9dkFm`ozy(VoAq1=#sFbrquhgHX3#n!9ICI>RPi0^dlqq zMJmSBLs<)5v~r-(AePrPe2BM0bclxcZT|3zVodn20P3*V(19qAtE6wL-< zg)i9a?c$I4o099F&!V8Uh)Z^5Ory{f5HHul@8F+c#>k~u`hxi>{;HGC{18rQCS%}z z=J9-#IaQxyP87#DLgNq*zAull+4Q2o$GHOi7k1#oT@hH}Y>3xZ2}t35P0=sr&uVqN zi4Efz^Vi;-ernSpp%&|~QSrTK{(N@Vs_@~=SF;`Ho0pg}%aXd?*@lHf)<`^V`eE6k zDVY4a4&Jfzo8ukkH2P{vtV9=}ZFeBHc_3%X@-wCVkNLFa?kMcg#t}Cfz^-cx>V40~ z5MB{ogC|NN*X@AjX&0gWJlikzKH=-1*ux#g-A%#rzzxm)t_q=z9~pEH=W`?LL8CfggfHDSRZdlW>a zicb77Ce7`(uLz>VYd|Z&5I94A{?D#NRt2 zOY9v~`8`GKuBx6Ys(oietQFLW==eW8KCz75gWhtji>_nLxLWl3X+YEeH1HcnSHbd8 zIoK{|0InM~p)+D67dR{jUp&2snaOZ=$hOLj{-BNDTlz8VW-A1bx&i)EH{!BDV>-&DAJ2BMZsYrhSfd^f7xWZ~czSL= zBMj4=(km$3mML0c(hc&{Ct=ah?{LuQ6SThwgv^6a@QJTA9aYP+45eeRVK;N?F$U;s z14}yBNrimqXMFBR8xUNS;Hu?PMBi%}E*^FT$G+Z=n=i9m)V#%Lll>LmJ!Ni~d*Pth z%(!Iq9vF}8!?l})^yqR;n(ZJ*z12Q&h5xePtcNyP|4xJMk&~l$Sr2ZWpCc*{{(<>w z1=u!D0fr1?nR@%zIDC~3x%J(W%Hl>y-4TGJwL9?oc4Ok*6~m9sV4R_CWn8~v6FQLn zpu(Q*Au{{%Q=$d^n)C?rt8%d6+6)P?_oxNFYkmenyTT#nRoCzsx{Z?=0UK{ zE-LC#Ms44w!D-iLF#1&&KImwGBTp1j&Czg*NH_+01tqK}o@+~o9?(rWy`gU-=C zZBe88Q+NWNoRKz#YGdX2=W^+rgOqmSfc$z?0l0144*{>DDKPn|DCfub>O>(BA2F+AkO8N)GDu5Ja;;fj*&5R4it5d zln3et-6xbYVTE+6e>X0@^@YB5OrURLR3r{qu=+|J)x0?h7mVJM&dcT;IM)Z<4VC05 zbz|T3iDW#YJ(nyW2D)2Hq3G>KxZ>IzEf;j=6q~m+wL%}a)%D~rTl$bm8&eKyW`u!g z%+dDMutF0@<^zP+_OiBP!lvJ}BuvcEy!Dklo7c-jhIi*LDVf3vGKe~umr~Z{tyJ~Q zh#E6T(0BV{cya#>EC_9lrXugA+VwKuGHxkFJ&y#xr&naNJzl^knqiuGM|s4~7OZEw zSI#`rk~iHKf8(HelFD6Z%bFmYR(DBl1rrC`mxmB;5wO}XwRMI zT!AgEn;^GfKX~q6Ogq1}#@Zu0fV5^)xNQm~{}ksAo#EipJ`FC5?0daQHl^}h7-Qan zEo) z?WS^`b~Qgx92GMyKur)PCg|M^EAW4_**gu zRnwi6E_fmIG1NINq`#K7M(4R&lEU#~euSSc)wS2=k9(}CtyLo|nqw*ay1!whm?LuT z_f>X@dI*ZA{qmRZ7xiwXB)#l>QS`0`%8GCQN{f25#J*80so?v1un#*cTMQ}|^T_Am z$dc|ji9~K>#PR%jt+jbl=4GmQwUMqbYk?*iA}=zwE6p_N&Yr1Bl10rzQm#1+JJr7_ zHN}S-{}QZ;x7G6M&2hB0M=pIF^bj1qpHP*lHxvoR$fLD7{N4O-Fti7{DryyVGmSY| z84CWfK*sV(Id5&FG(7qddH&1t_ul)#X!;47ZF!U;TU5b)y$l*BGEMDYia7;!lQcTG z4Q31*2eq|7Xy|waDpvfFzy78kpSH{emqU9Y@~}G=%Bn%Syj0>dfXvB$rJD}S4HJMeIvc=0Iph&E6nDyi! 
zxIbC}{$HEm!3DwLxtC5`oC-m`u@JVih9b@0gS*|oQsnHbP<~AKdY>qu_HS)=7eBKJbq&vp7o%T9xpizE*_&v^UrJP zZ9;eKF}oMr4A=zDfm#&W@|j%MxfTl6=uk|zzo1p>4Z#Nj$KU^m+}QJRYOf}uUE<7E zw)+t!O`io>{`u6_;wnV^GUaEv$7N-rLBL*Ka}E z;b5@V%Y|yUTv^e-P6~c!0ihAz&`;!dH66m$1-;D3U#ure)l{kH6-x}CWWn}6pDALw zA>~^C1sdyvk_vA+{p7igp5$~@@hyaAkg=x~p|=V;E$ zZaB}RHEP{{14`Q%b?he%4eurR9#wkm7jjiD|GO0#Z=DM__T8rohXk8x*N`Ps=VDtx0SW*Ca(&Emd(Z`+L{cgvt0q#%V&4>{`W zC}{8=MG<{E@SeoZSY;wyH-WB{HnKnptzJSZ^*!};r)n5{l4@*(G(Gecq>mjytyuJUF1kR-U-yOMXTLnuWIHVz^b(vWmCF5JPlbqfzMxv- zsvdmUl8tAI^ZF}8i1+>p^?JKt$KOU+YY|SW+20&8Qy)RxrzLPKL53^iHc-Te2eh$8 zdsK|q;Q^~!p=ax%P=mif&84J#wH0Q(x=F z+}$T=Swb8reB9J}|Fq>{VrD|6HB4@h48>agUe;WGEmfN*N@?;Gh+4l8k_YK=#mj%` z#(`__%v=wxMz^GOFO)p~h;U(UH|MS`Z!6MuoWub{37q^PvMb;it4Qo$Pw?-q3`(z zpt&3FsM`14F>daDO6*iBMMi!Cjy^<}Ozu+d)SINY;jq*^%j5tR1hMc1IvB^;HO?K zQ(tq+?YUjBoxAhnh5l4BW}5u(LBUuZqso6F@)Snf+L6PyOEkE}HLw{XYQ=rU%v1bP z2OaZvd}q8FZfC(<|Ff<1-KYtYR&<3&Hrf~&@`GZhRzR1C*8Jy*I8qLB1HV<%CC|dI z(z*0M=vVo1_~p})i&kgCxzB>9l3NHB*MCvY*Eo4VKU35Uyd@cR(PHDBjq<6v%~6rK zAaB8~EA%?J5j0CT!m79Ws62IDj;Vo&d)DAk*k(oTzwCs%z=_cTZ+P>e^(YW9#8q{cq@*t8oSWR=mNQHFS9PZgw&GAH9RZ%8r1B)`@0 zo;)i*ou*vY$KrvR)LO4Q77uNJ;v2W%;1EOXB>ruF`94J7ze@A(r-`$hB`fc@gEqUi zikb7x;JR%WU6@!8qm(S36~%Nyw^;DOov7m<#@zn)pLFW{A!096G{`QZ*9&DZuPugT z|KHLQKk?pOT@S5VIa0wdZAf?NPg8oCVs)P^NY|Vj zl5{yS#96jqc~7cW9fZQ>=cL>l=LP%Vx}1406;vn3fwIX|u55IZ)zfwPk4dInyy~Io z`-|C?8=myz`fWPi$_zgkSn#(|f6-^*a?rWkk|wOY3Tu_!`CQajxRh{+PK>F5+mDo- zWqFvExeHd$xr+=5rNv%>7NQ8eI=@RxHQ=oM_051qV0Cth0e7QGmn zptuITW?0j(4(%~|cO8^F%^^x_!K(cG`3@o%(WlY`>yxbI>JIT>KP5*xE}oet@9%@( ztUKzQpNa5T_br(yPLcn*c5XAXM>!0>7rctWfi5FS4s00o8#Lq3j8DF7lr=tkjmOvfbsF2a^2H~ zbYk9es&t)32DS_3#j~~0(>g|;{j4QEInRRjJwnehXT&Qhb`!-rC*<4DOl|I$-{y96S>91Dcxx6op zU$4OIDp3zM7aWGjY{661<`bvCQ{lrya>kfhki2vdnO!pmd1DXG`|G_NszNz1!aEfkrvfLZ`=IHfkrF@Cc=s6PcDp ziQG;@C%ER41II)@(fZ|Q;iq~_AAkB#*58li1*gQ>?o}YUX&nWFqqAi%XaR2H-;w{! zY|!g-RWkl&N#8aKr}+U}*)(44FD#zRqv}ZPi6Y>(-~dG$9E1M-i)m$I7pxJShu1+j zg*WJ;oV&pS;@?z*X8$2aRr5aZd4U$cb?b`xW*tzo@~zZXXDnTQ^Bi_Rctn0}eIVnG z5sm%jM2p1?UwX}5SlzEXe!krUzYbTTrcuu^G2cc~1)7n;^5c?G2VbCm8^PeqGTB$* z52^Q?^C^+-S-4FNnqh`&gJwBWihc(Uxo}m~MSqcn_CaXY$&|vd+c*hPHcioJeG|g$9@(6UB)eK_~y@u}>Ujb{k#8Nx4 zpPsr)b`FnF=bZaa0sU@~%L9VUUiZLzY9?5GkSNdHk}J<2kyI;YA{g@ZeMu z;9LdHjOmWAC)H6k>p{`&(^RwND(pF-Kvjo5a`;HGky+*UU|CgGayDj;n)C)AG*RcrC?P5E%Np0 zW2Fw;GiWt;z!KsA>gH5NIeC4g{`Z!{)*A)1vtVEB{XqHQMZs2{xf4cfT4B+~`CL8crYC+gzX^Tpi8Wl()O zCDm`!rR<)w;QpO9oQ!H}YIl}WMb6W7x;a+YeV2Ki7N!lUmH#|wgtJzSr-hGVsFJs; zZ>rmItky%YTAV4n&1}ssHBaF611+9gqsw3F_EW~Rm9(OzHv70(gJyezqme>nceN57 zzdsX9>G>U5J@T{Q+%6%7Zbz_hogzgZ*akCCC6Qv|X9s6DPjzKskW}2?f?e*d2Sv;e zhZ~1>6K3U5N%ByrAG|{s2k>P%7nCzo%!vl6I7p`DtV4;q4#!S zpT88&EfxQHd5+W8Q7H8E6#v6l8%1-);w1s|f(Hi737a!;kliSkW{T+#(x#Ku>I%|C z8>%fV2GF!Kf^{2Z!J5jp>g>dEly$@bTr%cTVN7TAhUFJc;U}w1T@&JUyF)Uni z4P1V2C(l_2WXmAIPhD}B()V41`aW^emCAOg{<8qW>ec~u@gS3c&oJHnFUS+}okA2;I{ialUZs4KsHltnSGeIY$+33UwUiHf8&`GRpD(izZ| zhQ02M6OK8Ob?-+sLcbke%X~s+IpfIvaW^QkP?E`*RG3U3p!`=S5NTKnPMHqQtHR}? 
z?yafUuT<)B)QG*Nt%2`J7r-TQKUr+r3x)f(Naxl!Li5kPdGEXSm^4DnqrN%>KTh}L z*nu(_j<`$_I}+ssCt72;>1%ipX+c)M=EABrO5D=*6uh`q0$`O6b4KOS<69|kuIn!- zz3&c|Q^b2P%1Avz_cr;>HHD}hYbo$k7fxvU8%EjaaZ%7=sQGgQEiDjdDaSJD0^A|J zmeG(ie7@ut_C@se#ANZgc%q)Rbgab~8YyxD>3_!57PnXaqS`L zNn5tf{+k}%ZGoA{G4BCet%ru$!^q zUXFYQaYqZ}6tl0euwy&^JbW+pO@9i`6Bg6Dw}SO@=_w6M6nW-329m+rVEMuP4Pa6; zl?v-FNS3)ekhsP}$_e^Mnzuq5Z(rR`*MFF?Zf`BlNFD}9?W|b2B1p1&xm9{4oFysh zy|8jY7fw4nQ%)ZJiz=W0lH*&SfqP;-%F_7*j!UN4sLG)8f}gtL3R9fXVoC|HV1rUm z`NW7@ux^V$0*n_4YI7CIRo#3w>3XT{2 zpnnBhOkF4DJGxJx^G$*;d!P|i=f*hRUu%H73`ItG-8uDZ^=TN}@Eu+({RfK03{Uul z0x&($9QVCw%gJqHVA&Hr=4N8n`N=QXVcZj6s|V6^kqx(QQ$VLR`@o{&K73sG5JKFA zgG{U+yDq0)SBO>PKd=A!^<&uoxqc`Vs^R~A{r4LD_a6A)^B(xm`ti^5s@(sq9sha# z $|JVBQKWtR~`;Grzf&X5C|4pvIf7TD{lV{)m|6e~G{=0tsZ*q_P_s{(A75IO< z0{_eF$6%ZPo?rfZcKh$y^?&nw;D7o2GI-GcWB>7=-xKuAV%~oWum5@df1QKy|6Ye3 zJb09ggJSys>X*u6p_~|7O>v`KA-QxezMf%+@hLNL9W3TtQKNj(GYU(!%`xP{Qugia z&5f6p7}shi+33fC)(r78s|In^(k9TjE{EjLld$~5EwI{d19_Gi;5%eDd8~N{`@ zAJ9C}hgBm!%UceHbK#<;sOfdfvBa%FuJlhLm8ruzRA3cQ6(_C;m zUdW14-~6H-hPZ!#7oK}Rk!vp+pq_7k^w>0nn+6TSj|N>hrkR)_?%;>U1EV14OgO@g zzo}qHKP+}z&Sed&Audg@=`JRbN8x-dD-OaN8^>To=m`isKM%|P9m!^)W3ViL9!wu& zhr50b!0btaU)-rwHapmd8~zmT68Q>`|W zyIQp2vYV~BzUG=_VPVa&PbYAw(GAJ>`vU&*Y9JPf8o5DP2q~gkV^(4dN@&c3vglD@ zaI+hRzYu(}O{P?G(hxEhI&oQjb1u|;rjj6A)Yx2;(;}^?-m4o%?CFk1gSs-9uH?eG z1L#GiDON4Y2mOpwQ0^LnA1?-@YPc48A8CV%yeU%Hm3`n`vRcy2G9#}k?b)`fp6XzN zaMfQT#nf1-?93Xd4m<(TJFT%+8X(*S$JN=*jIi{|E@%qczb1Qh|z7|>jiQKqFcq1aL`J>Tv4%~7?)QTG@vbzIUUQd%$_j}{6+jF?` zn~j|Mk6;m+{-%r&eVl&Yn^le79NRwDW)IJm7_p%peoqqha~p4ru5#sTZzf=^dNULp zegH{7y*Tc4B}KeZqT9_?SmbNUNgMxx+HZjv`B2PFyB?R476oENqsY)~Tc58g`BUx{ z*pG{2M&Yhjwmdk`l)VbdMOJJi#8mykUpmZVo1{tDDcz1MGyEyHxC;EP_n@4AM{si4 zP&78zp%`F#42NE^>wjx8tRZ!b?<}{a$dD`tv$REw((i2AgK= zgS=HHl-#8`KMq-iKDURW^1g5rNFA`jqzE!z)<9!=Jo)}BvNXFMQORR*UyK(wUS1Gt z%vZ4A4e|HXSaRcqH)$AuSfC-ERdW-M`Nd+CK&v5391B>O!MfFls3Z|3Kzcu#X3`TvpqsJJ63YC?*ukH zJB7ZdzY`AE-BR|u-H_8~Ay{wgjn3OM)hUTf1@mpa{HvlphK3G@ zz{QI(XQjwWxTjH}%_B-|xlHuaFG~7xg1uPs$)3JlqsV@LhWjG?&$TPjYL}Sl zxa`H1VRz)R^=mm%XC+qscICoEpqg(!=;oCOAD`=DY>fk}4=-TvK7Qyt_8RE56SF4| zj>{?b?Kxv(J~`Kiv+wBK=XA!bvcI9QZU?wcyCImZ$&RU~GRb4059cZUq3Hftc3&#;yIVss z`BbXt_wLS%|6$1CZLCBuauQCz+>>jPrn7QysZ{wiME2@-j=-3-t6{Rv>#vU6E?gBB2k5#1_(zN{{Yl1yd8GTIh4cQIx3&efdBAV|;%*UuLi?BY- z0yVAQLX%B%Oxw0ZFfewI=H@!dZ~1sR^!j^Hc(=e#&`&lru$Ktx{z1e5dC=PRRLkp2t zX>8_!A6Fu(M=7{Isa7hx^qh>#PeN!-JvbFL!w~CPn7+3I>n)1|1(Y^{(#OTkF~x5+Hm!}oy!63T@*^1nR|=1c z`2w~I{YKfYU&wuF1wDQh&2jC6SoKX$vT+OJT{{KeC0|_Mt<&VZ1!5+o`7d&g@s^S& zO~b%Z-ASlbGnQ2)E5WC@75V($24>6J zv;U~2tY6g%ZJZ(~r`a*8`%4?^`_@QKU$wZY`D5~{>&U;BEynDM&S*6&0IWybbLKVS zO0pI{!g<0ixGG3m25Y#%z7Z0m3!%(r8YedSiv8JVP(G|Cj|~=V`E(i9O_|1u+$6Q; z+bN0#M1!khooY3SE7J@yKywgY$kTXx8#{E~cvuQ---$vegmOe&8(IkSS+h9`Y)-_; z9^QJa)BFSIFI$0%g6ZtzV@(srbz)7r4T}LhwANT+d1(>Ay)ts%BK8#fM&R^Vf2@Dh zk`w#3B=^Y+SZl~=R@9X^I1j(1)(iSVlRbj5qIeEl{aVPSeGJ*4dNISJJK*z26XoRF zv+DGF%3b;|Bt8{;*5gyb^HV*QA9zbP{^y|S!D7^L>4Xj6)YN#q83in^ptw$(pyajK z1DC2D`tM!`B}+)QN%}_~b#XH8at`L0DRtzsDTbJMV}Vnx9*8tHmN6u&c8;*YiF{-;PX;tM`sQR>&TuPcSr#%9nsrtCa2|^ z!u}1m9Cq*!c=~&hr%3|!_g6#Hoi2E6Nq^L9cr191lfn1r1M;3Yg#G+G$(fJ3Va)6$ znD%~-nx0d(?%C z0*YYS2YoJiua^CjhO^>AS4V@6$0;lB1ZDV%9Q(PCv}<<|XZ-HV7rqO=-!2y}nzD{6 z@AYTf$cb!kx`rCcQb1!b93%Smkk~erf?)*5)Gg*B?MZBY*qgmKXbJwoC@{WVN;UyI zq#Q}6oXTdTA7RA)vOP{tS%?j{#q<04I`TZI3q}J@3+~e)wW`G`?*HL1$Agj^Q2pB3pCG#7fG!DcqKc zt2l65H*(_c=yO)&lRBK2qVq@a_No?GHrJ2(+hl_0(?l>!o65V^c0l({PPC<4nE1Rp z(CN|$yxeR)>N)I(EqZOye*I%8y&H z+GgrzP+bx{#(t9^R4{Ne-fSd2!N5@V?jYxkHHG4LQ@A>BGwFZ2L=h+dz+?H%FtTzE z8%;bz{hNu5wz<1JxpjBUu*{$d11Dlbm;E5z-}pLkE+@|s?2A`HSiMT7xOX<3B=WaO 
zAJRb8#!y`ku4tehga(ReK2h2Uv+w+X2VO>~`@1z4y5@so^Tqu1VePSPM>ifj%o!7> zZjhpf4dv>%WY9Y_hm3^#+keqSE^^yTs)lK@pLc|;`V;}q1DRB2XQ)Zpl6}t11D%!S zq%Yh}aevxDn%Mt@UbV!sdlxDAy8)m0)&H@*h1Zx5$8n*#*;jq>8iiFw(JW0A<@K3|m!4cSl+93B@e* zdZ2;EHUlWNUrRJA%A_>A_B6`5FE;Ew1t}jp3omt@qsu1)P8$SJyWES@&ggROh|U}w z`iuh3_vgYpejF*XrVYLJh&gEqHFLi?RvxU7z6fWUqT-9XHg6gpYi@&1GlI~q=WGuA z*o-Q@PJ(8+9W)g8!xE#HvflkjDBs@-%e_r8Vw`}3rwWiPp6@*HWeDQPt)Uq1?+dGo)WaIIA_9nIp=|J zID7cOg#K%|?pYtyKkx@C&xDe)&;*i~+j7cKC;l#cIMssXV>n|Zdmj1$n&YF;y(SMX zFS109$xh08x)U@lt7+H1(R@PX&Vm0%lV$aD@>n~Wr_b+z{f{+)&cJYPs14^{OFMJH zc_X%-7>;?JZPDQ8N!i_KF&LckV6!IS;Hv5jf%_MO#^EWLoF2xB3r$$lqd#Ups1tLB zmmIUl>Z9>x9dObK=M!zlvHf#H%y}NgiipSd{R29))l1=cxH%1XDW;A5H{@)e1T8|W>X0}T9$=FK;upD>Y5}7&O$8v^Z zFjt@IiUrEwptB|j8}Hgf&c|{v$V``<9uCLEym1)lU?e7|&XSw9D~FqfvHpqGn0)3b zSXzD~qmO+=b}^homU&?1ojNLbFdLl{mUFB1cKo<_Cf0wbr2wrDpc(!Is&}rXk}0!s zVT|yg6gyzv$;sGsW)tKb%ciOu?b&^12-Ux=5d6+xj6FnyOj7fxG7 zR=GpTBqN-yev9?{buaS26^aETL_PIjZ`4}UAIoYcLX6iEEYI3Wnn-`PitNNn#WG3{ zn8r8W=%e$@J(69s-mE%TN10w-(S4;ghxeXq;AGKB@b1kYRI`fWcfyRVNxgSk&7ou@LLZaI$KA2@M&vREV6q(eiE9xE3- zkf+aUhsi-}#o6@_DB64w{BkCs;{6SE`sWU;JTK~`ZUYD&O~kbBc9M>$s5Bb8ld7VH zy3@|p7?<1`yi&Sx>?c#flQCf1^?fj-VhrDK9nN7_U9mA_AbT`SVc6h-X|LW>)T{GQ zIdp}bwY&wSI1I$oYrEmXim|9T^#SJVt>*Y^4mftA1E#$jCaccdgKw{FNOiwTDjQvF z{L7v*7WL({I#*eL={$Cul?he5&A8y;cF?=}jckj?3m2l5ENWgjj9<=sQF!I6uS zw?%7AT(M1_-qIc|ofol{ZVM$d0gsXn|)e$idRD4S#9 z%cG*c(O#}yPyjYg2DIH>2U80carQC6xLb8peq1+~V~;Db_@>~2riDBjA%^~(R3z2`(*ay*w+M44jUq+*L3x+^I8Hnw*2NPC$zyL77(B{{%-d_YleVZW z?)?tMgXUvuQymp$idwAVt!76wZO8w!oLm z6IkXRmDFhViGpYBBST|L^xJ1n zKY^Off}hI>L#T?)dt09}G2a0XawSQ{rJ;)_bymg0<>_cD|y*yK@nI zhH>Sxc9No%U_C#bPunf^*k;OPQlz$but_;D)6^C*;(YA- zgX*^%V^(1&Dz&%ZzWdssGHErI>h<9N#`Z+PXnF4)B_U!IyL4?nM(++N|ZAa2?3KxQ7~U zq*1SaN(@`A*T4Cs{*HYQ2@mx5o z6ml%>IA=y7g&G73Ms+wR&+7qYTBWpo`WpOh(h93mOxSSJI4t3XMzdjJj#(QYwl4|UM10IQ}kU*P?QRhfi2NDOAiy5o+LllTj0CqHCbeiWak~)IH9Qr z7d`z0vySA0rmhpl4ZK9okK@%Zo&=+9_6!cJnhNeY z(H&;J3+6EKY)P*hjb8N$WMbVLb!WR`==uYYe0CNnTZqrqt0K~ia3W{j?dk|iB|6VL zug;vh2*ahZ=-3ZA?WLGwH56{e1)dD;_d!g9HXB5aC8MiKbUHVb6HW;v%Q(TC7&Q!( zZN5-t=fkpU%NyzLsPUqP4(#0U1kODdjLZGK#Z1mFO4IU}Y*Omw65V4`NkwPyoO=ht z{loD4Fi(yeG8MB6V<9hGFzbKVuueO{Z#?CUp)G}Tc%)!8*bU~gdr@d$(gcyIC~Er$ zAkAWkWHovUmff7gB`sc)+XD->N!1c=t94X(F%Ue~1+x3~N>bcB=J>sIEElbH6MNS_ zdFAnYz~kBmsMd7gk`)`|b%&N?Tk|oj?2;krj~T#K;X_ejF%>I^4#M&l)sW}rBWkhZ z$hM6$7L7A!#qEw1+VDZvWU^Gb59NSPqV|^a5RRF2W1sg6Fm2{UpNdKXz| zo<_KoOQe{cgRm%gK0BXVr!Mb#m=vS#sa0dfaasOI@>^~L0c}R&mmyZH89xC67QZ58 zvkdaoiUfsk2QVmZ&!N?GxXCty6lHb}0jc3wcD_4h&$&baPkvEIGcC63y%rOGv1}1WUtcpfP7PKM&PSkyJK$H2Pg!01^4GNp{{C1B<9n|zfn-0PZz4R5Q#@9kVt%)9AG9-E=*U$Nh7H4T#+0@!P5IJ?Cv&@O5w zD=c3*_U{ryN`pg?w{|pdf76%sb~$p?xIfr8w~{OIdiF2K1D{`32%=R=`{X^WS*h*qqu)-MwmEm5k>WNV3kf8I4^!ADVE1cH;Q~XBl|Lh zEe%1p1728o(+w-%FP3eN8FS@T1I)9%N1A?NlJ4&nte?FMJwwF*?UgmBo_Pz(csmUL z-iP(yEc^)_v9Ti0G37k@o^-+BW!*V+n35yfe}|ZxV=-;LHhSHDCD_YX zU{vXR)a2wj1_;-lOX3jhHNXv1g+tmb`y&_(Ur$Zvm!sm$1_zDj6v5Y#$?$VWj$EOI zWe2;U=Ne}S?A`|IO|M8^ix%N_H^C;@CpcT~ldx%DEB?}X8Jp#7gj(_5`1YQP3G!_? 
zX19o|I`?JcKEu&ySZg+%FoYfVT4G|WU@XC{v@RkL^>-Ym@`d6IAF&gR;*79t!9uPd zE$YrGd2*BJ_xa&|NIJcj7ACaj{ncIAGv}>Tbn`Q0O<%};$p!OXyaDx@`Dj%moIyV` zA>!)}@)=kTo}IEN^(Xi8;xZ{ zTT!c#4jAV2g`5K&ZlFj_MbzuReJ>BSakG<82Xhl@4iH_CRZ1?)U05{nhn(X9Id zwy@}ds*fuin|2wZa=}?C(msgOI+=sJVg}}&UW!W7la#W21{z(eqzNBvI4pb`dhQA# zv!oMnW8+|qaPlMPqP~*#S`(}q|2O50+6Xx}{*voXwdBB+tH40Nm^Al_)M+=~$sae4 z5Z+n`*8IT~db>Ne8o!En4H=D!p^4zqEdpJPmT-i{32<&)M2ZsGF?Wv!Yz}RqRPA4I zdH)2g?rzH_EmpJYFIRPw?;t#JWHkqzeoLAkwUjChNu`mG!TGc~WL?oAzx$JA_aUn= zcVad)Tuyp{p3YPa^Sg3I1lCj4i zaPtT9JGLFlzI>wcU-qm>9O|IASr`2DtK=lB8SK|pu&@sl(}l}6IDh%ipLI| zRuYBlN{rEWbv-0IdQpMdI+{G8Bd3@KU{n51QYG1e%{x)+5IuBXvl>b|7Ky5Bz0s!m zOIX%PxW-h$Jl}F98U-QyeVr!yyn+*cZw6Fb2-eTy{nYp1Y~H0aoo!02(Oeyg!6T#C zZ?&7GV>bzF&U#@dVN_50I-T`fbmB&lokMSrPd(4Gz2B}n}nyQ0ktTdZI63>top zog0X4NKhSVkc(*pa5w74VY*qQ1 z^u8RX`c{?ljhV~Q^WqDr53vB}vxCU*bh+Hn(F&d0ewAKq7X9FxkENn!tvKS$1oGc) zhryjIpkj)WCvRzu`xQ%3u}JLC-T`~gDu-iUfSy5f;mi2d=y-E7s~j(?C->B0!#9@L zcee{G@qp}Qy9$$j$3v9;3kq0aig7o5D0D_1Rr?PhosNg$*yf&`y=(y&h_gmXb){sJ zI2IC?E#$QAcWC}xGv1!C3}^lBkEOCH4$kpH#eq0=Rc4a#|BB3#`&P`fz5>3N)DBo?^a_ zXPrcGj`AOlp|@6w{fsld_}PN1>ql^o*;?3MppW*grnAdQPxfii96kQy;x$Dy6ZpQ`KuWKIJVrEO91! zw&}#S9~HdpMmyFE)D<4!N6_i@WQ;%Nz;64*c_?Hh&)?sYlRvt1Oe~_R`vy_AK?Gh)wBaun-dv$Q z11EcqVf}drsnNe-{;p}LSm=*2=TAUJ zt_4mQF^2K=83Y#;lWoizRLnX<$>ZNaS)d{DtmU`dd_j|Q9n!! zwc^n8&!h(7vCy+`C)`;px$@6L;5=+9`%O6qUV}!m(ZZ3OwdW5`JsQQeb9-a7;c(nF zX#wgDjzG=uzbQt&lhQo%NpZTbLxz718NAGwOX{1!=_!k_@yBF}YgS5ek4!MI{c!T_ z5QfWwJ+SO^GoqVYDK+aiJpMMHiv}Mg&8A_Twn3YcvP7n0zvqPctf`0Ur>GfB?(`#qs@aFunql- z()+aGLSJ)^^KK4_AMe2TLC!qbIGVo~o1w8wh3L=E<*do=>AUA(^aG?b^-f!%en>f0xeVguL-`Ok;4bv8^0<=PR_e&6|A-Y1NmkGV;0n}=YNRxa#%;>5q) zy!c~OUrdWX0GYadF*w73AB2iM@w@U58)pZ23Dt)~#vc0Svm-%e$poH4nBuE^vp zb;y}zz-9j+ru`ojoqt?R`S-^MNs^3&kPJeS43f+}hd~I*NRmV&Ns=TXNd|2~Yhy!e zV~1^tZ3!XWb4Y%LmLzR{CrN9=Y~s_>w%_;n?|nS19=Z3Pb6&64^Y!ZzIo7@&cKkh= zn?6(u?opWdjtMVQ%uVveoMH4svBWX9Qgx2T- zDL%_6W6mqnj^$3NzvOIo$)&U<|EWXmG1J8a4N@xOxAkveh` z9bb)odkPIQ5D)F0jAnZc!k!?KsM+75#Q!#m(eiH;Vfb zOr;H7$g7`t21ct<)wPQZJGPVFDi+dCexTaXi_v+G@Cf{{O`|%al%JLz6+1vLaNnsD zcX^~-j}>4zx)}BxbYz!_bH#VPUP`%FOeOc!SVHlbaeNfT{5Sy>J10Th`&g>X@M2Tl zaakA9m+f!5VN%m>N^Wb7l`HnrnYAM^XN5nWH!0C0(jUCL_@VPZ-kdVM4`pt?P3qsA zxM`=*m0cR3>f={%wEvDmF9)L9EttH1Yfn)tw0QfbEmt}(#GW6WIPLFbvWjSr1DkXh z^l&le4qJ|meRq<}*zYN{Wd~Gwy71GyR$RDvHWoJ)fp+o*!A{!=HGlT!P!mCnm~B-P zy319;5=ZuZO$GBE(CJqTwCXW{>+c;DoV=w_cf5%d&+Zm@-F6YoO$7h_@fcOHM05o` zlRf610Pod#6i_^tH1Yt{1)F2Y{%NRt-T_qgLUT;M2Pp$nAkKCkrq6U|oA@qlekqW1 z{fFSS3ldjEP!1*0w=s@fOHr%@__dKOTXBJY5UHT0sk(@5gfeI zuff>wi(tw1T_t=zcJ%hxR89*y23d3CQR_ZbHfD6>q#b!w9O6LL32WHGNLhKX=oz+cDtB>7GcY6w;lG2d!5yOx_vBC#=?^_jLx_{=pcI=7OcZ z1GF8z<)l;3z$;)rgl-Z3*yX1wdr2G{6Rtt{>_rrKB!Tz&PQ}dRM1K6)k9Fd%5_F>MGF?u z^UThu%~3*-M=v~K>%a!-N`b<){-VGDOF3Dg==MVo;a&WN0zUd+%8WMT5gdbcR!2dh z{(g+b&8wh)-Imf8^oD}gHhlNECu{A$6I>`ITby&JvI}D|VPlN&<%(}tge5pwxMFzM z4njvbBPHED4%Q@Q${{xj0!B6q?R<-eM`s7F2%U;*MJveK`-X~>H(<{{e8f#58!pYC zk2`#9xM}4I?0I1-mq)*$Lx1<<=pT9t2I`NXyp;@fO_Lx}%sBfB#T;L-fjzr=aB7V? 
z_sbrOaUB-X(S|@&*ewEM%_a)^`Csx|^cN|Mev!R}-^?RQ&B|q$De8xTXkOQkGo9Xm z>6Ze@RPqJNN6rxc&QyHk+EiRSV<>v0dsEN@@eSKH5Ik*#XXy4_Ioo0nlrQ;A6(JrR z?Qu{3+`28+ADP7&3x<)}Xih3OSuWMLbEKZtI{^fiVb5Wnqbdaxh`zB zSZGV_EZMXzj-%q{a_G5FpuS%$?uq|@dvuj*bC+^c`YBRah81`Vj&?y=E5V=CVoJqW zC>+>V(p^c1n(RpSclAU?>H~25Lt^)r3D|h)1?hr^k>BZ#xM#>544*p!V#W?OK6~aq4vL9c7+yfgqtSxAv&&UP0yKLIuOQrxfsDARQRz&g!pv&@~+ftEB%R^N%m2ShS^}Y=CbG|h#zmyb}|AG;|wtuIGTAI9pLZ%Oo#vBS<_UN|BEi~d-^?(O{1H9uIq%j1C3 zr?WEgz8tV_Br9Gvlj`(gp#5J2FVde%zZr^6we3mW&xXr7EfnnI)f{b}0g#aj`k%T= z5vks68h(PD>c#J4HKiKRfx0_ltfioiK!P?$85ZG^T>i*MoApR6}0dKPm517Nx9sNX-dW zSQ#;uY+HJuaz_&6M}$Car5T$QzavH9T3Kr`8MiDO$^G`4W6G**O3!bmk`Lq9Fkpoy z<@|Kv%f2G}>|cON{}wWp3I zfR?pxsD3t`lqC-&{j(D3#;x(JbkO0J4^z1~bt8{e&%uV2d6=0a{PaV+B6RE|zSW6j z@;xQ(*lowq<{)JJQb2~ET4V0PWGU-@JUU(ekE*Zq!)&{oaKc}O`H%Kf#P6%w@W)8n zwYodDt`-bl!DCH(B6RQWHz9RS5HGA(qi2ioy?5)4y4hMvaaurWzB9ybJ`xQ5mD0X# zUD4^E0iw6(Aw}o@A?bb_2yZjRyUNFz>zc+>X5~wwnj$zfLGYE$`?9*}lT;`8=_OOe z?4kT1$3=?Xi#=EHzqyvI#=x)+CH;>mT`(oKU^ON3{Vt+}$cCr%5nM@YJ>6Luw^)${f^MaC|%dqzOQ1s~(AaXDp*c7o-)(yNu2BE8! z3<^a3Y_WSK)<}*mSHakY zh0q#519*-T-*dkXn6hjl)(F2!;egHZwO7OV^e9~OFF{>{ZE**e(?F9$_py*Haz9AK;LfP@aGA;|~k6T_Y zLUsH&w0eJnbo(Y#5WO$%i%zKZ)~BxwUdS`|U3i zIxnE{I?4Jh(WUje8#fEjZLU`+mh2Hb$&n0F@4qLf>^Moe%QB>Zy{_Qd`W&fkeK@wN z3&tLtiOP1fQ0uvgCipI4vyVSha?DsXd~;v)HvdOAY=^Sy)*>nI*G+&@Z_%EoVjm4cRLWprf+6};_DEpr7oCDzwg1yc^!op z_aJ00kO3?wv31{7TsrO%8PmT~&lo#SSob3Ym?mSKaRz-+b;4Z1{%$_~geKI+amiVG zh9nz~uKi7lsF%3|W(D3~Cz(W8RWiP+GPDz7!ATrk7@XV!1o|SdQW9^iYf_8H`>xJM#pQ zVNE6BjSikIvSw{q-=-t_97c575XKFOBB$0(SzvV2f$_lWMLo9d-P zlS!leWgR-l==u3eFiF)QbET3J487SZ`7PvIi`}d-L#lFq272`d zP8s6}N%PNv*T-wndWMA8tXiSV;bmA+KA)|)nbdO+%L50fF!G1L!DXch9Ix21By_UG{A=U*{O}HVd%Pna za2mz~mv3T)cNeN(b`-vJiNyM%X;@J^5zBYB#<%U4bKJa7Ky!QXh&$r`a4UrS3E#DT zatwHdt%17EPM|Cerm8Os!CSWs)0Xa|h5k0Ib1Rmcf+L{hSR89tmdR((jGH^!<78*y z`};1At4l?m{gCkI`}X6U_M=dDa51SyEaB*b8rEL5dju-ImOyNoFDB~xp-1~0l)F9_tLy~B`%_QOdpD3X#;Yj#r z%|>?)Y4k#CSd5k~(=csHi~RX;3;0rn7p+C!fT%6AIS{d<7o#VbH@=+PK;ua{!J z7M(vw;?Q%o$i}4oCQ@K%e?I<4~ zmjh#5xo6TC)-QTV9vdZAYLuLD+#4E;kCI`3EP8ak3+jMwpei~HsW&y^mi9`@{i_^` zP6V^dtP%WJ3BplF<#@Fz6O%HX~f6B~fHw<@sY%_405)lT&8c|zRD zGek)@=+L*r(9munxt#D8x}78CMty)PyH!|hFZkh86R7G_u&ijVxtOO~&nd&)u;5rQ ztG=6yS;4m;_0=t~ohF0+gGmZ39D(8O-@um@lTmSEI@I;?AZ5T^$yV$Nc5iyI?%@;J zr2QWlf1W{*PzVh{YgzewC@998Y1EG#L6J39bMi_A)44s!lfW?RE#-+kOYCq< zE`F)T*2Zv-d2G#wyxwGL*hvwWJp(gcS)GYK_~K_??m?P zb}$ucnHFKw}gv*_AJAFSAJk9B*BpejzsrVshDYl%Bfc9eME<3!ZWT7`Z+?m}*~ z(D`zvOWICT(G=Z}ic_bfYa2i-F~^yzn#jA`4v605#=6hW?0r()w^k0|Na2I{;vR(& zUZYXf*;!Ej(PDAq0D*3V+F_-%U#`eO?PO&o-Zhg~JT zaTjC>pL1#I4ymB^EYyb$mTFd|k)QrD<&W?K)ya{vGT{pOcCkW3OKd?ZtU@~<;H1LN zTp1`josPK4%{S~Y>Qf)qiMgQhA2CO_`Av%6){%Wv5;1?RJ2~5Ua~e5e{tQPn)xL$C zlXmD*G>@w-#dE7~D_d-LhNPn&*t+dX4mIjIdb~Y0+SZaW-Uhukc~g1$w^TS{kAyEx5IwO?spMN6}#mL8Vxtz?}%nL z<{bOT4-Hq^K~>8oIZnLGN!3C=zxtrljKP#LKU-u`w@Tr*eev=7vFJ4jNYQ66X@6GB zW`8fhYfryYX{;CdT^Yiu&xfGTzpm)CG9KK=cEj?;Vup9EgRr(Dhj5}n%3W=MM$f~r zFk%q;Z`p`7QEpf}e3-~j7K)6SfpYIwi$18HaCCSpJPEV;M&dBeK9Y#4ylT01f~e~$ zoy~fqBPz$VqtGak-+fgnD|Xb#H+DL+rm-E`ogRR0eP(f3tiY78Qlnl=|T* z^@~`+Rb$qJU#!@HG9oZ5KbVcToF)5%KSAAAU)a-r0Z0DzQh0^*XfZ?N;3}r#Kxl=# z+O=kFdo!w7o`~VkyKr7tk+~`UO3^6`(cz~Up6qDFxl6{1tmzgot?-4o+@9jUcSPkd z7jC&}$96ZSVY2IV)YpVah35{DT}wZVZY{oNMblUt#h?*h64S;nklk$nhyVAAY^H?b zk-fvv@`nU;nco+ig;sUKW+6H*8^Q?(UPAhpV9sdNLDHO5GJI}LH6n{~NMuS%H}%HU zo!!x;(g@94Vpw4_RulQB(9WKU?!zs^v2I%g>$hv6;?NLmOw^)!%PUFs%|?pyTf?ay zf03#CRXIA~zO;7LAWliOrpCpMwC|lArtQg>qed;^0OvNGGcFG*J^a8bq!a72-pFd3 zjqDj3%Au`4)8q~woY5)}J!Sxxk6OnO@1|k&*-AO!8*>agKARi1Pr~paB3t6_hG$;R 
z;hLj@1#vl_p2~HUVc7#x4&4Oh>(P)uV+dwUZVh9*D!5>@$PjOLlhW0HQ}J}M=Xte& zwX1?vFTX<7tpKT5-wM-(H#To!9;hALanc-1uI#;w!rPUAUKX?QCGqY!(;%BxbwrmJ zQ#jq`H^@C~6#Xfk;7G3xsBgX{XP;h&3hjB1=S9xkIAAl-=r}ekb;tCanV9t39L&5j z4OPLZpnVXB2T}&Ix3?qaf2je}!`a~WZWyNv?vKaQM2?eBgQ@>rs?Cc;r^?>o%Xum7v3Yknna}8t3i-XH-!~KVUE6bIff<|MSjwi|i`aav4bu@z z&TAWoX~%-7-&801w62Msi5?WUY79ra=TdTk0@G+6#)_^XYgZL|^;-zVCr*)l zlE~>lUWFc;bZpFQE2U*@o&3kt_Qe~iV#Fb6P{ zO=qWvgSoP;iH>epvu9HmR*U{gFb5e6y^N&`{~-LHN1-5>za3 zDM)ai&W)Expm*IjaOi|TA8>U*pfUJ4t20}SXDHd!nU%?3 zq)flrtZ@``FSX{_JEJ*c`58*{dMv(qPeC>4m7MEbDVv_nXRVXD==`046@ylyOHHkq z`DbFrYh@p{#e|?07zno(?}xexK)|`cS!~p5;cnp0*M^mq}=Q z>IxVd#J+atidjooa~ zbFvIMgVQ1WqBXYcnJBmvb4W4hazXiuc-Fo8Qv#O=c3LP?6| zHHj5bH>H@R;cW084B1DApw;0h{t_R?=TEo6x_Qq*b6_T?Oq)zmq@s8{*?A`CD-=}t#0!q*TC(HZzi33?STqh;Ek}^Z zQW$5mP1r(AZ{L^A3Ra;Zzd`c)#~K~)ek*d8;cWH2BR>9C+`guWEI_E>P90v%p%LOH zedVx}c`^*+a--->KQ(3#4nX~o-zDc)9=xm7L~Pu=4NN~glp_aUgn+C6fyhA~bgXt=iz#n=bM%NDGEH8MwOt3JuJa>`b@JhT-}PqbFMKE~qR{%-YF0l_ zl>#C}kCM|s%&N+s(kNAu}iN*w$lr!Y#ayCr%J(Ndp%{W z&xOc>6O`e66;7OpL+6xv?6m6?D8e6s@#tl#a_1vBvfiET2MSKa_=%*5N{6_ELN}Z} zmm}CR991oPQC++S(7NG z-)vm?DVDQq$FXIbzPQksOQYofVHx`r2|bP}1x@&Ax9`5W*W;7^LJ4e=PUQt7uwG<) z6aGsf``o8g*yFyG`ptU5I2$ZwM~R;`?JAZ0Tq9BwrFhN+#C$+%|)Ht5%gF6P;S{k9349!R=e$s}zBB5_{77jo$K=reZji}3Th5GSh}~R6@waTTwrLQC zdVd3Z&e(B^n;#i|o+g_e*ba8jEihqhjd->sC>*+4^8B|e2iB}WZRSqdf43*<3mg#a zUV_P0WSYD$kp5+Zocq&{P%ftn4xob1uU&~Xnx9BHVLgS+nSlxtd$71CWc3<}^|ODa zr>8{rCD57=JdWki7w+tPYOG+OY$mUVB0v542v}zczJ^1deD}yyyzyZUnmNtH>@#8F zUDH+`u|JeEp7#KF4C?+8#V`-)cOvPKoNww&ZZblXY#+f!)Ix$Y`3x-jDs! zBm62<9}2>>$Zw%0t3M9>F&K~7DA*!>6j-$@rY~ZiFl;}qQLI}f`Bf*=nRp3n+FqdC z*$P&Mm4ki82~fOn0sF%F{M0=f8=G6RYSa}uH24M>7kwj7ZtTH^zVoGQoJWT=BF{cc z%t|2_r0~8X-#g}ah~Cx$X#>mT!b!H0Vn!IlwRRlerWZDz_2K{+0l9sBsqlZ@z-*Ve zP3c}piokCQf+jh#YPv~I3H*ziOLl;+cCS=wq)X6HWV{ z7y3^ZvqYFlE_M4##;fA*-~CStv6nDTEb4L3oY477DCUWqYu3entgm*KQ*O7A;+Nkv zJ?o;lrcm4wyR_$&;K5if{(V-=QSpJ9zU;nZ6c;YigV*aaNUBN&MF%g|9&(iJ6}xHR z2|d=_-wcJzLn+Pz(Bn4~saBSQo#G^6XI^2qVK5Mb+iSR+;HYg4MPqrT@dfaD~gWtzjV0~^SA6Y88 zqOXpHs^f3u=vxap@}wVo%^8N;>hl`KpJFC{CVbzI-;nWG8>kZAt)@M<=!U2Gh8`Cl z)mLjJ&+n^*CadBz|LL)ClPjdHT#m744B!l|d}6k%xLp7@G`eBXzg^MPaVBMq@kPVP zM^Lw@9mVeNjv24ADQui8H{)i|zPwJ^n}3HB+buXUFOLpAbHs?5A{Qhw$0-eAtefQt zU#Js0?dU-&Lp%85&quxW~9b;M%9ujTR?D?% zeQ;kG&0DS`&^mcR3$A(;gQdp2(@Krm$hY` zN4C6RVXq)g7&S>`kH(PFXBbR7tH6Fwg4yYDC-hn|gDX>Kf%|lGEbyF+5d}KTIBCTJ zeL^_kg)`N3REdn3hQ0sC;P;Er8avj}^I?9xN8G0a#EkjAm&hJei0@kuKaof3&rK<> z$TYPWa)k%6IDQ}m96tnh&0838+8pbC1WI`oMuB4!*?$G_`H8OVUN3Z1--BfFcoW*E zU89yP6&9WK<_UK^P_h1cK~g^xScYss<9-)8s9KA@zENl}jnpW5#Y=`U`!&i1W{~_m z5fjoj@R9x^2lus4fzuq}Cz=wB2@ZneHpz{l;~c@&sfE(FKT~7+2=skD59@{g^mvjD!n^11n42C;rW%a|}WPBTkfzNGO zq1be>YVB+(@*kmttSgk`5>HW8r*>$p5bx4rD>x){>+0kVShsKxxoq1Cch|(S-Rf}m z2;L56bJB#DWr7r4w2^3}V7K)U{d$2R?2;OS>e(ScONL=X@P9SIO&UZ zd#)0k4>zwmsHD+Y+_wv-+ttA5+6m}XGf@2bZWw9(2qN^6?0!ni3a>RDo=Lw@s8tut zE$zaMotsGU+;~w@2+*hw=YYk#=y4Zo&Rzc_bQ>wYxAx-M4YZ;#U(tE|%Nvd1ODK6< zz64Rx9r4nqschEe6$LE*2u9I8m|ZFEZ#h@xxV_b+%CaGC<~qsB$_H&1Y^AKx3(#(3 ze=J($#8mkXvKs$|ZYP8n^v+=^CDaW=_KoMJvPQCfc8S!<|4OchrlNlPG)eR4Tnt>Z zfn645kWsJ-n~>+Ol^Did8)-TtNgJ1SvqKsjihMv#hi2167_2a$?pYwFso@3c8d>3)f6Qs1nq>_ zHNM=`Tt-z&XNgKJxa_+QoGw2Gm#fRUl)sg;-40XMQIX3u#K|hxjvO#~KV%f;gZZ3p z3@f&fLrM_3IbR^1vl~`fmP*FY;_kye_=Ha^o-uXhpw9+UFa1sQwdYWs_bjG4kKt%Z zKXkd70)G9^iC())QqYK>sBzv5dDPUw=aS7k#DT_f})d8``xy+Eg4+ioy!9y9b#ugpNbmgjveTjM`fqKC| zU{f`m8+5){`ukxB2^@(THNl|!?=@(z3m>h)9In;fg}`0`Sh=Scez`MD`1BW|wk%6J z@=-8Me*a#)uVN&JKgCUA-2kcb%_DG|F_DXY_C%`~;bZw`5e=*ne%pbW-WCh?_-@U$(oJkTsypkK?;tyk@XEz(mV35cj5U1) 
zC%KDYSOr*c+{-0UF=#js)OW}9zbvr6xPpqGyr$2&!VCTKlAOEZdni=>tMRN>p<;Ig zy-- zkI1YDzez%i{Mf%6hu>-i<|9?;(l(u%!$x!DRs)oM7%Et<9k_7&uhi3fxnSD%0XLc*657jr0#-WLc%a+6mr~|;h6WX4|`bqL$N`}2M)$@&X5#}T-*&)umR&3a)ku$tUQo`o}XkojN zN`AZxRZB(Rr|4i-yxvieaA!8x&l|>GH6>tlbeC*D8X$b>8>l)vp2`j&H@^9i8rNxA zrI}4y=f?$yesIE`=N53>##2-l(vG*d%s{9e&oxz3v7}%B%P2kDVVc2w5 z=pOHmLr+U_qnUI{6LEJi9=&hDcAtvqy?QO{pLAh^yISM3d^oy`ZeLsfdFXfX2&vkZ zkZRdF(sW&mRX0&ecYOxY(?3H(P%wJON@!XlvN#)UHFY|XZSL_8Brelpz0-1Z>wF3H zU$;s}^!>OhaRF4RXG_r-%EjI<>F%HoT=icm<((H?c=x}-e#8SvoTcDm8}Z(L`7fwM z?m@BfqbB|KpX4&#M1IG9hAnYzx%sOJwzPC*I5mcAqXuKlg-|S5JXQ1v{7i4%gs$+q z3rG9@PPxH(5D>i`B3kuhi|r!T~sgsWth^9qbke@b=(rUEP;jd$ILpmwAJ zTRTie#|UvVA6f-L7lfy>r~}2uyaBu4mT`X0T(tQtZb<)pyy!B`6DxbpC*|7l=$kl; zA6q2~olYk8`)8ng`4{MoadPaoMsm3ofQno5z;jezR_WS+#ovJGnjKUJ-6?wLTk;h9 zk6m;JtU41*5!4rxRKR&zdQ7zJ%-IJzz`|-5tlSvKj=>8U1dqEavo#d1>MXln)v#T< zU=uD4M7KK|`F6!n;a^dM?$2kEm!>!F5cg@b8_hKBjPT>UEheW)R-AS&6zg{^W`p{g zlyfGQ2R>bap%Y!$FIT}{e`{&W!MWT`+}?Cy+vLPV!DhM_k5x%KB<(T8l#9PWgLvMf zKiIN$>KIgnFU06(TkO}P4=aBae#@Ew%v%}FrupkIqiqIMH0rtP#B!LhbR&Ph*AW%x zjRk?HreNXrk#tR41r1jRbHbw`ncWH~NFM}z4)=em^)sZW% z!mOZ?LZe>AO82!`yl6HwBndBE=2nW%u8^H(?*+T9`ysYyH~1B0i4KGhklXSR4C-;@ zcJwq{>(Cz4I%`OgaZ(O{t|evYH*)rS7gQ8?Vz2kp$vt0ar|K#)yk8DC{vFE7qnDv- zZ5J&1>WM+;?vOS|WXnQ!gJW?xy4A+BVNJ4}d+v->3c{;VxCS)eh)m!`4~_Yp_1qNJ z9@7ujfUl!qzbx1Z3fCmzEtx;2YP%bDd#^>SY01cim9E*+?qyXY1mt7atAS&f-T3k z7kf@+C_9W?hAtLn{JEqD`fG%iXEp@$_clUt&%2Oe83PuIIjCPdkBg7E!NUEmS?wrc z^qYrLz4~|3K2WhTeTQ6pZ!SbO6_9CLuIxUkEq1#!0Zlo%l-<;x_9eTa$HW`dGg@$| zlRj!DTwcu;eLHgU+ksd+M~P;}uN1grEr;%(!|Jk`6#8u>Itnc>_f~&6GIS>TOtWCy ztb62fL@+oznn1N^926Qa$W7TZF=5&}NSIhh={d*ALE8fpf*WZ^oE<8fpHPcBkzM2m zaA4sK&Q{E)*pF?|@3hFB{5cGLcSNu*{a4vzQG$(#3D0>Uo2)w5$;<#>}OJryl4!a3vbawz8U{4| z1EE=kr0vv2%AdHOg7#aY?sW`T1-_H?RXd>me^q4cdlkGqXTV#F?%bl#W7Iq^EPU6R zPww|*qswQ>!#Iob>)uji@k3Be{*F9-`l9{ujht~hk%6_>u8aZPqg5PHNN2eJ={K-K&k6zb}Wo`ctMOqD&RRsJa@*e>C!l4LNtcc;)t z7MwYDET$dYDVK@&dugvkw7xu*qn58k3(sYs>T`hh+PneFVyr6s>Y>H>Srwmw3acnUl2S8{gHKyoTrgF#oaAu&hXQakD} z=RJy>eh4;o_#5u}cf;bF4wS22%%#7Mf(mgHF}*$ldfQf@sJi5FsP9~Mn|f4m8M;!_ z+Afg1u@&k=DgZ{7L;1NioHqKL{P^HlPHAeU%+3jTL$FWNmS@Y&YrW9!KP{xa>A{Mm z)O_twJw?x{mF)9#$dJ3AZixQ%ykDMBadQDV{ijB~#|b*IWGP;=3ljdF>vHKYKS|mD zoT1$MLa3ax7Q;pe?o-*fQxw$Fb{XdBN{Tw3Kll5$IJ%mI&sO?TcjA@Am`6NNlA}?AYZGc zd_eES)yso;vS(MO3qOn7)P{lsab2RpmG)Iq^_XE?5HJHz-V`j{ z8D40rPm%@-jn(s7Ed_P!i`waF071U!QPwS? 
[GIT binary patch: base85-encoded binary payload omitted for readability]