from collections import namedtuple
from collections.abc import Generator
from contextlib import contextmanager
from http import HTTPStatus
from operator import itemgetter
from typing import Any
from typing import Literal
from typing import Optional
from typing import Union

from frinx.common.conductor_enums import TaskResultStatus
from frinx.common.type_aliases import DictAny
from frinx.common.type_aliases import DictStr
from frinx.common.worker.task_def import TaskOutput
from frinx.common.worker.task_result import TaskResult
from frinx_api.resource_manager import ID
from frinx_api.resource_manager import AllocationStrategy
from frinx_api.resource_manager import PoolCapacityPayload
from frinx_api.resource_manager import QueryAllocationStrategiesQuery
from frinx_api.resource_manager import QueryAllocationStrategiesQueryResponse
from frinx_api.resource_manager import QueryPoolCapacityQuery
from frinx_api.resource_manager import QueryPoolCapacityQueryResponse
from frinx_api.resource_manager import QueryResourceTypesQuery
from frinx_api.resource_manager import QueryResourceTypesQueryResponse
from frinx_api.resource_manager import ResourceType
from graphql_pydantic_converter.graphql_types import concatenate_queries
from pydantic import IPvAnyAddress
from python_graphql_client import GraphqlClient
from requests.exceptions import HTTPError

from frinx_worker.resource_manager.env import RESOURCE_MANAGER_URL_BASE
from frinx_worker.resource_manager.errors import InvalidQueryError
from frinx_worker.resource_manager.errors import PoolNotFoundError
from frinx_worker.resource_manager.models_and_enums import ResourceTypeEnum
from frinx_worker.resource_manager.type_aliases import GraphQueryRenderer
from frinx_worker.resource_manager.type_aliases import IPvAddress


@contextmanager
def graphql_client_manager(endpoint: str, headers: Optional[DictStr] = None,
                           **kwargs: Any) -> Generator[GraphqlClient, None, None]:
    """Yield a GraphqlClient and translate well-known HTTP errors into domain errors."""
    client = GraphqlClient(endpoint=endpoint, headers=headers or {}, **kwargs)
    try:
        yield client
    except HTTPError as error:
        match error.response.status_code:
            case HTTPStatus.UNPROCESSABLE_ENTITY:
                raise InvalidQueryError() from error
            # TODO: Enhance the error handling.
            case _:
                raise
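# A minimal usage sketch of the context manager above (assumptions: the endpoint comes from
# RESOURCE_MANAGER_URL_BASE and the query string is a placeholder, not a query this module
# actually issues):
#
#     with graphql_client_manager(endpoint=RESOURCE_MANAGER_URL_BASE) as client:
#         data = client.execute(query='{ QueryResourceTypes { id } }')
#
# An HTTP 422 response surfaces as InvalidQueryError; any other HTTPError is re-raised.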


def execute_query(graph_query: Union[GraphQueryRenderer, str], variables: Optional[DictAny] = None,
                  operation_name: Optional[str] = None, headers: Optional[DictStr] = None,
                  **kwargs: Any) -> DictAny:
    """Render the query if needed and execute it against the Resource Manager endpoint."""
    with graphql_client_manager(endpoint=RESOURCE_MANAGER_URL_BASE) as client:
        return client.execute(
            query=graph_query if isinstance(graph_query, str) else graph_query.render(),
            variables=variables or {},
            operation_name=operation_name,
            headers=headers or {},
            **kwargs)


def get_resource_type_and_allocation_strategy_id(resource_type: ResourceTypeEnum) -> tuple[ID, ID]:
    query_resource = QueryResourceTypesQuery(byName=resource_type, payload=ResourceType(id=True))
    query_allocation = QueryAllocationStrategiesQuery(byName=resource_type, payload=AllocationStrategy(id=True))
    data = execute_query(concatenate_queries([query_resource, query_allocation]))
    resource, allocation = QueryResourceTypesQueryResponse(**data), QueryAllocationStrategiesQueryResponse(**data)
    return resource.data.query_resource_types[0].id, allocation.data.query_allocation_strategies[0].id


def get_free_and_utilized_capacity_of_pool(pool_id: ID) -> tuple[int, int]:
    query = QueryPoolCapacityQuery(
        poolId=pool_id, payload=PoolCapacityPayload(freeCapacity=True, utilizedCapacity=True))
    response_model = QueryPoolCapacityQueryResponse(**execute_query(query))
    try:
        return (int(response_model.data.query_pool_capacity.free_capacity),
                int(response_model.data.query_pool_capacity.utilized_capacity))
    except AttributeError:
        raise PoolNotFoundError(pool_id) from None
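# Hedged usage sketch tying the capacity query to the prefix calculation below; the pool
# identifier is a made-up placeholder and the call requires a reachable Resource Manager:
#
#     free, utilized = get_free_and_utilized_capacity_of_pool('25769803776')
#     prefixes = calculate_available_prefixes(free, ResourceTypeEnum.IPV4)
#
# A pool ID that resolves to no pool raises PoolNotFoundError instead of returning None.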


def get_max_prefix_len(ipv: Literal[ResourceTypeEnum.IPV4, ResourceTypeEnum.IPV6]) -> int:
    return 128 if ipv is ResourceTypeEnum.IPV6 else 32


def calculate_available_prefixes(free_capacity: int,
                                 ip_version: Literal[ResourceTypeEnum.IPV4, ResourceTypeEnum.IPV6]) -> DictStr:
    calculated_prefixes: DictStr = {}
    max_bit_size = get_max_prefix_len(ip_version)

    # Count how many whole blocks of each prefix length fit into the free capacity.
    for prefix in range(1, max_bit_size + 1):
        prefix_capacity = pow(2, max_bit_size - prefix)
        if prefix_capacity <= free_capacity:
            result = free_capacity // prefix_capacity
            calculated_prefixes[f'/{prefix}'] = str(result)

    return calculated_prefixes
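# Worked example for the helper above (values computed by hand, not taken from a live pool):
# with free_capacity=8 and IPv4, the block size 2**(32 - prefix) only fits for /29 through /32:
#
#     calculate_available_prefixes(8, ResourceTypeEnum.IPV4)
#     # -> {'/29': '1', '/30': '2', '/31': '4', '/32': '8'}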


IPAddressOwner = namedtuple('IPAddressOwner', ['owner', 'ip_address'])


def sorted_owners_by_ip_addresses(*, reverse: bool = False,
                                  **ip_addresses: Optional[Union[IPvAddress, IPvAnyAddress]]) -> list[IPAddressOwner]:
    return [IPAddressOwner(*item) for item in sorted(ip_addresses.items(), key=itemgetter(1), reverse=reverse)]
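# Hedged example of the keyword-argument sorting above; the owner names and addresses are
# invented for illustration:
#
#     from ipaddress import IPv4Address
#     sorted_owners_by_ip_addresses(router_a=IPv4Address('10.0.0.2'),
#                                   router_b=IPv4Address('10.0.0.1'))
#     # -> [IPAddressOwner(owner='router_b', ip_address=IPv4Address('10.0.0.1')),
#     #     IPAddressOwner(owner='router_a', ip_address=IPv4Address('10.0.0.2'))]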


def execute_graph_query__return_task_result(task_output: type[TaskOutput],
                                            graph_query: Union[GraphQueryRenderer, str]) -> TaskResult:
    result = execute_query(graph_query)
    status = TaskResultStatus.FAILED if 'errors' in result else TaskResultStatus.COMPLETED
    return TaskResult(status=status, output=task_output(result=result))
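# Hedged sketch of how a worker might wrap the helper above; `PoolQueryOutput` is a
# hypothetical TaskOutput subclass with a `result` field, not something defined in this module:
#
#     class PoolQueryOutput(TaskOutput):
#         result: DictAny
#
#     task_result = execute_graph_query__return_task_result(PoolQueryOutput, query)
#     # task_result.status is FAILED when the GraphQL response contains an 'errors' key.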