diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..fd6274d4d6041e73be631e5530e012d1fa243bb8 --- /dev/null +++ b/.gitlab-ci.yml @@ -0,0 +1,82 @@ +include: + - project: osl/code/org.etsi.osl.main + ref: main + file: + - ci-templates/default.yml + rules: + - if: '$CI_COMMIT_REF_NAME == "main"' + + - project: osl/code/org.etsi.osl.main + ref: develop + file: + - ci-templates/default.yml + rules: + - if: '$CI_COMMIT_REF_NAME == "develop"' + + - project: osl/code/org.etsi.osl.main + ref: $CI_COMMIT_REF_NAME + file: + - ci-templates/default.yml + rules: + - if: '$CI_COMMIT_REF_PROTECTED == "true" && $CI_COMMIT_REF_NAME != "main" && $CI_COMMIT_REF_NAME != "develop"' + + - project: osl/code/org.etsi.osl.main + ref: develop + file: + - ci-templates/default.yml + rules: + - if: '$CI_COMMIT_REF_NAME != "main" && $CI_COMMIT_REF_NAME != "develop" && $CI_COMMIT_REF_PROTECTED == "false"' + + +docker_build_api: + extends: .default + stage: build + image: + name: gcr.io/kaniko-project/executor:debug + entrypoint: [""] + script: + - export DOCKER_TAG=$APP_VERSION + - | + if [ "$CI_COMMIT_REF_NAME" = "main" ]; then + echo "Pushing Docker image with tag 'latest'" + export DOCKER_TAG=latest + fi + - | + if [ $CI_COMMIT_REF_PROTECTED == "false" ]; then + echo "Setting env variable KANIKO_NO_PUSH to true" + export KANIKO_NO_PUSH=true + fi + - /kaniko/executor --context "${CI_PROJECT_DIR}/QoDProvisioning/QoDProvisioningAPI/API" --dockerfile "${CI_PROJECT_DIR}/QoDProvisioning/QoDProvisioningAPI/API/Dockerfile" --destination "${CI_REGISTRY_IMAGE}/qodprovisioning/api:$APP_VERSION" + +docker_build_operator: + extends: .default + stage: build + image: + name: gcr.io/kaniko-project/executor:debug + entrypoint: [""] + script: + - export DOCKER_TAG=$APP_VERSION + - | + if [ "$CI_COMMIT_REF_NAME" = "main" ]; then + echo "Pushing Docker image with tag 'latest'" + export DOCKER_TAG=latest + fi + - | + if [ $CI_COMMIT_REF_PROTECTED == 
"false" ]; then + echo "Setting env variable KANIKO_NO_PUSH to true" + export KANIKO_NO_PUSH=true + fi + - /kaniko/executor --context "${CI_PROJECT_DIR}/QoDProvisioning/QoDProvisioningAPI/Operator" --dockerfile "${CI_PROJECT_DIR}/QoDProvisioning/QoDProvisioningAPI/Operator/Dockerfile" --destination "${CI_REGISTRY_IMAGE}/qodprovisioning/operator:$APP_VERSION" + + +# docker_build_dry_run: +# extends: .default +# stage: build +# image: +# name: gcr.io/kaniko-project/executor:debug +# entrypoint: [""] +# script: +# - /kaniko/executor --context "${CI_PROJECT_DIR}/QoDProvisioning/QoDProvisioningAPI/API" --dockerfile "${CI_PROJECT_DIR}/QoDProvisioning/QoDProvisioningAPI/API/Dockerfile" --destination "${CI_REGISTRY_IMAGE}.qodprovisioning.api:$APP_VERSION" --no-push +# - /kaniko/executor --context "${CI_PROJECT_DIR}/QoDProvisioning/QoDProvisioningAPI/Operator" --dockerfile "${CI_PROJECT_DIR}/QoDProvisioning/QoDProvisioningAPI/Operator/Dockerfile" --destination "${CI_REGISTRY_IMAGE}.qodprovisioning.operator:$APP_VERSION" --no-push +# rules: +# - if: '$CI_COMMIT_REF_PROTECTED == "false"' diff --git a/Documentation/CAMARAaaS-Architecture.png b/Documentation/CAMARAaaS-Architecture.png index 618a1eeb77809e3e2dd5189c61764b5277c72031..16a8d378264e92aaf0d214026dd7a5883833d838 100644 Binary files a/Documentation/CAMARAaaS-Architecture.png and b/Documentation/CAMARAaaS-Architecture.png differ diff --git a/QoDProvisioning/Makefile b/QoDProvisioning/Makefile index b109f4892f007b6c8788d3adbcbe8bbb3d10d9f0..848f34802bd5eb9fa121462109f2c6d3d550546d 100644 --- a/QoDProvisioning/Makefile +++ b/QoDProvisioning/Makefile @@ -36,7 +36,7 @@ describe-dummy-operator-cr: # CAMARAaaS QoD Provisioning API build-api-docker-image: - docker build -t $(API_DOCKER_IMAGE_LOCAL_NAME):$(API_DOCKER_IMAGE_LOCAL_TAG) ./QoDProvisioningAPI/API + docker build --no-cache -t $(API_DOCKER_IMAGE_LOCAL_NAME):$(API_DOCKER_IMAGE_LOCAL_TAG) ./QoDProvisioningAPI/API tag-api-docker-image: docker tag 
$(API_DOCKER_IMAGE_LOCAL_NAME):$(API_DOCKER_IMAGE_LOCAL_TAG) $(REPOSITORY_HOST)/$(API_DOCKER_IMAGE_NAME_ON_REPOSITORY):$(API_DOCKER_IMAGE_TAG_ON_REPOSITORY) @@ -49,7 +49,7 @@ api-docker-image: build-api-docker-image tag-api-docker-image push-api-docker-im # CAMARAaaS QoD Provisioning API Operator build-operator-docker-image: - docker build -t $(OPERATOR_DOCKER_IMAGE_LOCAL_NAME):$(OPERATOR_DOCKER_IMAGE_LOCAL_TAG) ./QoDProvisioningAPI/Operator + docker build --no-cache -t $(OPERATOR_DOCKER_IMAGE_LOCAL_NAME):$(OPERATOR_DOCKER_IMAGE_LOCAL_TAG) ./QoDProvisioningAPI/Operator tag-operator-docker-image: docker tag $(OPERATOR_DOCKER_IMAGE_LOCAL_NAME):$(OPERATOR_DOCKER_IMAGE_LOCAL_TAG) $(REPOSITORY_HOST)/$(OPERATOR_DOCKER_IMAGE_NAME_ON_REPOSITORY):$(OPERATOR_DOCKER_IMAGE_TAG_ON_REPOSITORY) diff --git a/QoDProvisioning/QoDProvisioningAPI/API/src/main.py b/QoDProvisioning/QoDProvisioningAPI/API/src/main.py index e2a6fce312d5fc8d10e57f98494eceeec83ac676..f401db305b0526ed7405075184c5615d3b95b31b 100644 --- a/QoDProvisioning/QoDProvisioningAPI/API/src/main.py +++ b/QoDProvisioning/QoDProvisioningAPI/API/src/main.py @@ -29,8 +29,8 @@ from routers.qod_provisioning_router import router as QoDProvisioningApiRouter from routers.osl import router as OSLRouter from database.db import init_db, get_db -from aux.service_event_manager.service_event_manager import ServiceEventManager -from aux.service_event_manager.camara_results_processor import CamaraResultsProcessor +from utils.service_event_manager.service_event_manager import ServiceEventManager +from utils.service_event_manager.camara_results_processor import CamaraResultsProcessor from config import Config # Set up logging diff --git a/QoDProvisioning/QoDProvisioningAPI/API/src/routers/osl.py b/QoDProvisioning/QoDProvisioningAPI/API/src/routers/osl.py index d4e528f52be36ebcbb98b4fc96644ec221119069..60ec78300129e9c8bfe2ec4490710ca97c746ae9 100644 --- a/QoDProvisioning/QoDProvisioningAPI/API/src/routers/osl.py +++ 
b/QoDProvisioning/QoDProvisioningAPI/API/src/routers/osl.py @@ -40,13 +40,13 @@ from schemas.retrieve_provisioning_by_device import RetrieveProvisioningByDevice from schemas.status import Status from schemas.status_info import StatusInfo from database import crud -from aux import mappers +from utils import mappers from datetime import datetime import logging -from aux.service_event_manager.service_event_manager import ServiceEventManager +from utils.service_event_manager.service_event_manager import ServiceEventManager import json from config import Config -from aux.constants import Constants +from utils.constants import Constants # Set up logging logger = Config.setup_logging() diff --git a/QoDProvisioning/QoDProvisioningAPI/API/src/routers/qod_provisioning_router.py b/QoDProvisioning/QoDProvisioningAPI/API/src/routers/qod_provisioning_router.py index f199afbdf963a46c6dc384a65152ebeecb79bc9a..8749f17903e2eb2509ccd08769e47668043b48f7 100644 --- a/QoDProvisioning/QoDProvisioningAPI/API/src/routers/qod_provisioning_router.py +++ b/QoDProvisioning/QoDProvisioningAPI/API/src/routers/qod_provisioning_router.py @@ -41,10 +41,10 @@ from schemas.retrieve_provisioning_by_device import RetrieveProvisioningByDevice from schemas.status import Status from schemas.status_info import StatusInfo from database import crud -from aux import mappers +from utils import mappers from datetime import datetime import logging -from aux.service_event_manager.service_event_manager import ServiceEventManager +from utils.service_event_manager.service_event_manager import ServiceEventManager import json from config import Config diff --git a/QoDProvisioning/QoDProvisioningAPI/API/src/aux/constants.py b/QoDProvisioning/QoDProvisioningAPI/API/src/utils/constants.py similarity index 100% rename from QoDProvisioning/QoDProvisioningAPI/API/src/aux/constants.py rename to QoDProvisioning/QoDProvisioningAPI/API/src/utils/constants.py diff --git a/QoDProvisioning/QoDProvisioningAPI/API/src/aux/mappers.py 
b/QoDProvisioning/QoDProvisioningAPI/API/src/utils/mappers.py similarity index 100% rename from QoDProvisioning/QoDProvisioningAPI/API/src/aux/mappers.py rename to QoDProvisioning/QoDProvisioningAPI/API/src/utils/mappers.py diff --git a/QoDProvisioning/QoDProvisioningAPI/API/src/aux/service_event_manager/__init__.py b/QoDProvisioning/QoDProvisioningAPI/API/src/utils/service_event_manager/__init__.py similarity index 100% rename from QoDProvisioning/QoDProvisioningAPI/API/src/aux/service_event_manager/__init__.py rename to QoDProvisioning/QoDProvisioningAPI/API/src/utils/service_event_manager/__init__.py diff --git a/QoDProvisioning/QoDProvisioningAPI/API/src/aux/service_event_manager/camara_results_processor.py b/QoDProvisioning/QoDProvisioningAPI/API/src/utils/service_event_manager/camara_results_processor.py similarity index 96% rename from QoDProvisioning/QoDProvisioningAPI/API/src/aux/service_event_manager/camara_results_processor.py rename to QoDProvisioning/QoDProvisioningAPI/API/src/utils/service_event_manager/camara_results_processor.py index ea22ca46a80abaa41bafa2a159a4bae3bbcfcbc9..4dd7de0db6309e7ed9245e1721f12c616a3a2a36 100644 --- a/QoDProvisioning/QoDProvisioningAPI/API/src/aux/service_event_manager/camara_results_processor.py +++ b/QoDProvisioning/QoDProvisioningAPI/API/src/utils/service_event_manager/camara_results_processor.py @@ -10,12 +10,12 @@ import asyncio -from aux.service_event_manager.service_event_manager import ServiceEventManager +from utils.service_event_manager.service_event_manager import ServiceEventManager from config import Config import json from database import crud from database.db import get_db -from aux.constants import Constants +from utils.constants import Constants # Set up logging logger = Config.setup_logging() diff --git a/QoDProvisioning/QoDProvisioningAPI/API/src/aux/service_event_manager/service_event_manager.py b/QoDProvisioning/QoDProvisioningAPI/API/src/utils/service_event_manager/service_event_manager.py similarity 
index 100% rename from QoDProvisioning/QoDProvisioningAPI/API/src/aux/service_event_manager/service_event_manager.py rename to QoDProvisioning/QoDProvisioningAPI/API/src/utils/service_event_manager/service_event_manager.py diff --git a/QoDProvisioning/QoDProvisioningAPI/Operator/chart/values.yaml b/QoDProvisioning/QoDProvisioningAPI/Operator/chart/values.yaml index aa337775682453752c6c8f1bb9d6728d67843465..b2d6f752b32d8b10e197fbc2896dfe8466f6f253 100644 --- a/QoDProvisioning/QoDProvisioningAPI/Operator/chart/values.yaml +++ b/QoDProvisioning/QoDProvisioningAPI/Operator/chart/values.yaml @@ -1,6 +1,6 @@ kubernetesClusterDomain: cluster.local operator: - image: harbor.etsi.org/osl/osl-camaraaas-qod-provisioning-api-op:latest + image: labs.etsi.org:5050/osl/code/addons/org.etsi.osl.controllers.camara/qodprovisioning/operator:develop replicas: 1 customResource: group: org.etsi.osl @@ -11,6 +11,6 @@ customResource: - qod-provisioning-api version: v1 camaraQoDAPI: - image: harbor.etsi.org/osl/osl-camaraaas-qod-provisioning-api:latest + image: labs.etsi.org:5050/osl/code/addons/org.etsi.osl.controllers.camara/qodprovisioning/api:develop port: 8000 logLevel: INFO \ No newline at end of file diff --git a/QoDProvisioning/README.md b/QoDProvisioning/README.md index 3acc181ac161e5bb1dc25df9fe1be8e4bf48ff18..cb21bd339f8dcd107989ab77978606966cb23b10 100644 --- a/QoDProvisioning/README.md +++ b/QoDProvisioning/README.md @@ -4,65 +4,30 @@ ## Candidate CAMARA API - QoD Provisioning -For this first proof of concept, we decided to rely on the [CAMARA QoD Provisioning API](https://editor.swagger.io/?url=https://raw.githubusercontent.com/camaraproject/QualityOnDemand/r1.2/code/API_definitions/qod-provisioning.yaml ). +For this first Proof of Concept (PoC), we decided to rely on the [CAMARA QoD Provisioning API](https://editor.swagger.io/?url=https://raw.githubusercontent.com/camaraproject/QualityOnDemand/r1.2/code/API_definitions/qod-provisioning.yaml). 
Such API has the following endpoints: -![](Documentation/Pictures/CAMARA-QoDProvisioning-API-Endpoints.png) +![CAMARA QoD Swagger](Documentation/Pictures/CAMARA-QoDProvisioning-API-Endpoints.png) -### Mapping to TMF Service Characteristics (of the operator’s Service) +Last, our solution needs to be deployed in a Kubernetes cluster. A 5G Core is needed for proper implementation as well as your own 5G Core Controller. -Having chosen the candidate API, the first step is to find a way to map the possible requests to TMF Service characteristics of the operator’s service. By looking at API’s specification, it is clear that at least 3 operations are required: (i) the creation of a QoD profile, (ii) its deletion, and (iii) listing all active QoD profiles. Therefore, we can proceed with evaluating the payload required for creating a QoD Provisioning. This payload involves various fields, which can be translated to the TMF Service Characteristics: +The PoC is based on [OSL CAMARAaaS Add-on](../README.md). It is recommended to read this before diving into the implementation of the PoC. -- *qodProv.device.phoneNumber* -- *qodProv.device.networkAccessIdentifier* -- *qodProv.device.ipv4Address.privateAddress* -- *qodProv.device.ipv4Address.publicAddress* -- *qodProv.device.ipv4Address.publicPort* -- *qodProv.device.ipv6Address* -- *qodProv.qosProfile* -- *qodProv.sink* -- *qodProv.sinkCredential.credentialType* - -In order to support interaction with OSL’s CAMARAaaS APIs, the operator service must be designed, at least, with these characteristics. +## General concepts of the OSL CAMARAaaS as a TMF Service Specification -Still, since there are various operations that can take place (CREATE and DELETE), it is also needed a characteristic to map this. Therefore, the operator’s service must also have a characteristics titled *qodProv.operation*. 
The DELETE operation is achieved based on a provisioning Id, and therefore another characteristics is needed: *qodProv.provisioningId.* - -Finally, it is required a characteristic to store the provisionings that were enforced by the operator’s service. We can define this characteristic as *camaraResults*. - -Therefore, for an operator’s service to be controlled by OSL’s CAMARA APIs, it needs to be designed with, at least, the following characteristics: - -- *qodProv.device.phoneNumber* -- *qodProv.device.networkAccessIdentifier* -- *qodProv.device.ipv4Address.privateAddress* -- *qodProv.device.ipv4Address.publicAddress* -- *qodProv.device.ipv4Address.publicPort* -- *qodProv.device.ipv6Address* -- *qodProv.qosProfile* -- *qodProv.sink* -- *qodProv.sinkCredential.credentialType* -- *qodProv.operation* -- *qodProv.provisioningId* -- *camaraResults* - -Additional characteristics are fully supported. Those can be custom characteristics that are required by the Operator’s Service. - -In regard to the *camaraResults* characteristic, to allow interoperability, it must store a Stringified JSON Array with the enforced QoD Provisionings. **The schema of each provisioning should be the one defined in CAMARA’s QoD Provisioning API Specification.** - -### TMF Service Characteristics of the CAMARAaaS APIs - -Considering the interactions that shall take place between the CAMARAaaS API and the Operator’s running Service and the architecture introduced before, it is clear that CAMARA APIs must interface with OSL’s Active MQ broker. Therefore, TMF Specific Service Characteristics are required to pass this information to the CAMARA APIs deployed through OSL: +First step is to design and expose the OSL CAMARA API service via the TMF models (Service Specification that can be ordered). The OSL CAMARA API service uses as backend connectivity the OSL message bus (OSL’s Active MQ broker). 
It exposes the CAMARA API, translates and forwards the requests to TMF Service Inventory model via the service bus. Therefore, considering the interactions that shall take place between the exposed CAMARA API, the Operator’s running 5G Controller Service and the architecture introduced before, the OSL CAMARA API exposure service must interface with OSL’s Active MQ broker. Moreover, for this first prototype the OSL's CAMARAaaS will be delivered via a Service Order. To enable this, we need to design it in OpenSlice as a Service Specification, so in general some TMF service characteristics are required to pass this information to the OSL CAMARA API exposure service which will be orchestrated, deployed and configured through OSL: - messageBroker.address - OSL's ActiveMQ Address (e.g. 10.10.10.10) - messageBroker.port - OSL's ActiveMQ Port - messageBroker.username - OSL’s ActiveMQ Username - messageBroker.password - OSL’s ActiveMQ Password -Additionally, we also need another Service Characteristic to store the UUID of the Operator’s running Service that will be controlled through the CAMARA API: +Additionally, we also need another Service Characteristic to store the UUID of the Operator’s running 5G Controller Service that will be triggered through the CAMARA API: - serviceUnderControl.uuid -Considering that the CAMARA API will be orchestrated by OSL, the client does not know where the API will be deployed, nor the credentials he should use to access it. Therefore, 4 additional characteristics are required. These will be automatically updated by OSL after the CAMARA API Service is deployed: +Considering that the CAMARA API will be orchestrated by OSL, the client does not know where the API will be deployed, nor the credentials used to access it. Therefore, 4 additional characteristics are required. These will be automatically updated by OSL after the CAMARA API Service is deployed: - camaraAPI.url - URL of the CAMARA API orchestrated by this service (view-only). 
This field will be automatically populated when the CAMARA API starts running - camaraAPI.username - Username of the CAMARA API orchestrated by this service (view-only). This field will be automatically populated when the CAMARA API starts running @@ -71,7 +36,7 @@ Considering that the CAMARA API will be orchestrated by OSL, the client does not Additionally, you may create a characteristic titled “*camaraAPI.results*”, which you can use to have visibility of the QoD Provisionings processed by the API, at OSL level. Still, this characteristic is not required. -Therefore, OSL’s CAMARA APIs must offer the following TMF Service Characteristics: +Therefore, OSL’s CAMARAaaS (QoD API exposure service) will contain the following TMF Service Characteristics: - messageBroker.address - messageBroker.port @@ -86,28 +51,74 @@ Therefore, OSL’s CAMARA APIs must offer the following TMF Service Characterist ### Broker Connection -This API has a *ServiceEventManager* class that communicates with OpenSlice's ActiveMQ broker through two topics: +This API has a *ServiceEventManager* class that communicates with OSL's ActiveMQ broker through two topics: - `CATALOG.UPD.SERVICE`: Topic for catalog updates. - `EVENT.SERVICE.ATTRCHANGED`: Topic for service attribute changes. -#### CATALOG.UPD.SERVICE +### CATALOG.UPD.SERVICE -Whenever a new provisioning is created for an UE, the *ServiceEventManager*'s *update_service* method is called. This method sends a a service update message through OpenSlice's *CATALOG.UPD.SERVICE* topic. When OSL receives the request, it updates the Service with the new characteristics, which are then caught by the correspondent K8s Operator. After processing the request, the Operator adds the result to the Service-related CR *camaraResults* characteristic. +Whenever a new provisioning is created for an UE, the *ServiceEventManager*'s *update_service* method is called. This method sends a service update message through OpenSlice's *CATALOG.UPD.SERVICE* topic. 
When OSL receives the request, it updates the Operator's 5G Controller Service with the new characteristics, which are then caught and materialized by the correspondent Kubernetes Operator. After processing the request, the Operator adds the result to the CR-related service characteristic, i.e. *camaraResults*. -#### EVENT.SERVICE.ATTRCHANGED +### EVENT.SERVICE.ATTRCHANGED -The *ServiceEventManager* subscribes to this topic to obtain and process the update messages regarding the specified UE QoD Profile Enforcer OSL service. Whenever this service's characteristics are updated in OSL, this class catches the update message. Then, the class extracts the *camaraResults* characteristic, which contains all QoS provisionings applied to the UEs. +The *ServiceEventManager* subscribes to this topic to obtain and process the update messages regarding the specified Operator's 5G Controller Service, namely the UE QoD Profile Enforcer OSL service. Whenever this service's characteristics are updated in OSL, this class catches the update message. Then, the class extracts the *camaraResults* characteristic, which contains all QoS provisionings applied to the UE(s). These results are then processed by the *CamaraResultsProcessor* class, which updates each provisioning accordingly in the database. +## General concepts of the QoD service (the 5G Core Provider/Operator Service) and its design as OSL TMF Service Specification + +Having chosen the candidate API, the first step is to find a way to map the possible requests to TMF Service characteristics of the Operator’s service. 
Our approach in general is the following: +- Design the QoD service that will accept the necessary QoD related characteristics +- Implement the QoD service as a controller able to manage your 5G Core +- Deploy it in a kubernetes cluster that OSL can manage (via CRIDGE) + +By looking at API’s specification, it is clear that at least 3 operations are required: (i) the creation of a QoD profile, (ii) its deletion, and (iii) listing all active QoD profiles. Therefore, we can proceed with evaluating the payload required for creating a QoD Provisioning. This payload involves various fields, which can be translated to the TMF Service Characteristics: + +- *qodProv.device.phoneNumber* +- *qodProv.device.networkAccessIdentifier* +- *qodProv.device.ipv4Address.privateAddress* +- *qodProv.device.ipv4Address.publicAddress* +- *qodProv.device.ipv4Address.publicPort* +- *qodProv.device.ipv6Address* +- *qodProv.qosProfile* +- *qodProv.sink* +- *qodProv.sinkCredential.credentialType* + +In order to support interaction with the previously defined OSL’s CAMARAaaS and alignment the CAMARA QoD Provisioning API models in general, the QoD service must be designed, at least, with these characteristics. + +Still, since there are various operations that can take place (CREATE and DELETE), a characteristic is also needed to map this. Therefore, the Operator's QoD service must also have a characteristics titled *qodProv.operation*. The DELETE operation is achieved based on a provisioning Id, and therefore the respective characteristics is needed: *qodProv.provisioningId.* + +Finally, characteristic is required to store the provisionings that were enforced by the Operator's QoD service. We can define this characteristic as *camaraResults*. 
+ +Therefore, for an Operator’s service to be controlled by OSL’s CAMARAaaS specification, it needs to be designed with, at least, the following characteristics: + +- *qodProv.device.phoneNumber* +- *qodProv.device.networkAccessIdentifier* +- *qodProv.device.ipv4Address.privateAddress* +- *qodProv.device.ipv4Address.publicAddress* +- *qodProv.device.ipv4Address.publicPort* +- *qodProv.device.ipv6Address* +- *qodProv.qosProfile* +- *qodProv.sink* +- *qodProv.sinkCredential.credentialType* +- *qodProv.operation* +- *qodProv.provisioningId* +- *camaraResults* + +Additional characteristics are fully supported. Those can be custom characteristics that are required by the Operator's QoD Service. + +In regard to the *camaraResults* characteristic, to allow interoperability, it must store a Stringified JSON Array with the enforced QoD Provisionings. **The schema of each provisioning should be the one defined in CAMARA’s QoD Provisioning API Specification.** ## How To / Demonstration -### 1. Dummy Operator Service Service Design -We will start by looking at the dummy operator’s service we have created to demonstrate this Add-on. This Service will be offered as simple Custom Resource. You may find its Custom Resource Definition under `/DummyOperatorService/crd.yaml` Look at the CRD fields. Please notice that these were defined according what we disclosed before. +### 1. Dummy QoD Kubernetes Operator Service Design + +For this prototype, since we cannot deliver a QoD Service implementation for a specific commercial core, we created a Dummy QoD Kubernetes Operator Service which emulates the 5G Core configuration. In future releases of OSL we will offer solutions for some open source 5G cores, and/or some code templates to build your own 5G Core Operator. + +We will start by looking at the Dummy QoD Kubernetes Operator Service we have created to demonstrate this Add-on. The Service will be offered as a simple Custom Resource (CR) deployed in a Kubernetes cluster. 
You may find its Custom Resource Definition (CRD) under `/DummyOperatorService/crd.yaml`. Look at the CRD fields. Please notice that these were defined according to what we introduced in the previous section.
Ideally, the 5G Core provider would have implemented a proper Kubernetes Operator for this Custom Resource to implement the requested QoD. However, as discussed already, for demonstration purposes, we will short-circuit the behavior and we will set `spec.status` to `RUNNING` immediately after deployment. -Then, you can proceed to create a CFS Service, which will incorporate the just created RFS Service. More information is available at: [Exposing Kubernetes Operators as a Service : Offering "Calculator as a Service" through OpenSlice](https://osl.etsi.org/documentation/latest/service_design/examples/ExposingCRDs_aaS_Example_Calculator/ExposingCRDs_aaS_Example_Calculator/). To create the CFS Service characteristics, you may use the Service Specification available at `/DummyOperatorService/OSLArtifacts/DummyOperatorService-CFS-Specification.json` . You may manually create the CFS Service, or you may onboard this Service Specification by making a POST request to *[{{url}}/tmf-api/serviceCatalogManagement/v4/serviceSpecification](https://www.notion.so/CAMARAaaS-OSL-15e11fa2ed8d80808254c87d9393cf51?pvs=21).* -After creating the Service Specification, you should mark this Service as a Bundle. Then, go to “Service Specification Relationships” and add the RFS Service. +Then, you can proceed to create a Customer-Facing-Service (CFS) Specification, which will incorporate the just created RFS Service. More information is available at: [Exposing Kubernetes Operators as a Service : Offering "Calculator as a Service" through OpenSlice](https://osl.etsi.org/documentation/latest/service_design/examples/ExposingCRDs_aaS_Example_Calculator/ExposingCRDs_aaS_Example_Calculator/). To create the CFS Service's characteristics, you may use the Service Specification available at `/DummyOperatorService/OSLArtifacts/DummyOperatorService-CFS-Specification.json` . 
You may manually create the CFS Service, or you may onboard this Service Specification by making a POST request to `{{url}}/tmf-api/serviceCatalogManagement/v4/serviceSpecification`. + +After creating the CFS Specification, you should mark this Service as a Bundle. Then, go to “Service Specification Relationships” and add the previous created RFS Specification. Regarding the LCM Rules for the CFS Service, you should configure the following ones: -**[Pre-Provision Rule]** +**[Pre-Provision Rule] - Short-circuits RFS/Sets RFS's spec.status to "Running"** ![](./Documentation/Pictures/DummyOperatorService-Pre-Provision-Rule.png) - ```java { java.util.HashMap charvals = new java.util.HashMap<>(); @@ -153,8 +164,7 @@ setServiceRefCharacteristicsValues("Dummy Operator Service - RFS", charvals); } ``` - -**[Supervision Rule]** +**[Supervision Rule] - Detects changes to the Operator's CFS and reflects them and forwards them to RFS** ![](./Documentation/Pictures/DummyOperatorService-Supervision-Rule.png) @@ -193,55 +203,51 @@ setCharValFromStringType("camaraResults", getServiceRefPropValue("Dummy Operator You can find the `_CR_SPEC` template used for the pre-provision rule at `/DummyOperatorService/OSLArtifacts/cr-template-pre-provision.yaml` . The `_CR_SPEC` template used for the supervision rule is available at `/DummyOperatorService/OSLArtifacts/cr-template-supervision.yaml` -After that, you may expose this service via OSL’s Service Catalog, and order it. You do not need to configure any characteristics when ordering this Service. Confirm that the service order was completed, both RFS and CFS Services are active, and a Custom Resource of type *DummyOperatorService* was created in your Kubernetes Cluster. See images below. +After that, you may expose this service via OSL’s Service Catalog, and order it. You do not need to configure any characteristics when ordering this Service. 
Confirm that the Service Order was completed, both RFS and CFS Services are active, and a Custom Resource of type *DummyOperatorService* was created in your Kubernetes Cluster. See images below: ![DummyOperatorService-ServiceOrder.png](./Documentation/Pictures/DummyOperatorService-ServiceOrder.png) ![DummyOperatorService-ServiceOrder.png](./Documentation/Pictures/DummyOperatorService-CustomResource.png) -### 2. CAMARA QoD Provisioning API - -Then, we can proceed to design the CAMARAaaS QoD Provisioning API. To this end, OSL’s team has implemented CAMARA’s QoD Provisioning API, created a CRD to offer it, and developed a Kubernetes Operator to deal with its internal logic. Start by packaging the API in a docker image and pushing it to a docker repository. +### 2. OSL CAMARAaaS QoD Provisioning API exposure Service Design -Open the file `Makefile` and update the repository to where you will push the docker image. Update the variable `REPOSITORY_HOST` . You may also choose to update the other variables, but it is not required. After this, run `make build-api-docker-image`. This command will build, tag, and push the API docker image to the repository you chose. +Then, we can proceed to design the CAMARAaaS QoD Provisioning API exposure Service Specification in OSL catalogue. To this end, OSL’s team has implemented in Python the CAMARAaaS QoD Provisioning API, created a CRD to offer it, and developed a Kubernetes Operator to deal with its internal logic. -### 3. CAMARA QoD Provisioning API - Kubernetes Operator +#### 2.1 OSL CAMARAaaS QoD Provisioning API - Kubernetes Operator -The previous docker image shall make available the CAMARA QoD Provisioning API. However, these APIs will be made available through Custom Resources of Type `CAMARAaaS-QoDProvisiongAPI` . Therefore, we also need a Kubernetes Operator to manage these resources. The Operator’s code can be found under `/QoDProvisioningAPI/Operator` . 
There, you have the source code of the Operator, as well as an Helm Chart to install it.
-
-Start by building and pushing the Operator’s docker image to your repository. Run `make operator-docker-image`. Then, install the operator in your Kubernetes cluster. This action will result in the creation of the Custom Resource Definition for the CAMARA QoD Provisioning API Resources, and deploy the operator in the cluster. It is this operator that will manage the CAMARA QoD Provisioning API Resources, being responsible, for instance, for the deployment a Kubernetes pod and service that expose the CAMARA QoD Provisioning API. To install the Operator, run the following command: +The previous docker image shall make available the CAMARA QoD Provisioning API. However, these APIs will be made available through Custom Resources of Type `CAMARAaaS-QoDProvisiongAPI`. Therefore, we also need a Kubernetes Operator to manage these resources. The Operator’s code can be found under `/QoDProvisioningAPI/Operator`. There, you have the source code of the Kubernetes Operator. The Operator's Helm Chart uses the default OSL registry docker images. If you are interested in developing your own, please refer to the Makefile. To install the Operator, run the following command:

```bash
-helm install camaraaas-qod-prov-operator ./QoDProvisioningAPI/Operator/chart --set operator.image= --set camaraQoDAPI.image= --namespace --create-namespace
+helm install camaraaas-qod-prov-operator ./QoDProvisioningAPI/Operator/chart --namespace --create-namespace
```

-To simplify things, you may also run: `make install-operator` . After this, check if the operator is running through the `make get-operator-logs` command. +After this, check if the operator is running through the `make get-operator-logs` command.

-If everything went ok, you should have a new CRD in your Kubernetes cluster. Run this command to verify if it was created: `kubectl describe crd camaraaas-qod-provisioning-apis.org.etsi.osl`. 
+If everything went ok, you should have a new CRD in your Kubernetes cluster. Run this command to verify if it was created: `kubectl describe crd camaraaas-qod-provisioning-apis.org.etsi.osl`. -Before designing the service in OSL, let us first create a Custom Resource of type `CAMARAaaS-QoDProvisiongAPI` to validate that the operator is behaving according to what is expected. To this end, you may use the test custom resource available at `/QoDProvisioningAPI/Operator/test-cr.yaml` . Before creating the resource, you need to update the fields: *spec.messageBroker.address*, *spec.messageBroker.port, spec.messageBroker.username, spec.messageBroker.password*, with the values that relate with your OSL instance. Most likely, the values will be the following ones: +Before designing the service in OSL, let us first create a CR of type `CAMARAaaS-QoDProvisiongAPI` to validate that the operator is behaving according to what is expected. To this end, you may use the test custom resource available at `/QoDProvisioningAPI/Operator/test-cr.yaml` . Before creating the resource, you need to update the fields: *spec.messageBroker.address*, *spec.messageBroker.port, spec.messageBroker.username, spec.messageBroker.password*, with the values that relate with your OSL instance. The default values will be the following ones: -- *spec.messageBroker.address: * -- s*pec.messageBroker.port*: 61613 -- *spec.messageBroker.username: artemis* -- *spec.messageBroker.password: artemis* +- *spec.messageBroker.address:* +- *spec.messageBroker.port*: 61613 +- *spec.messageBroker.username*: artemis +- *spec.messageBroker.password*: artemis For now, you do not need to update the field serviceUnderControl.uuid. You may leave it as is. -After these updates, create the Custom Resource by running the command: `make create-operator-test-cr`. +After these updates, create the CR by running the command: `make create-operator-test-cr`. 
-When the Custom Resource is created, its operator will deploy the CAMARA QoD API in a pod and expose it via a K8s Node Port. The URL where the API is available is published under the CR field `spec.camaraAPI.url` (e.g.[http://10.255.28.73:32630](http://10.255.28.73:32630/)). Check this field by running `make describe-operator-test-cr`. To confirm the API is running, access */docs*. You should see the following: +When the CR is created, its operator will deploy the CAMARA QoD API in a pod and expose it via a K8s Node Port. The URL where the API is available is published under the CR field `spec.camaraAPI.url` (e.g. [http://10.255.28.73:32630](http://10.255.28.73:32630/)). Check this field by running `make describe-operator-test-cr`. To confirm the API is running, access */docs*. You should see the following: ![](./Documentation/Pictures/QoDProvisioningAPI-Docs.png) If you see this page, the CAMARA QoD Provisioning API Custom Resources and their operator is working. You may delete the Custom Resource you created. Run the following command: `make delete-operator-test-cr`. -### 4. CAMARA QoD Provisioning API - Service Design +#### 2.2 OSL CAMARAaaS QoD Provisioning API - TMF Service Specification Design -Now we can proceed to create a Service Specification that maps the CAMARA QoD Provisioning APICustom Resource. +Now we can proceed to create an OSL TMF Service Specification that maps the CAMARAaaS QoD Provisioning API CR, ready to be ordered in your OSL Service Specification Catalogue. -The next step is to create a RFS Service to expose this resource. To do so, you may read the [Exposing Kubernetes Operators as a Service : Offering "Calculator as a Service" through OpenSlice](https://osl.etsi.org/documentation/latest/service_design/examples/ExposingCRDs_aaS_Example_Calculator/ExposingCRDs_aaS_Example_Calculator/) documentation page. +The first step is to create an RFS Service to expose this resource. 
To do so, you may read the [Exposing Kubernetes Operators as a Service : Offering "Calculator as a Service" through OpenSlice](https://osl.etsi.org/documentation/latest/service_design/examples/ExposingCRDs_aaS_Example_Calculator/ExposingCRDs_aaS_Example_Calculator/) documentation page. Regarding the RFS Service, you must set the following characteristics: @@ -250,13 +256,13 @@ Regarding the RFS Service, you must set the following characteristics: By setting this characteristics, you will rely on the value of `spec.camaraAPI.status` to set the service as `active`. The previous operator, when it deploys the CAMARA QoD Provisioning API will set that CR field to `RUNNING`. -Then, you can proceed to create a CFS Service, which will incorporate the just created RFS Service. More information is available at: [Exposing Kubernetes Operators as a Service : Offering "Calculator as a Service" through OpenSlice](https://osl.etsi.org/documentation/latest/service_design/examples/ExposingCRDs_aaS_Example_Calculator/ExposingCRDs_aaS_Example_Calculator/). To create the CFS Service characteristics, you may use the Service Specification available at `/QoDProvisioningAPI/OSLArtifacts/CAMARAaaS-QoD-Provisioning-API-CFS-Specification.json` . You may manually create the CFS Service, or you may onboard this Service Specification by making a POST request to *[{{url}}/tmf-api/serviceCatalogManagement/v4/serviceSpecification](https://www.notion.so/CAMARAaaS-OSL-15e11fa2ed8d80808254c87d9393cf51?pvs=21).* +Then, you can proceed to create a CFS Service, which will incorporate the just created RFS Service. More information is available at: [Exposing Kubernetes Operators as a Service : Offering "Calculator as a Service" through OpenSlice](https://osl.etsi.org/documentation/latest/service_design/examples/ExposingCRDs_aaS_Example_Calculator/ExposingCRDs_aaS_Example_Calculator/). 
To create the CFS Service characteristics, you may use the Service Specification available at `/QoDProvisioningAPI/OSLArtifacts/CAMARAaaS-QoD-Provisioning-API-CFS-Specification.json`. You may manually create the CFS Service, or you may onboard this Service Specification by making a POST request to `{{url}}/tmf-api/serviceCatalogManagement/v4/serviceSpecification`. After creating the Service Specification, you should mark this Service as a Bundle. Then, go to “Service Specification Relationships” and add the RFS Service. Regarding the LCM Rules for the CFS Service, you should configure the following ones: -**[Pre-Provision Rule]** +**[Pre-Provision Rule] - Forwards the user input towards the created CR of type "CAMARAaaS-QoDProvisiongAPI"** ![](./Documentation/Pictures/CAMARAaaS-QoD-Prov-API-Pre-Provision-Rule.png) @@ -283,7 +289,7 @@ setServiceRefCharacteristicsValues("CAMARAaaS - QoD Provisioning API - RFS", cha ``` -**[Supervision Rule]** +**[Supervision Rule] - Updates the exposed QoD Provisioning API Service with information about the undertaken actions** ![CAMARAaaS-QoD-Prov-API-Supervision-Rule.png](./Documentation/Pictures/CAMARAaaS-QoD-Prov-API-Supervision-Rule.png) @@ -315,7 +321,7 @@ setCharValFromStringType("camaraAPI.results", getServiceRefPropValue("CAMARAaaS ``` -You can find the `_CR_SPEC` template used for both rules at `/QoDProvisioningAPI/OSLArtifacts/cr-template.yaml` . +You can find the `_CR_SPEC` template used for both rules at `/QoDProvisioningAPI/OSLArtifacts/cr-template.yaml`. After that, you may expose this service via OSL’s Service Catalog, and order it. When you order it, you will be prompted to configure some characteristics: @@ -336,18 +342,17 @@ Confirm that the service order was completed, both RFS and CFS Services are acti ![](./Documentation/Pictures/CAMARAaaS-QoD-Prov-API-CR.png) - -Additionally, in OSL, you may see the URL where the QoD Provisioning API is exposed. 
To do so, please see the characteristics of the CAMARAaaS QoD Provisioning API CFS. See image below. +Additionally, in OSL, you may see the URL where the QoD Provisioning API is exposed. To do so, please see the characteristics of the CAMARAaaS QoD Provisioning API CFS. See image below: ![](./Documentation/Pictures/CAMARAaaS-QoD-Prov-API-Characteristics.png) -### 5. Validation +### 3. Validation Now we can test if the two services are communicating. To do so, you should create a QoD Provisioning via the API that was just deployed. You may do that, using this command: ```bash -# You must update the url to correspond to your API instance. -curl --location 'http://10.255.28.73:31637/device-qos' \ +# You must update the url to correspond to your API instance, found in camaraAPI.url highlighted in previous image. +curl --location '{{camaraAPI.url}}/device-qos' \ --header 'Content-Type: application/json' \ --data-raw '{ "device": { @@ -370,14 +375,14 @@ You should have received a response similar to this one: {"device":{"phoneNumber":"+987654321","networkAccessIdentifier":"987654321@example.org","ipv4Address":{"publicAddress":"203.0.112.12","privateAddress":null,"publicPort":59765},"ipv6Address":"2001:db8:85a3:8d3:1319:8a2e:370:7344"},"qosProfile":"QOS_PROFILE_A","sink":"https://endpoint.example.com/","sinkCredential":{"credentialType":null},"provisioningId":"cb55f9e9-802e-4898-95f5-d1a5a2552483","startedAt":"2024-12-17T15:49:21.995399","status":"REQUESTED","statusInfo":null} ``` -Now, if everything is working properly, the characteristics of the Dummy Operator Service you referenced should have been update. You should now see these characteristics: +Now, if everything is working properly, the characteristics of the Dummy Operator Service you referenced should have been updated. 
You should now see these characteristics: ![DummyOperatorService-Characteristics-After-CAMARA-Invoking.png](./Documentation/Pictures/DummyOperatorService-Characteristics-After-CAMARA-Invoking.png) You may also query the QoD Provisioning API to check the status of your provisioning. ```bash -curl --location 'http://10.255.28.73:31637/device-qos/cb55f9e9-802e-4898-95f5-d1a5a2552483' +curl --location '{{camaraAPI.url}}/device-qos/cb55f9e9-802e-4898-95f5-d1a5a2552483' # notice the "provisioningId":"cb55f9e9-802e-4898-95f5-d1a5a2552483" above ``` @@ -405,7 +410,7 @@ After a while, if you check the characteristics of the CAMARAaaS QoD Provisionin Finally, execute this request again: ```bash -curl --location 'http://10.255.28.73:31637/device-qos/cb55f9e9-802e-4898-95f5-d1a5a2552483' +curl --location '{{camaraAPI.url}}/device-qos/cb55f9e9-802e-4898-95f5-d1a5a2552483' # notice the "provisioningId":"cb55f9e9-802e-4898-95f5-d1a5a2552483" above ``` @@ -415,4 +420,7 @@ You should receive the following response. {"device":{"phoneNumber":"+987654321","networkAccessIdentifier":"987654321@example.org","ipv4Address":{"publicAddress":"203.0.112.12","privateAddress":null,"publicPort":59765},"ipv6Address":"2001:db8:85a3:8d3:1319:8a2e:370:7344"},"qosProfile":"QOS_PROFILE_A","sink":"https://endpoint.example.com/","sinkCredential":{"credentialType":null},"provisioningId":"cb55f9e9-802e-4898-95f5-d1a5a2552483","startedAt":"2024-12-15T11:00:00","status":"AVAILABLE","statusInfo":null} ``` -Notice the `"status":"AVAILABLE"` . This means the 2 services are communicating. Now, you just have to implement your own Operator Service Kubernetes Operator, and you may use OSL’s CAMARAaaS Add-on to expose it through a CAMARA API. \ No newline at end of file +Notice the `"status":"AVAILABLE"`. This means the 2 services are communicating. 
+ + +Now, you just have to implement your own Kubernetes Operator for your 5G Core configuration, and you may use OSL’s CAMARAaaS Add-on to expose it through a CAMARA API. \ No newline at end of file diff --git a/README.md b/README.md index 74f4127c0ef8eed1d6b892290348739ed36407fe..551a48917e8731b10b2a6f87cce310fc2814671e 100644 --- a/README.md +++ b/README.md @@ -1,42 +1,130 @@ # CAMARAaaS Add-on -The **CAMARA as a Service OSL Add-on** allows telecom operators and customer service providers to expose OSL services through CAMARA APIs. By doing so, it enables runtime operations, such as enforcing profiles on User Equipment (UEs) or updating 5G Network Slice characteristics, using standardized CAMARA API endpoints. Thus, this add-on enables the orchestration of CAMARA APIs, which will then be used to control the lifecyle and the operations that shall take place in an already existing OSL Service. +## Introduction + +The **CAMARA as a Service (CAMARAaaS) OSL Add-on** is a prototype service developed by OSL and allows users of OSL to expose CAMARA APIs for their TMF-based services. By doing so, it enables runtime operations, such as enforcing profiles on User Equipment (UEs) or updating 5G Network Slice characteristics, using standardized CAMARA API endpoints. The work is in progress for future enhancements (e.g. multi-tenancy, etc). + +In a nutshell, CAMARAaaS add-on performs API transformations from CAMARA API model to TMF API model and vice-versa. + +The supporting use case is the following: + +- An OSL Service Provider (e.g. an Operator) has a running 5G Core (e.g. from another service order in OSL). +- The running service exposes already some characteristics (i.e. via TMF Service Inventory) that can be configured. Thus, someone can reconfigure the latter during runtime (e.g. change the quality of a slice via a TMF API service request). 
+- On a subsequent step, the Service Provider makes a Service Order in OSL to expose this running 5G Core service via a CAMARA API endpoint. +- The CAMARAaaS add-on is a wrapper between the CAMARA requests and the TMF API Service Inventory models. These CAMARA APIs will then be used to control the lifecycle and the operations that shall take place in an already existing OSL Service. Therefore, these are the key features of this add-on: -- **Seamless Integration**: Operators can expose their existing OSL services through CAMARA APIs, maintaining consistency with the OSL framework while offering additional accessibility. -- **Dynamic Service Control**: Allows runtime updates to characteristics of 5G-related Services, such as UE profiles or Network Slices, via CAMARA REST API calls. The updated of the characteristics of a Service can then be consumed by a Kubernetes Custom Resource that will produce an operation according to the updated characteristics. +- **Seamless Integration**: Operators can expose their existing OSL Services through CAMARA APIs, maintaining consistency with the OSL framework while offering additional accessibility. +- **Dynamic Service Control**: Allows runtime updates to characteristics of 5G-related Services, such as UE profiles or Network Slices, via CAMARA REST API calls. The updated characteristics of a Service can then be consumed by a Kubernetes Custom Resource that will produce an operation according to the updated characteristics. -## Architecture and Interactions +## Architectural Approach and Service Interactions -The add-on introduces a generic **CAMARA API Service**, which acts as a wrapper for existing (running) services. The architecture ensures: +The add-on introduces a generic **CAMARA API Service**, which acts as a wrapper for existing (running) services registered in TMF Service Inventory. The architecture ensures: -1. 
**API Exposure**: CAMARA APIs are orchestrated by OSL (offered as a service) and their endpoints are exposed to the end clients. -2. **Service Mapping**: The CAMARA API Service references a running service (identified by a UUID), enabling targeted operations. The invoking of CAMARA API endpoints will results in updates in the running service’s characteristics. -3. **Operational Flow**: Updates triggered via CAMARA APIs are propagated to the operator's service through OSL0s message queue (Active MQ), ensuring synchronization of service characteristics. +1. **API Exposure**: CAMARA APIs are orchestrated by OSL (offered as-a-service) and their endpoints are exposed to the end-users (clients). +2. **Service Mapping**: The CAMARA API Service references a running service (identified by a unique UUID), enabling targeted operations. The invoking of CAMARA API endpoints will result in updates in the running service’s characteristics. +3. **Operational Flow**: Updates triggered via CAMARA APIs are propagated to the operator's service through OSL's message queue (Active MQ), ensuring synchronization of service characteristics. -This architecture is presented in the figure below: +> Assumption: The Operator already processes and exposes a 5G controlling running service + +This architectural approach is presented in the figure below: ![CAMARAaaS-Architecture](Documentation/CAMARAaaS-Architecture.png) -As already mentioned, the step “PATCH Characteristics” is achieved by send a message to OSL’s message bus. +### **Components in the Architecture** + +1. **Service Specification Catalogue**: + - Stores the Service Specifications. + - Contains the `CAMARA API Controller` Service Specification, which defines/exposes the profile enforcer functionality. + +2. **Service Inventory**: + - Tracks service instances (e.g., RFS - Resource-Facing Services) and their configurations. 
+   - Includes services such as:
+     - CAMARA API Exposure Profile Enforcer
+     - 5G Network Slice
+     - Network profiles and UE (User Equipment) services
+
+3. **Resource Inventory**:
+   - Holds information about resources provisioned in the system, related to the respective RFSs.
+   - Contains resources like:
+     - CAMARA API Exposure
+     - Profile Enforcer
+     - 5G Core
+     - Configmaps (used for managing Kubernetes configurations)
+
+4. **Kubernetes Cluster**:
+   - Hosts and manages the 5G Core components
+   - Hosts all OSL operators (OSL can manage these through CRIDGE - not depicted)
+   - Facilitates configuration updates via ConfigMap and resource values
+
+5. **CAMARA API Controller**:
+   - Processes API exposure requests and orchestrates resources for API exposure
+   - Generates the CAMARA API Exposure resource on demand
+
+---
+
+### **Processes in the Workflow**
+
+
+There are three workflows depicted for a fully orchestrated end-to-end solution. The first one defines the prerequisites to deliver CAMARAaaS.
 
-For OSL’s community to get the full grasp of this architecture, we also make available a sequence diagram with all interactions that take place.
+The following two workflows explain how the CAMARAaaS is being delivered:
+
+1. **CAMARAaaS - Service Order Creation**:
+   - A Service Order is initiated to expose a CAMARA API Exposure service with parameter (`ExposeServiceUUID=Profile Enforcer`) using the TMF Service Order API
+   - This triggers the service provisioning process in the Service Inventory
+   - A custom resource is created and requests via the CAMARA API controller to create the CAMARA API Exposure for the Service Profile Enforcer
+   - The `CAMARA API Exposure` resource is created and populated with configuration values
+   - The API Exposure resource is finalized in the cluster and made available for consumption via the ordered CAMARA API Exposure service.
+
+
+2. 
**CAMARA API Usage - Service Operation**: + - The API request includes QoD (Quality on Demand) parameters and is accessible via a specified NodePort and/or endpoint URL (e.g. `portal.osl/{serviceAUUID}/camaraapiFQDN`). + - A client performs a CAMARA QoD Request. + - An event is created and circulated within OSL message bus to notify about the request. + - The event results into a TMF request that is used to patch and customize the Network Operator's Profile enforcer service in the Service Inventory. + - The configuration is passed through OSL Orchestrator and CRIDGE down to the `5G Core Controller`. + - The 5G Core Controller modifies the respective network configurations such as: + - Slices (5G Network Slice profiles) + - User equipment (UE) profiles + - These changes are propagated to the Kubernetes-managed 5G Core components. + - Resources in the Resource Inventory are updated to reflect the enforced requirements. + - Updates are made to the `Configmap` Resource to align with the latest configurations. + - These changes ensure that the 5G Core components operate with updated slice, profile, and UE configurations. + - A similar event as in previous steps is generated that enables the CAMARA API to get notified by the enforced changes and update the client. +--- + +This architecture emphasizes automation, modularity, and interoperability between CAMARA APIs and 5G Core Infrastructure, leveraging Kubernetes for seamless management and deployment. + +The first image below, displays a normal scenario of using OSL to deploy and change a running service deployed in a Kubernetes cluster, using Kubernetes CRDs. First the service is requested through a Service Order. For example, the requested service can be a Service Specification bundle consisting of: +- a 5G Core Service Specification that will deploy a 5G Core through HELM chart(s). +- a 5G Controller Service Specification (deployed via HELM) that can change configuration of slices for UEs. 
This 5G Controller might register further Kubernetes operators for reconfiguring the core, slices, etc. It is developed by the Network Operator and is able to reconfigure several components via e.g. NEF, scripting, API commands or other means.
+
+OSL deploys the services via a Service Order request. Then, while the service is in operation (ACTIVE), the user that ordered it can submit reconfigurations (see loop in figure) by changing characteristics of the service. These characteristics are then propagated from the OSL orchestrator, through CRIDGE, down to the 5G Controller Kubernetes resource to handle it.
 
 ![CAMARAaaS-Workflow-OSLToday](Documentation/CAMARAaaS-Workflow-OSLToday.png)
 
+As the 5G Controller Service has been running since the previous step, the user requests to expose it via a certain CAMARA API. The user orders the respective CAMARA Controller to expose this service via CAMARA API. The user just needs to pass the UUID identifier of the 5G Controller Service (facilitator of CAMARA Controller), as seen in TMF Service Inventory, during the Service Order request.
+
 ![CAMARAaaS-Workflow-ServiceOrdering](Documentation/CAMARAaaS-Workflow-ServiceOrdering.png)
 
+Once the new CAMARA API service is active, clients can use this new API to reconfigure the service. This request is passed through the message bus to the TMF API and then down to the 5G Controller Service.
+
+As already mentioned, the steps that “Update Service Characteristics” are achieved by sending a message to OSL’s message bus. 
+ + ![CAMARAaaS-Workflow-ServiceOperation](Documentation/CAMARAaaS-Workflow-ServiceOperation.png) + + ## Important Considerations -The OSL CAMARA as a Service Add-on depends on 2 Services: +The CAMARAaaS add-on depends on 2 Services: - The OSL CAMARA API Service - - Is a generic CAMARA API wrapper service implemented by the OSL team - - This implementation will be publicly offered as an Addon (Helm chart) -- A custom 5G-related Service (that shall be controlled/referenced by the CAMARA API Service): + - Is a generic CAMARA API wrapper Service implemented by the OSL team + - This implementation is publicly offered as an OpenSlice add-on (Helm Chart) +- A custom 5G-related Service (that shall be controlled/referenced by the CAMARA API Service) - An OSL user must implement and provide its own 5G-related Controlling Service (following OSL design patterns) - - Implementation is custom - + - The implementation is custom \ No newline at end of file