diff --git a/manifests/cockroachdb/README.md b/manifests/cockroachdb/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..6807afbb04c590213202b0849ee13ae3d9e50236
--- /dev/null
+++ b/manifests/cockroachdb/README.md
@@ -0,0 +1,53 @@
+# Ref: https://www.cockroachlabs.com/docs/stable/configure-cockroachdb-kubernetes.html
+
+DEPLOY_PATH="manifests/cockroachdb"
+OPERATOR_BASE_URL="https://raw.githubusercontent.com/cockroachdb/cockroach-operator/master"
+
+mkdir -p ${DEPLOY_PATH}
+
+# Apply Custom Resource Definition for the CockroachDB Operator
+curl -o "${DEPLOY_PATH}/crds.yaml" "${OPERATOR_BASE_URL}/install/crds.yaml"
+kubectl apply -f "${DEPLOY_PATH}/crds.yaml"
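+# Optional sanity check; the CRD name matches the one defined in crds.yaml:
+# kubectl get crd crdbclusters.crdb.cockroachlabs.com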
+
+# Deploy CockroachDB Operator
+curl -o "${DEPLOY_PATH}/operator.yaml" "${OPERATOR_BASE_URL}/install/operator.yaml"
+# edit "${DEPLOY_PATH}/operator.yaml"
+# - add env var: WATCH_NAMESPACE='tfs-ccdb'
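+#   Sketch of the expected entry (the exact location inside the container spec
+#   may differ between operator versions):
+#     env:
+#     - name: WATCH_NAMESPACE
+#       value: tfs-ccdb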
+kubectl apply -f "${DEPLOY_PATH}/operator.yaml"
+
+# Deploy CockroachDB
+curl -o "${DEPLOY_PATH}/cluster.yaml" "${OPERATOR_BASE_URL}/examples/example.yaml"
+# edit "${DEPLOY_PATH}/cluster.yaml"
+# - set version
+# - set number of replicas
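+#   The relevant fields, shown with the values used by the copy in this folder:
+#     image:
+#       name: cockroachdb/cockroach:v22.1.12
+#     nodes: 3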
+kubectl create namespace tfs-ccdb
+kubectl apply --namespace tfs-ccdb -f "${DEPLOY_PATH}/cluster.yaml"
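+# Optional: watch the cluster come up; with nodes=3 this should eventually show
+# pods cockroachdb-0, cockroachdb-1 and cockroachdb-2 in Running state
+# kubectl get pods --namespace tfs-ccdb -w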
+
+# Deploy CockroachDB Client
+curl -o "${DEPLOY_PATH}/client-secure-operator.yaml" "${OPERATOR_BASE_URL}/examples/client-secure-operator.yaml"
+kubectl create --namespace tfs-ccdb -f "${DEPLOY_PATH}/client-secure-operator.yaml"
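+# Optional: wait for the client pod to become ready before opening a SQL shell
+# kubectl wait --namespace tfs-ccdb --for=condition=Ready pod/cockroachdb-client-secure --timeout=300s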
+
+# Add a "tfs" user with admin rights
+kubectl exec -it cockroachdb-client-secure --namespace tfs-ccdb -- ./cockroach sql --certs-dir=/cockroach/cockroach-certs --host=cockroachdb-public
+# Then, inside the SQL shell, run:
+#   CREATE USER tfs WITH PASSWORD 'tfs123';
+#   GRANT admin TO tfs;
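+#   SHOW USERS;  -- optional check: "tfs" should appear as a member of "admin"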
+
+# Expose CockroachDB SQL port (26257)
+PORT=$(kubectl --namespace tfs-ccdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="sql")].port}')
+PATCH='{"data": {"'${PORT}'": "tfs-ccdb/cockroachdb-public:'${PORT}'"}}'
+kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}'
+CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
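+# After the patch, the SQL endpoint should be reachable from outside the cluster,
+# e.g. (illustrative; replace <node-ip> with the address of a cluster host):
+# cockroach sql --url "postgresql://tfs:tfs123@<node-ip>:26257/defaultdb?sslmode=require"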
+
+# Expose CockroachDB Console port (8080)
+PORT=$(kubectl --namespace tfs-ccdb get service cockroachdb-public -o 'jsonpath={.spec.ports[?(@.name=="http")].port}')
+PATCH='{"data": {"'${PORT}'": "tfs-ccdb/cockroachdb-public:'${PORT}'"}}'
+kubectl patch configmap nginx-ingress-tcp-microk8s-conf --namespace ingress --patch "${PATCH}"
+
+PORT_MAP='{"containerPort": '${PORT}', "hostPort": '${PORT}'}'
+CONTAINER='{"name": "nginx-ingress-microk8s", "ports": ['${PORT_MAP}']}'
+PATCH='{"spec": {"template": {"spec": {"containers": ['${CONTAINER}']}}}}'
+kubectl patch daemonset nginx-ingress-microk8s-controller --namespace ingress --patch "${PATCH}"
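+# The DB Console should then be reachable at https://<node-ip>:8080 (illustrative
+# address); log in with the "tfs" user created above.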
diff --git a/manifests/cockroachdb/client-secure-operator.yaml b/manifests/cockroachdb/client-secure-operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..618d30ce6c2e7ad8620a04e9755afa7fb3be905e
--- /dev/null
+++ b/manifests/cockroachdb/client-secure-operator.yaml
@@ -0,0 +1,51 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated, do not edit. Please edit this file instead: config/templates/client-secure-operator.yaml.in
+#
+
+apiVersion: v1
+kind: Pod
+metadata:
+  name: cockroachdb-client-secure
+spec:
+  serviceAccountName: cockroachdb-sa
+  containers:
+  - name: cockroachdb-client-secure
+    image: cockroachdb/cockroach:v22.1.8
+    imagePullPolicy: IfNotPresent
+    volumeMounts:
+    - name: client-certs
+      mountPath: /cockroach/cockroach-certs/
+    command:
+    - sleep
+    - "2147483648" # 2^31
+  terminationGracePeriodSeconds: 0
+  volumes:
+  - name: client-certs
+    projected:
+        sources:
+          - secret:
+              name: cockroachdb-node
+              items:
+                - key: ca.crt
+                  path: ca.crt
+          - secret:
+              name: cockroachdb-root
+              items:
+                - key: tls.crt
+                  path: client.root.crt
+                - key: tls.key
+                  path: client.root.key
+        defaultMode: 256
diff --git a/manifests/cockroachdb/cluster.yaml b/manifests/cockroachdb/cluster.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d36685109c8f69bf72aabcfe6d075bbfce6dfaa4
--- /dev/null
+++ b/manifests/cockroachdb/cluster.yaml
@@ -0,0 +1,70 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# Generated, do not edit. Please edit this file instead: config/templates/example.yaml.in
+#
+
+apiVersion: crdb.cockroachlabs.com/v1alpha1
+kind: CrdbCluster
+metadata:
+  # this translates to the name of the statefulset that is created
+  name: cockroachdb
+spec:
+  dataStore:
+    pvc:
+      spec:
+        accessModes:
+          - ReadWriteOnce
+        resources:
+          requests:
+            storage: "60Gi"
+        volumeMode: Filesystem
+  resources:
+    requests:
+      # This is intentionally low to make it work on local k3d clusters.
+      cpu: 100m
+      memory: 1Gi
+    limits:
+      cpu: 1
+      memory: 4Gi
+  tlsEnabled: true
+# You can set either a version of the db or a specific image name
+# cockroachDBVersion: v22.1.12
+  image:
+    name: cockroachdb/cockroach:v22.1.12
+  # nodes refers to the number of crdb pods that are created
+  # via the statefulset
+  nodes: 3
+  additionalLabels:
+    crdb: is-cool
+  # affinity is a new API field that is behind a feature gate that is
+  # disabled by default.  To enable please see the operator.yaml file.
+
+  # The affinity field will accept any podSpec affinity rule.
+  # affinity:
+  #   podAntiAffinity:
+  #      preferredDuringSchedulingIgnoredDuringExecution:
+  #      - weight: 100
+  #        podAffinityTerm:
+  #          labelSelector:
+  #            matchExpressions:
+  #            - key: app.kubernetes.io/instance
+  #              operator: In
+  #              values:
+  #              - cockroachdb
+  #          topologyKey: kubernetes.io/hostname
+
+  # nodeSelectors used to match against
+  # nodeSelector:
+  #   worker-pool-name: crdb-workers
diff --git a/manifests/cockroachdb/crds.yaml b/manifests/cockroachdb/crds.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b5cd89ae7001b3e200c0de7da240b660c461f3b
--- /dev/null
+++ b/manifests/cockroachdb/crds.yaml
@@ -0,0 +1,1385 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: (unknown)
+  creationTimestamp: null
+  name: crdbclusters.crdb.cockroachlabs.com
+spec:
+  group: crdb.cockroachlabs.com
+  names:
+    categories:
+    - all
+    - cockroachdb
+    kind: CrdbCluster
+    listKind: CrdbClusterList
+    plural: crdbclusters
+    shortNames:
+    - crdb
+    singular: crdbcluster
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: CrdbCluster is the CRD for the cockroachDB clusters API
+        properties:
+          apiVersion:
+            description: 'APIVersion defines the versioned schema of this representation
+              of an object. Servers should convert recognized schemas to the latest
+              internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
+            type: string
+          kind:
+            description: 'Kind is a string value representing the REST resource this
+              object represents. Servers may infer this from the endpoint the client
+              submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: CrdbClusterSpec defines the desired state of a CockroachDB
+              Cluster that the operator maintains.
+            properties:
+              additionalAnnotations:
+                additionalProperties:
+                  type: string
+                description: (Optional) Additional custom resource annotations that
+                  are added to all resources. Changing `AdditionalAnnotations` field
+                  will result in cockroachDB cluster restart.
+                type: object
+              additionalArgs:
+                description: '(Optional) Additional command line arguments for the
+                  `cockroach` binary Default: ""'
+                items:
+                  type: string
+                type: array
+              additionalLabels:
+                additionalProperties:
+                  type: string
+                description: (Optional) Additional custom resource labels that are
+                  added to all resources
+                type: object
+              affinity:
+                description: (Optional) If specified, the pod's scheduling constraints
+                properties:
+                  nodeAffinity:
+                    description: Describes node affinity scheduling rules for the
+                      pod.
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: The scheduler will prefer to schedule pods to
+                          nodes that satisfy the affinity expressions specified by
+                          this field, but it may choose a node that violates one or
+                          more of the expressions. The node that is most preferred
+                          is the one with the greatest sum of weights, i.e. for each
+                          node that meets all of the scheduling requirements (resource
+                          request, requiredDuringScheduling affinity expressions,
+                          etc.), compute a sum by iterating through the elements of
+                          this field and adding "weight" to the sum if the node matches
+                          the corresponding matchExpressions; the node(s) with the
+                          highest sum are the most preferred.
+                        items:
+                          description: An empty preferred scheduling term matches
+                            all objects with implicit weight 0 (i.e. it's a no-op).
+                            A null preferred scheduling term matches no objects (i.e.
+                            is also a no-op).
+                          properties:
+                            preference:
+                              description: A node selector term, associated with the
+                                corresponding weight.
+                              properties:
+                                matchExpressions:
+                                  description: A list of node selector requirements
+                                    by node's labels.
+                                  items:
+                                    description: A node selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: Represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists, DoesNotExist. Gt, and
+                                          Lt.
+                                        type: string
+                                      values:
+                                        description: An array of string values. If
+                                          the operator is In or NotIn, the values
+                                          array must be non-empty. If the operator
+                                          is Exists or DoesNotExist, the values array
+                                          must be empty. If the operator is Gt or
+                                          Lt, the values array must have a single
+                                          element, which will be interpreted as an
+                                          integer. This array is replaced during a
+                                          strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchFields:
+                                  description: A list of node selector requirements
+                                    by node's fields.
+                                  items:
+                                    description: A node selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: Represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists, DoesNotExist. Gt, and
+                                          Lt.
+                                        type: string
+                                      values:
+                                        description: An array of string values. If
+                                          the operator is In or NotIn, the values
+                                          array must be non-empty. If the operator
+                                          is Exists or DoesNotExist, the values array
+                                          must be empty. If the operator is Gt or
+                                          Lt, the values array must have a single
+                                          element, which will be interpreted as an
+                                          integer. This array is replaced during a
+                                          strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                              type: object
+                            weight:
+                              description: Weight associated with matching the corresponding
+                                nodeSelectorTerm, in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - preference
+                          - weight
+                          type: object
+                        type: array
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: If the affinity requirements specified by this
+                          field are not met at scheduling time, the pod will not be
+                          scheduled onto the node. If the affinity requirements specified
+                          by this field cease to be met at some point during pod execution
+                          (e.g. due to an update), the system may or may not try to
+                          eventually evict the pod from its node.
+                        properties:
+                          nodeSelectorTerms:
+                            description: Required. A list of node selector terms.
+                              The terms are ORed.
+                            items:
+                              description: A null or empty node selector term matches
+                                no objects. The requirements of them are ANDed. The
+                                TopologySelectorTerm type implements a subset of the
+                                NodeSelectorTerm.
+                              properties:
+                                matchExpressions:
+                                  description: A list of node selector requirements
+                                    by node's labels.
+                                  items:
+                                    description: A node selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: Represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists, DoesNotExist. Gt, and
+                                          Lt.
+                                        type: string
+                                      values:
+                                        description: An array of string values. If
+                                          the operator is In or NotIn, the values
+                                          array must be non-empty. If the operator
+                                          is Exists or DoesNotExist, the values array
+                                          must be empty. If the operator is Gt or
+                                          Lt, the values array must have a single
+                                          element, which will be interpreted as an
+                                          integer. This array is replaced during a
+                                          strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchFields:
+                                  description: A list of node selector requirements
+                                    by node's fields.
+                                  items:
+                                    description: A node selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: The label key that the selector
+                                          applies to.
+                                        type: string
+                                      operator:
+                                        description: Represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists, DoesNotExist. Gt, and
+                                          Lt.
+                                        type: string
+                                      values:
+                                        description: An array of string values. If
+                                          the operator is In or NotIn, the values
+                                          array must be non-empty. If the operator
+                                          is Exists or DoesNotExist, the values array
+                                          must be empty. If the operator is Gt or
+                                          Lt, the values array must have a single
+                                          element, which will be interpreted as an
+                                          integer. This array is replaced during a
+                                          strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                              type: object
+                            type: array
+                        required:
+                        - nodeSelectorTerms
+                        type: object
+                    type: object
+                  podAffinity:
+                    description: Describes pod affinity scheduling rules (e.g. co-locate
+                      this pod in the same node, zone, etc. as some other pod(s)).
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: The scheduler will prefer to schedule pods to
+                          nodes that satisfy the affinity expressions specified by
+                          this field, but it may choose a node that violates one or
+                          more of the expressions. The node that is most preferred
+                          is the one with the greatest sum of weights, i.e. for each
+                          node that meets all of the scheduling requirements (resource
+                          request, requiredDuringScheduling affinity expressions,
+                          etc.), compute a sum by iterating through the elements of
+                          this field and adding "weight" to the sum if the node has
+                          pods which matches the corresponding podAffinityTerm; the
+                          node(s) with the highest sum are the most preferred.
+                        items:
+                          description: The weights of all of the matched WeightedPodAffinityTerm
+                            fields are added per-node to find the most preferred node(s)
+                          properties:
+                            podAffinityTerm:
+                              description: Required. A pod affinity term, associated
+                                with the corresponding weight.
+                              properties:
+                                labelSelector:
+                                  description: A label query over a set of resources,
+                                    in this case pods.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list of label
+                                        selector requirements. The requirements are
+                                        ANDed.
+                                      items:
+                                        description: A label selector requirement
+                                          is a selector that contains values, a key,
+                                          and an operator that relates the key and
+                                          values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that
+                                              the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: operator represents a key's
+                                              relationship to a set of values. Valid
+                                              operators are In, NotIn, Exists and
+                                              DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: values is an array of string
+                                              values. If the operator is In or NotIn,
+                                              the values array must be non-empty.
+                                              If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This
+                                              array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: matchLabels is a map of {key,value}
+                                        pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions,
+                                        whose key field is "key", the operator is
+                                        "In", and the values array contains only "value".
+                                        The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                namespaces:
+                                  description: namespaces specifies which namespaces
+                                    the labelSelector applies to (matches against);
+                                    null or empty list means "this pod's namespace"
+                                  items:
+                                    type: string
+                                  type: array
+                                topologyKey:
+                                  description: This pod should be co-located (affinity)
+                                    or not co-located (anti-affinity) with the pods
+                                    matching the labelSelector in the specified namespaces,
+                                    where co-located is defined as running on a node
+                                    whose value of the label with key topologyKey
+                                    matches that of any node on which any of the selected
+                                    pods is running. Empty topologyKey is not allowed.
+                                  type: string
+                              required:
+                              - topologyKey
+                              type: object
+                            weight:
+                              description: weight associated with matching the corresponding
+                                podAffinityTerm, in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - podAffinityTerm
+                          - weight
+                          type: object
+                        type: array
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: If the affinity requirements specified by this
+                          field are not met at scheduling time, the pod will not be
+                          scheduled onto the node. If the affinity requirements specified
+                          by this field cease to be met at some point during pod execution
+                          (e.g. due to a pod label update), the system may or may
+                          not try to eventually evict the pod from its node. When
+                          there are multiple elements, the lists of nodes corresponding
+                          to each podAffinityTerm are intersected, i.e. all terms
+                          must be satisfied.
+                        items:
+                          description: Defines a set of pods (namely those matching
+                            the labelSelector relative to the given namespace(s))
+                            that this pod should be co-located (affinity) or not co-located
+                            (anti-affinity) with, where co-located is defined as running
+                            on a node whose value of the label with key <topologyKey>
+                            matches that of any node on which a pod of the set of
+                            pods is running
+                          properties:
+                            labelSelector:
+                              description: A label query over a set of resources,
+                                in this case pods.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: A label selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: operator represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: values is an array of string
+                                          values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the
+                                          operator is Exists or DoesNotExist, the
+                                          values array must be empty. This array is
+                                          replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: matchLabels is a map of {key,value}
+                                    pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions,
+                                    whose key field is "key", the operator is "In",
+                                    and the values array contains only "value". The
+                                    requirements are ANDed.
+                                  type: object
+                              type: object
+                            namespaces:
+                              description: namespaces specifies which namespaces the
+                                labelSelector applies to (matches against); null or
+                                empty list means "this pod's namespace"
+                              items:
+                                type: string
+                              type: array
+                            topologyKey:
+                              description: This pod should be co-located (affinity)
+                                or not co-located (anti-affinity) with the pods matching
+                                the labelSelector in the specified namespaces, where
+                                co-located is defined as running on a node whose value
+                                of the label with key topologyKey matches that of
+                                any node on which any of the selected pods is running.
+                                Empty topologyKey is not allowed.
+                              type: string
+                          required:
+                          - topologyKey
+                          type: object
+                        type: array
+                    type: object
+                  podAntiAffinity:
+                    description: Describes pod anti-affinity scheduling rules (e.g.
+                      avoid putting this pod in the same node, zone, etc. as some
+                      other pod(s)).
+                    properties:
+                      preferredDuringSchedulingIgnoredDuringExecution:
+                        description: The scheduler will prefer to schedule pods to
+                          nodes that satisfy the anti-affinity expressions specified
+                          by this field, but it may choose a node that violates one
+                          or more of the expressions. The node that is most preferred
+                          is the one with the greatest sum of weights, i.e. for each
+                          node that meets all of the scheduling requirements (resource
+                          request, requiredDuringScheduling anti-affinity expressions,
+                          etc.), compute a sum by iterating through the elements of
+                          this field and adding "weight" to the sum if the node has
+                          pods which matches the corresponding podAffinityTerm; the
+                          node(s) with the highest sum are the most preferred.
+                        items:
+                          description: The weights of all of the matched WeightedPodAffinityTerm
+                            fields are added per-node to find the most preferred node(s)
+                          properties:
+                            podAffinityTerm:
+                              description: Required. A pod affinity term, associated
+                                with the corresponding weight.
+                              properties:
+                                labelSelector:
+                                  description: A label query over a set of resources,
+                                    in this case pods.
+                                  properties:
+                                    matchExpressions:
+                                      description: matchExpressions is a list of label
+                                        selector requirements. The requirements are
+                                        ANDed.
+                                      items:
+                                        description: A label selector requirement
+                                          is a selector that contains values, a key,
+                                          and an operator that relates the key and
+                                          values.
+                                        properties:
+                                          key:
+                                            description: key is the label key that
+                                              the selector applies to.
+                                            type: string
+                                          operator:
+                                            description: operator represents a key's
+                                              relationship to a set of values. Valid
+                                              operators are In, NotIn, Exists and
+                                              DoesNotExist.
+                                            type: string
+                                          values:
+                                            description: values is an array of string
+                                              values. If the operator is In or NotIn,
+                                              the values array must be non-empty.
+                                              If the operator is Exists or DoesNotExist,
+                                              the values array must be empty. This
+                                              array is replaced during a strategic
+                                              merge patch.
+                                            items:
+                                              type: string
+                                            type: array
+                                        required:
+                                        - key
+                                        - operator
+                                        type: object
+                                      type: array
+                                    matchLabels:
+                                      additionalProperties:
+                                        type: string
+                                      description: matchLabels is a map of {key,value}
+                                        pairs. A single {key,value} in the matchLabels
+                                        map is equivalent to an element of matchExpressions,
+                                        whose key field is "key", the operator is
+                                        "In", and the values array contains only "value".
+                                        The requirements are ANDed.
+                                      type: object
+                                  type: object
+                                namespaces:
+                                  description: namespaces specifies which namespaces
+                                    the labelSelector applies to (matches against);
+                                    null or empty list means "this pod's namespace"
+                                  items:
+                                    type: string
+                                  type: array
+                                topologyKey:
+                                  description: This pod should be co-located (affinity)
+                                    or not co-located (anti-affinity) with the pods
+                                    matching the labelSelector in the specified namespaces,
+                                    where co-located is defined as running on a node
+                                    whose value of the label with key topologyKey
+                                    matches that of any node on which any of the selected
+                                    pods is running. Empty topologyKey is not allowed.
+                                  type: string
+                              required:
+                              - topologyKey
+                              type: object
+                            weight:
+                              description: weight associated with matching the corresponding
+                                podAffinityTerm, in the range 1-100.
+                              format: int32
+                              type: integer
+                          required:
+                          - podAffinityTerm
+                          - weight
+                          type: object
+                        type: array
+                      requiredDuringSchedulingIgnoredDuringExecution:
+                        description: If the anti-affinity requirements specified by
+                          this field are not met at scheduling time, the pod will
+                          not be scheduled onto the node. If the anti-affinity requirements
+                          specified by this field cease to be met at some point during
+                          pod execution (e.g. due to a pod label update), the system
+                          may or may not try to eventually evict the pod from its
+                          node. When there are multiple elements, the lists of nodes
+                          corresponding to each podAffinityTerm are intersected, i.e.
+                          all terms must be satisfied.
+                        items:
+                          description: Defines a set of pods (namely those matching
+                            the labelSelector relative to the given namespace(s))
+                            that this pod should be co-located (affinity) or not co-located
+                            (anti-affinity) with, where co-located is defined as running
+                            on a node whose value of the label with key <topologyKey>
+                            matches that of any node on which a pod of the set of
+                            pods is running
+                          properties:
+                            labelSelector:
+                              description: A label query over a set of resources,
+                                in this case pods.
+                              properties:
+                                matchExpressions:
+                                  description: matchExpressions is a list of label
+                                    selector requirements. The requirements are ANDed.
+                                  items:
+                                    description: A label selector requirement is a
+                                      selector that contains values, a key, and an
+                                      operator that relates the key and values.
+                                    properties:
+                                      key:
+                                        description: key is the label key that the
+                                          selector applies to.
+                                        type: string
+                                      operator:
+                                        description: operator represents a key's relationship
+                                          to a set of values. Valid operators are
+                                          In, NotIn, Exists and DoesNotExist.
+                                        type: string
+                                      values:
+                                        description: values is an array of string
+                                          values. If the operator is In or NotIn,
+                                          the values array must be non-empty. If the
+                                          operator is Exists or DoesNotExist, the
+                                          values array must be empty. This array is
+                                          replaced during a strategic merge patch.
+                                        items:
+                                          type: string
+                                        type: array
+                                    required:
+                                    - key
+                                    - operator
+                                    type: object
+                                  type: array
+                                matchLabels:
+                                  additionalProperties:
+                                    type: string
+                                  description: matchLabels is a map of {key,value}
+                                    pairs. A single {key,value} in the matchLabels
+                                    map is equivalent to an element of matchExpressions,
+                                    whose key field is "key", the operator is "In",
+                                    and the values array contains only "value". The
+                                    requirements are ANDed.
+                                  type: object
+                              type: object
+                            namespaces:
+                              description: namespaces specifies which namespaces the
+                                labelSelector applies to (matches against); null or
+                                empty list means "this pod's namespace"
+                              items:
+                                type: string
+                              type: array
+                            topologyKey:
+                              description: This pod should be co-located (affinity)
+                                or not co-located (anti-affinity) with the pods matching
+                                the labelSelector in the specified namespaces, where
+                                co-located is defined as running on a node whose value
+                                of the label with key topologyKey matches that of
+                                any node on which any of the selected pods is running.
+                                Empty topologyKey is not allowed.
+                              type: string
+                          required:
+                          - topologyKey
+                          type: object
+                        type: array
+                    type: object
+                type: object
+              automountServiceAccountToken:
+                description: '(Optional) AutomountServiceAccountToken determines whether
+                  or not the stateful set pods should automount the service account
+                  token. This is the default behavior in Kubernetes. For backward
+                  compatibility reasons, this value defaults to `false` here. Default:
+                  false'
+                type: boolean
+              cache:
+                description: '(Optional) The total size for caches (`--cache` command
+                  line parameter) Default: "25%"'
+                type: string
+              clientTLSSecret:
+                description: '(Optional) The secret with a certificate and a private
+                  key for root database user Default: ""'
+                type: string
+              cockroachDBVersion:
+                description: '(Optional) CockroachDBVersion sets the explicit version
+                  of the cockroachDB image Default: ""'
+                type: string
+              dataStore:
+                description: Database disk storage configuration
+                properties:
+                  hostPath:
+                    description: (Optional) Directory from the host node's filesystem
+                    properties:
+                      path:
+                        description: 'Path of the directory on the host. If the path
+                          is a symlink, it will follow the link to the real path.
+                          More info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
+                        type: string
+                      type:
+                        description: 'Type for HostPath Volume Defaults to "" More
+                          info: https://kubernetes.io/docs/concepts/storage/volumes#hostpath'
+                        type: string
+                    required:
+                    - path
+                    type: object
+                  pvc:
+                    description: (Optional) Persistent volume to use
+                    properties:
+                      source:
+                        description: (Optional) Existing PVC in the same namespace
+                        properties:
+                          claimName:
+                            description: 'ClaimName is the name of a PersistentVolumeClaim
+                              in the same namespace as the pod using this volume.
+                              More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#persistentvolumeclaims'
+                            type: string
+                          readOnly:
+                            description: Will force the ReadOnly setting in VolumeMounts.
+                              Default false.
+                            type: boolean
+                        required:
+                        - claimName
+                        type: object
+                      spec:
+                        description: (Optional) PVC to request a new persistent volume
+                        properties:
+                          accessModes:
+                            description: 'AccessModes contains the desired access
+                              modes the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1'
+                            items:
+                              type: string
+                            type: array
+                          dataSource:
+                            description: 'This field can be used to specify either:
+                              * An existing VolumeSnapshot object (snapshot.storage.k8s.io/VolumeSnapshot)
+                              * An existing PVC (PersistentVolumeClaim) * An existing
+                              custom resource that implements data population (Alpha)
+                              In order to use custom resource types that implement
+                              data population, the AnyVolumeDataSource feature gate
+                              must be enabled. If the provisioner or an external controller
+                              can support the specified data source, it will create
+                              a new volume based on the contents of the specified
+                              data source.'
+                            properties:
+                              apiGroup:
+                                description: APIGroup is the group for the resource
+                                  being referenced. If APIGroup is not specified,
+                                  the specified Kind must be in the core API group.
+                                  For any other third-party types, APIGroup is required.
+                                type: string
+                              kind:
+                                description: Kind is the type of resource being referenced
+                                type: string
+                              name:
+                                description: Name is the name of resource being referenced
+                                type: string
+                            required:
+                            - kind
+                            - name
+                            type: object
+                          resources:
+                            description: 'Resources represents the minimum resources
+                              the volume should have. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#resources'
+                            properties:
+                              limits:
+                                additionalProperties:
+                                  anyOf:
+                                  - type: integer
+                                  - type: string
+                                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                  x-kubernetes-int-or-string: true
+                                description: 'Limits describes the maximum amount
+                                  of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                                type: object
+                              requests:
+                                additionalProperties:
+                                  anyOf:
+                                  - type: integer
+                                  - type: string
+                                  pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                                  x-kubernetes-int-or-string: true
+                                description: 'Requests describes the minimum amount
+                                  of compute resources required. If Requests is omitted
+                                  for a container, it defaults to Limits if that is
+                                  explicitly specified, otherwise to an implementation-defined
+                                  value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                                type: object
+                            type: object
+                          selector:
+                            description: A label query over volumes to consider for
+                              binding.
+                            properties:
+                              matchExpressions:
+                                description: matchExpressions is a list of label selector
+                                  requirements. The requirements are ANDed.
+                                items:
+                                  description: A label selector requirement is a selector
+                                    that contains values, a key, and an operator that
+                                    relates the key and values.
+                                  properties:
+                                    key:
+                                      description: key is the label key that the selector
+                                        applies to.
+                                      type: string
+                                    operator:
+                                      description: operator represents a key's relationship
+                                        to a set of values. Valid operators are In,
+                                        NotIn, Exists and DoesNotExist.
+                                      type: string
+                                    values:
+                                      description: values is an array of string values.
+                                        If the operator is In or NotIn, the values
+                                        array must be non-empty. If the operator is
+                                        Exists or DoesNotExist, the values array must
+                                        be empty. This array is replaced during a
+                                        strategic merge patch.
+                                      items:
+                                        type: string
+                                      type: array
+                                  required:
+                                  - key
+                                  - operator
+                                  type: object
+                                type: array
+                              matchLabels:
+                                additionalProperties:
+                                  type: string
+                                description: matchLabels is a map of {key,value} pairs.
+                                  A single {key,value} in the matchLabels map is equivalent
+                                  to an element of matchExpressions, whose key field
+                                  is "key", the operator is "In", and the values array
+                                  contains only "value". The requirements are ANDed.
+                                type: object
+                            type: object
+                          storageClassName:
+                            description: 'Name of the StorageClass required by the
+                              claim. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#class-1'
+                            type: string
+                          volumeMode:
+                            description: volumeMode defines what type of volume is
+                              required by the claim. Value of Filesystem is implied
+                              when not included in claim spec.
+                            type: string
+                          volumeName:
+                            description: VolumeName is the binding reference to the
+                              PersistentVolume backing this claim.
+                            type: string
+                        type: object
+                    type: object
+                  supportsAutoResize:
+                    description: '(Optional) SupportsAutoResize marks that a PVC will
+                      resize without restarting the entire cluster. Default: false'
+                    type: boolean
+                type: object
+              grpcPort:
+                description: '(Optional) The database port (`--port` CLI parameter
+                  when starting the service) Default: 26258'
+                format: int32
+                type: integer
+              httpPort:
+                description: '(Optional) The web UI port (`--http-port` CLI parameter
+                  when starting the service) Default: 8080'
+                format: int32
+                type: integer
+              image:
+                description: (Optional) Container image information
+                properties:
+                  name:
+                    description: 'Container image with supported CockroachDB version.
+                      This defaults to the version pinned to the operator and requires
+                      a full container and tag/sha name. For instance: cockroachdb/cockroach:v20.1'
+                    type: string
+                  pullPolicy:
+                    description: '(Optional) PullPolicy for the image, which defaults
+                      to IfNotPresent. Default: IfNotPresent'
+                    type: string
+                  pullSecret:
+                    description: (Optional) Secret name containing the dockerconfig
+                      to use for a registry that requires authentication. The secret
+                      must be configured first by the user.
+                    type: string
+                required:
+                - name
+                type: object
+              ingress:
+                description: (Optional) Ingress defines the Ingress configuration
+                  used to expose the services using Ingress
+                properties:
+                  sql:
+                    description: (Optional) Ingress options for SQL connections. Adding/changing
+                      the SQL host will result in a rolling update of the crdb cluster
+                      nodes
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: (Optional) Annotations related to ingress resource
+                        type: object
+                      host:
+                        description: host is the host to be used for exposing the service
+                        type: string
+                      ingressClassName:
+                        description: (Optional) IngressClassName to be used by ingress
+                          resource
+                        type: string
+                      tls:
+                        description: (Optional) TLS describes the TLS certificate
+                          info
+                        items:
+                          description: IngressTLS describes the transport layer security
+                            associated with an Ingress.
+                          properties:
+                            hosts:
+                              description: Hosts are a list of hosts included in the
+                                TLS certificate. The values in this list must match
+                                the name(s) used in the tlsSecret. Defaults to the
+                                wildcard host setting for the loadbalancer controller
+                                fulfilling this Ingress, if left unspecified.
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            secretName:
+                              description: SecretName is the name of the secret used
+                                to terminate TLS traffic on port 443. Field is left
+                                optional to allow TLS routing based on SNI hostname
+                                alone. If the SNI host in a listener conflicts with
+                                the "Host" header field used by an IngressRule, the
+                                SNI host is used for termination and value of the
+                                Host header is used for routing.
+                              type: string
+                          type: object
+                        type: array
+                    required:
+                    - host
+                    type: object
+                  ui:
+                    description: (Optional) Ingress options for UI (HTTP) connections
+                    properties:
+                      annotations:
+                        additionalProperties:
+                          type: string
+                        description: (Optional) Annotations related to ingress resource
+                        type: object
+                      host:
+                        description: host is the host to be used for exposing the service
+                        type: string
+                      ingressClassName:
+                        description: (Optional) IngressClassName to be used by ingress
+                          resource
+                        type: string
+                      tls:
+                        description: (Optional) TLS describes the TLS certificate
+                          info
+                        items:
+                          description: IngressTLS describes the transport layer security
+                            associated with an Ingress.
+                          properties:
+                            hosts:
+                              description: Hosts are a list of hosts included in the
+                                TLS certificate. The values in this list must match
+                                the name(s) used in the tlsSecret. Defaults to the
+                                wildcard host setting for the loadbalancer controller
+                                fulfilling this Ingress, if left unspecified.
+                              items:
+                                type: string
+                              type: array
+                              x-kubernetes-list-type: atomic
+                            secretName:
+                              description: SecretName is the name of the secret used
+                                to terminate TLS traffic on port 443. Field is left
+                                optional to allow TLS routing based on SNI hostname
+                                alone. If the SNI host in a listener conflicts with
+                                the "Host" header field used by an IngressRule, the
+                                SNI host is used for termination and value of the
+                                Host header is used for routing.
+                              type: string
+                          type: object
+                        type: array
+                    required:
+                    - host
+                    type: object
+                type: object
+              logConfigMap:
+                description: '(Optional) LogConfigMap defines the ConfigMap that
+                  contains the log configuration used to send logs through the
+                  proper channels in CockroachDB. Logging configuration is available
+                  for cockroach version v21.1.0 onwards. The logging configuration
+                  is provided as a YAML file; see the default logging configuration
+                  (https://www.cockroachlabs.com/docs/stable/configure-logs.html#default-logging-configuration).
+                  The default logging for cockroach version v20.x or less is stderr;
+                  the logging API is ignored for older versions. NOTE: The `data`
+                  field of the map must contain an entry called `logging.yaml` that
+                  contains the config options.'
+                type: string
+              maxSQLMemory:
+                description: '(Optional) The maximum in-memory storage capacity available
+                  to store temporary data for SQL queries (`--max-sql-memory` parameter)
+                  Default: "25%"'
+                type: string
+              maxUnavailable:
+                description: (Optional) The maximum number of pods that can be unavailable
+                  during a rolling update. This number is set in the PodDisruptionBudget
+                  and defaults to 1.
+                format: int32
+                type: integer
+              minAvailable:
+                description: (Optional) The minimum number of pods that must remain
+                  available during a rolling update. This number is set in the
+                  PodDisruptionBudget and defaults to 1.
+                format: int32
+                type: integer
+              nodeSelector:
+                additionalProperties:
+                  type: string
+                description: (Optional) If specified, the pod's nodeSelector
+                type: object
+              nodeTLSSecret:
+                description: '(Optional) The secret with certificates and a private
+                  key for the TLS endpoint on the database port. The standard naming
+                  of files is expected (tls.key, tls.crt, ca.crt) Default: ""'
+                type: string
+              nodes:
+                description: Number of nodes (pods) in the cluster
+                format: int32
+                minimum: 3
+                type: integer
+              podEnvVariables:
+                description: '(Optional) PodEnvVariables is a slice of environment
+                  variables that are added to the pods Default: (empty list)'
+                items:
+                  description: EnvVar represents an environment variable present in
+                    a Container.
+                  properties:
+                    name:
+                      description: Name of the environment variable. Must be a C_IDENTIFIER.
+                      type: string
+                    value:
+                      description: 'Variable references $(VAR_NAME) are expanded using
+                        the previously defined environment variables in the container
+                        and any service environment variables. If a variable cannot
+                        be resolved, the reference in the input string will be unchanged.
+                        The $(VAR_NAME) syntax can be escaped with a double $$, i.e.:
+                        $$(VAR_NAME). Escaped references will never be expanded, regardless
+                        of whether the variable exists or not. Defaults to "".'
+                      type: string
+                    valueFrom:
+                      description: Source for the environment variable's value. Cannot
+                        be used if value is not empty.
+                      properties:
+                        configMapKeyRef:
+                          description: Selects a key of a ConfigMap.
+                          properties:
+                            key:
+                              description: The key to select.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                            optional:
+                              description: Specify whether the ConfigMap or its key
+                                must be defined
+                              type: boolean
+                          required:
+                          - key
+                          type: object
+                        fieldRef:
+                          description: 'Selects a field of the pod: supports metadata.name,
+                            metadata.namespace, `metadata.labels[''<KEY>'']`, `metadata.annotations[''<KEY>'']`,
+                            spec.nodeName, spec.serviceAccountName, status.hostIP,
+                            status.podIP, status.podIPs.'
+                          properties:
+                            apiVersion:
+                              description: Version of the schema the FieldPath is
+                                written in terms of, defaults to "v1".
+                              type: string
+                            fieldPath:
+                              description: Path of the field to select in the specified
+                                API version.
+                              type: string
+                          required:
+                          - fieldPath
+                          type: object
+                        resourceFieldRef:
+                          description: 'Selects a resource of the container: only
+                            resources limits and requests (limits.cpu, limits.memory,
+                            limits.ephemeral-storage, requests.cpu, requests.memory
+                            and requests.ephemeral-storage) are currently supported.'
+                          properties:
+                            containerName:
+                              description: 'Container name: required for volumes,
+                                optional for env vars'
+                              type: string
+                            divisor:
+                              anyOf:
+                              - type: integer
+                              - type: string
+                              description: Specifies the output format of the exposed
+                                resources, defaults to "1"
+                              pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                              x-kubernetes-int-or-string: true
+                            resource:
+                              description: 'Required: resource to select'
+                              type: string
+                          required:
+                          - resource
+                          type: object
+                        secretKeyRef:
+                          description: Selects a key of a secret in the pod's namespace
+                          properties:
+                            key:
+                              description: The key of the secret to select from.  Must
+                                be a valid secret key.
+                              type: string
+                            name:
+                              description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+                                TODO: Add other useful fields. apiVersion, kind, uid?'
+                              type: string
+                            optional:
+                              description: Specify whether the Secret or its key must
+                                be defined
+                              type: boolean
+                          required:
+                          - key
+                          type: object
+                      type: object
+                  required:
+                  - name
+                  type: object
+                type: array
+              resources:
+                description: '(Optional) Database container resource limits. Any container
+                  limits can be specified. Default: (not specified)'
+                properties:
+                  limits:
+                    additionalProperties:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
+                    description: 'Limits describes the maximum amount of compute resources
+                      allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                    type: object
+                  requests:
+                    additionalProperties:
+                      anyOf:
+                      - type: integer
+                      - type: string
+                      pattern: ^(\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))(([KMGTPE]i)|[numkMGTPE]|([eE](\+|-)?(([0-9]+(\.[0-9]*)?)|(\.[0-9]+))))?$
+                      x-kubernetes-int-or-string: true
+                    description: 'Requests describes the minimum amount of compute
+                      resources required. If Requests is omitted for a container,
+                      it defaults to Limits if that is explicitly specified, otherwise
+                      to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/'
+                    type: object
+                type: object
+              sqlPort:
+                description: '(Optional) The SQL Port number Default: 26257'
+                format: int32
+                type: integer
+              tlsEnabled:
+                description: (Optional) TLSEnabled determines if TLS is enabled for
+                  your CockroachDB Cluster
+                type: boolean
+              tolerations:
+                description: (Optional) Tolerations for scheduling pods onto some
+                  dedicated nodes
+                items:
+                  description: The pod this Toleration is attached to tolerates any
+                    taint that matches the triple <key,value,effect> using the matching
+                    operator <operator>.
+                  properties:
+                    effect:
+                      description: Effect indicates the taint effect to match. Empty
+                        means match all taint effects. When specified, allowed values
+                        are NoSchedule, PreferNoSchedule and NoExecute.
+                      type: string
+                    key:
+                      description: Key is the taint key that the toleration applies
+                        to. Empty means match all taint keys. If the key is empty,
+                        operator must be Exists; this combination means to match all
+                        values and all keys.
+                      type: string
+                    operator:
+                      description: Operator represents a key's relationship to the
+                        value. Valid operators are Exists and Equal. Defaults to Equal.
+                        Exists is equivalent to wildcard for value, so that a pod
+                        can tolerate all taints of a particular category.
+                      type: string
+                    tolerationSeconds:
+                      description: TolerationSeconds represents the period of time
+                        the toleration (which must be of effect NoExecute, otherwise
+                        this field is ignored) tolerates the taint. By default, it
+                        is not set, which means tolerate the taint forever (do not
+                        evict). Zero and negative values will be treated as 0 (evict
+                        immediately) by the system.
+                      format: int64
+                      type: integer
+                    value:
+                      description: Value is the taint value the toleration matches
+                        to. If the operator is Exists, the value should be empty,
+                        otherwise just a regular string.
+                      type: string
+                  type: object
+                type: array
+              topologySpreadConstraints:
+                description: (Optional) If specified, the pod's topology spread constraints
+                items:
+                  description: TopologySpreadConstraint specifies how to spread matching
+                    pods among the given topology.
+                  properties:
+                    labelSelector:
+                      description: LabelSelector is used to find matching pods. Pods
+                        that match this label selector are counted to determine the
+                        number of pods in their corresponding topology domain.
+                      properties:
+                        matchExpressions:
+                          description: matchExpressions is a list of label selector
+                            requirements. The requirements are ANDed.
+                          items:
+                            description: A label selector requirement is a selector
+                              that contains values, a key, and an operator that relates
+                              the key and values.
+                            properties:
+                              key:
+                                description: key is the label key that the selector
+                                  applies to.
+                                type: string
+                              operator:
+                                description: operator represents a key's relationship
+                                  to a set of values. Valid operators are In, NotIn,
+                                  Exists and DoesNotExist.
+                                type: string
+                              values:
+                                description: values is an array of string values.
+                                  If the operator is In or NotIn, the values array
+                                  must be non-empty. If the operator is Exists or
+                                  DoesNotExist, the values array must be empty. This
+                                  array is replaced during a strategic merge patch.
+                                items:
+                                  type: string
+                                type: array
+                            required:
+                            - key
+                            - operator
+                            type: object
+                          type: array
+                        matchLabels:
+                          additionalProperties:
+                            type: string
+                          description: matchLabels is a map of {key,value} pairs.
+                            A single {key,value} in the matchLabels map is equivalent
+                            to an element of matchExpressions, whose key field is
+                            "key", the operator is "In", and the values array contains
+                            only "value". The requirements are ANDed.
+                          type: object
+                      type: object
+                    maxSkew:
+                      description: 'MaxSkew describes the degree to which pods may
+                        be unevenly distributed. When `whenUnsatisfiable=DoNotSchedule`,
+                        it is the maximum permitted difference between the number
+                        of matching pods in the target topology and the global minimum.
+                        For example, in a 3-zone cluster, MaxSkew is set to 1, and
+                        pods with the same labelSelector spread as 1/1/0: | zone1
+                        | zone2 | zone3 | |   P   |   P   |       | - if MaxSkew is
+                        1, incoming pod can only be scheduled to zone3 to become 1/1/1;
+                        scheduling it onto zone1(zone2) would make the ActualSkew(2-0)
+                        on zone1(zone2) violate MaxSkew(1). - if MaxSkew is 2, incoming
+                        pod can be scheduled onto any zone. When `whenUnsatisfiable=ScheduleAnyway`,
+                        it is used to give higher precedence to topologies that satisfy
+                        it. It''s a required field. Default value is 1 and 0 is not
+                        allowed.'
+                      format: int32
+                      type: integer
+                    topologyKey:
+                      description: TopologyKey is the key of node labels. Nodes that
+                        have a label with this key and identical values are considered
+                        to be in the same topology. We consider each <key, value>
+                        as a "bucket", and try to put balanced number of pods into
+                        each bucket. It's a required field.
+                      type: string
+                    whenUnsatisfiable:
+                      description: 'WhenUnsatisfiable indicates how to deal with a
+                        pod if it doesn''t satisfy the spread constraint. - DoNotSchedule
+                        (default) tells the scheduler not to schedule it. - ScheduleAnyway
+                        tells the scheduler to schedule the pod in any location,   but
+                        giving higher precedence to topologies that would help reduce
+                        the   skew. A constraint is considered "Unsatisfiable" for
+                        an incoming pod if and only if every possible node assignment
+                        for that pod would violate "MaxSkew" on some topology. For
+                        example, in a 3-zone cluster, MaxSkew is set to 1, and pods
+                        with the same labelSelector spread as 3/1/1: | zone1 | zone2
+                        | zone3 | | P P P |   P   |   P   | If WhenUnsatisfiable is
+                        set to DoNotSchedule, incoming pod can only be scheduled to
+                        zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on
+                        zone2(zone3) satisfies MaxSkew(1). In other words, the cluster
+                        can still be imbalanced, but scheduler won''t make it *more*
+                        imbalanced. It''s a required field.'
+                      type: string
+                  required:
+                  - maxSkew
+                  - topologyKey
+                  - whenUnsatisfiable
+                  type: object
+                type: array
+            required:
+            - dataStore
+            - nodes
+            type: object
+          status:
+            description: CrdbClusterStatus defines the observed state of Cluster
+            properties:
+              clusterStatus:
+                description: OperatorStatus represents the status of the operator
+                  (Failed, Starting, Running or Other)
+                type: string
+              conditions:
+                description: List of conditions representing the current status of
+                  the cluster resource.
+                items:
+                  description: ClusterCondition represents cluster status as it is
+                    perceived by the operator
+                  properties:
+                    lastTransitionTime:
+                      description: The time when the condition was updated
+                      format: date-time
+                      type: string
+                    status:
+                      description: 'Condition status: True, False or Unknown'
+                      type: string
+                    type:
+                      description: Type/Name of the condition
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - status
+                  - type
+                  type: object
+                type: array
+              crdbcontainerimage:
+                description: CrdbContainerImage is the container image that will be installed
+                type: string
+              operatorActions:
+                items:
+                  description: ClusterAction represents cluster status as it is perceived
+                    by the operator
+                  properties:
+                    lastTransitionTime:
+                      description: The time when the condition was updated
+                      format: date-time
+                      type: string
+                    message:
+                      description: (Optional) Message related to the status of the
+                        action
+                      type: string
+                    status:
+                      description: 'Action status: Failed, Finished or Unknown'
+                      type: string
+                    type:
+                      description: Type/Name of the action
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - status
+                  - type
+                  type: object
+                type: array
+              sqlHost:
+                description: SQLHost is the host to be used with SQL ingress
+                type: string
+              version:
+                description: Database service version. Currently not populated; just
+                  a placeholder.
+                type: string
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
+status:
+  acceptedNames:
+    kind: ""
+    plural: ""
+  conditions: []
+  storedVersions: []
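Taken together, the schema above reduces to two required spec fields (dataStore and nodes) plus a handful of common options. As a quick cross-check, the hedged sketch below creates a minimal CrdbCluster through the official kubernetes Python client; the group/version/plural values come from this patch, while the object name, namespace, image tag and storage size are illustrative assumptions only.

```python
# Hedged sketch: create a minimal CrdbCluster validated by the CRD above.
# group/version/plural match the CRD and webhook rules in this patch;
# metadata values, image tag and sizes are illustrative assumptions.
from kubernetes import client, config

config.load_kube_config()  # use load_incluster_config() when running in-cluster

crdb_cluster = {
    'apiVersion': 'crdb.cockroachlabs.com/v1alpha1',
    'kind': 'CrdbCluster',
    'metadata': {'name': 'cockroachdb', 'namespace': 'tfs-ccdb'},
    'spec': {
        'nodes': 3,                                   # schema minimum is 3
        'tlsEnabled': True,
        'image': {'name': 'cockroachdb/cockroach:v22.1.8'},
        'dataStore': {'pvc': {'spec': {
            'accessModes': ['ReadWriteOnce'],
            'resources': {'requests': {'storage': '60Gi'}},
            'volumeMode': 'Filesystem',
        }}},
    },
}

client.CustomObjectsApi().create_namespaced_custom_object(
    group='crdb.cockroachlabs.com', version='v1alpha1',
    namespace='tfs-ccdb', plural='crdbclusters', body=crdb_cluster)
```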
diff --git a/cluster-init.yaml b/manifests/cockroachdb/from_carlos/cluster-init.yaml
similarity index 100%
rename from cluster-init.yaml
rename to manifests/cockroachdb/from_carlos/cluster-init.yaml
diff --git a/cockroachdb-statefulset.yaml b/manifests/cockroachdb/from_carlos/cockroachdb-statefulset.yaml
similarity index 100%
rename from cockroachdb-statefulset.yaml
rename to manifests/cockroachdb/from_carlos/cockroachdb-statefulset.yaml
diff --git a/manifests/cockroachdb/operator.yaml b/manifests/cockroachdb/operator.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2db3c37f8b930043b0d6d0288020348aec59b42f
--- /dev/null
+++ b/manifests/cockroachdb/operator.yaml
@@ -0,0 +1,602 @@
+# Copyright 2022 The Cockroach Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+apiVersion: v1
+kind: Namespace
+metadata:
+  labels:
+    control-plane: cockroach-operator
+  name: cockroach-operator-system
+---
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+  labels:
+    app: cockroach-operator
+  name: cockroach-operator-sa
+  namespace: cockroach-operator-system
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  creationTimestamp: null
+  name: cockroach-operator-role
+rules:
+- apiGroups:
+  - admissionregistration.k8s.io
+  resources:
+  - mutatingwebhookconfigurations
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - admissionregistration.k8s.io
+  resources:
+  - validatingwebhookconfigurations
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets/scale
+  verbs:
+  - get
+  - update
+  - watch
+- apiGroups:
+  - apps
+  resources:
+  - statefulsets/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - batch
+  resources:
+  - jobs
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - batch
+  resources:
+  - jobs/status
+  verbs:
+  - get
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - watch
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests/approval
+  verbs:
+  - update
+- apiGroups:
+  - certificates.k8s.io
+  resources:
+  - certificatesigningrequests/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - configmaps
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - configmaps/status
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - nodes
+  verbs:
+  - get
+  - list
+- apiGroups:
+  - ""
+  resources:
+  - persistentvolumeclaims
+  verbs:
+  - list
+  - update
+- apiGroups:
+  - ""
+  resources:
+  - pods
+  verbs:
+  - delete
+  - deletecollection
+  - get
+  - list
+- apiGroups:
+  - ""
+  resources:
+  - pods/exec
+  verbs:
+  - create
+- apiGroups:
+  - ""
+  resources:
+  - pods/log
+  verbs:
+  - get
+- apiGroups:
+  - ""
+  resources:
+  - secrets
+  verbs:
+  - create
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - serviceaccounts
+  verbs:
+  - create
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - ""
+  resources:
+  - services/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - crdb.cockroachlabs.com
+  resources:
+  - crdbclusters
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - crdb.cockroachlabs.com
+  resources:
+  - crdbclusters/finalizers
+  verbs:
+  - update
+- apiGroups:
+  - crdb.cockroachlabs.com
+  resources:
+  - crdbclusters/status
+  verbs:
+  - get
+  - patch
+  - update
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - networking.k8s.io
+  resources:
+  - ingresses/status
+  verbs:
+  - get
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets
+  verbs:
+  - create
+  - delete
+  - get
+  - list
+  - patch
+  - update
+  - watch
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets/finalizers
+  verbs:
+  - get
+  - list
+  - watch
+- apiGroups:
+  - policy
+  resources:
+  - poddisruptionbudgets/status
+  verbs:
+  - get
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - rolebindings
+  verbs:
+  - create
+  - get
+  - list
+  - watch
+- apiGroups:
+  - rbac.authorization.k8s.io
+  resources:
+  - roles
+  verbs:
+  - create
+  - get
+  - list
+  - watch
+- apiGroups:
+  - security.openshift.io
+  resources:
+  - securitycontextconstraints
+  verbs:
+  - use
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+  name: cockroach-operator-rolebinding
+roleRef:
+  apiGroup: rbac.authorization.k8s.io
+  kind: ClusterRole
+  name: cockroach-operator-role
+subjects:
+- kind: ServiceAccount
+  name: cockroach-operator-sa
+  namespace: cockroach-operator-system
+---
+apiVersion: v1
+kind: Service
+metadata:
+  labels:
+    control-plane: cockroach-operator
+  name: cockroach-operator-webhook-service
+  namespace: cockroach-operator-system
+spec:
+  ports:
+  - port: 443
+    targetPort: 9443
+  selector:
+    app: cockroach-operator
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: cockroach-operator
+  name: cockroach-operator-manager
+  namespace: cockroach-operator-system
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: cockroach-operator
+  template:
+    metadata:
+      labels:
+        app: cockroach-operator
+    spec:
+      containers:
+      - args:
+        - -zap-log-level
+        - info
+        env:
+        - name: RELATED_IMAGE_COCKROACH_v20_1_4
+          value: cockroachdb/cockroach:v20.1.4
+        - name: RELATED_IMAGE_COCKROACH_v20_1_5
+          value: cockroachdb/cockroach:v20.1.5
+        - name: RELATED_IMAGE_COCKROACH_v20_1_8
+          value: cockroachdb/cockroach:v20.1.8
+        - name: RELATED_IMAGE_COCKROACH_v20_1_11
+          value: cockroachdb/cockroach:v20.1.11
+        - name: RELATED_IMAGE_COCKROACH_v20_1_12
+          value: cockroachdb/cockroach:v20.1.12
+        - name: RELATED_IMAGE_COCKROACH_v20_1_13
+          value: cockroachdb/cockroach:v20.1.13
+        - name: RELATED_IMAGE_COCKROACH_v20_1_15
+          value: cockroachdb/cockroach:v20.1.15
+        - name: RELATED_IMAGE_COCKROACH_v20_1_16
+          value: cockroachdb/cockroach:v20.1.16
+        - name: RELATED_IMAGE_COCKROACH_v20_1_17
+          value: cockroachdb/cockroach:v20.1.17
+        - name: RELATED_IMAGE_COCKROACH_v20_2_0
+          value: cockroachdb/cockroach:v20.2.0
+        - name: RELATED_IMAGE_COCKROACH_v20_2_1
+          value: cockroachdb/cockroach:v20.2.1
+        - name: RELATED_IMAGE_COCKROACH_v20_2_2
+          value: cockroachdb/cockroach:v20.2.2
+        - name: RELATED_IMAGE_COCKROACH_v20_2_3
+          value: cockroachdb/cockroach:v20.2.3
+        - name: RELATED_IMAGE_COCKROACH_v20_2_4
+          value: cockroachdb/cockroach:v20.2.4
+        - name: RELATED_IMAGE_COCKROACH_v20_2_5
+          value: cockroachdb/cockroach:v20.2.5
+        - name: RELATED_IMAGE_COCKROACH_v20_2_6
+          value: cockroachdb/cockroach:v20.2.6
+        - name: RELATED_IMAGE_COCKROACH_v20_2_8
+          value: cockroachdb/cockroach:v20.2.8
+        - name: RELATED_IMAGE_COCKROACH_v20_2_9
+          value: cockroachdb/cockroach:v20.2.9
+        - name: RELATED_IMAGE_COCKROACH_v20_2_10
+          value: cockroachdb/cockroach:v20.2.10
+        - name: RELATED_IMAGE_COCKROACH_v20_2_11
+          value: cockroachdb/cockroach:v20.2.11
+        - name: RELATED_IMAGE_COCKROACH_v20_2_12
+          value: cockroachdb/cockroach:v20.2.12
+        - name: RELATED_IMAGE_COCKROACH_v20_2_13
+          value: cockroachdb/cockroach:v20.2.13
+        - name: RELATED_IMAGE_COCKROACH_v20_2_14
+          value: cockroachdb/cockroach:v20.2.14
+        - name: RELATED_IMAGE_COCKROACH_v20_2_15
+          value: cockroachdb/cockroach:v20.2.15
+        - name: RELATED_IMAGE_COCKROACH_v20_2_16
+          value: cockroachdb/cockroach:v20.2.16
+        - name: RELATED_IMAGE_COCKROACH_v20_2_17
+          value: cockroachdb/cockroach:v20.2.17
+        - name: RELATED_IMAGE_COCKROACH_v20_2_18
+          value: cockroachdb/cockroach:v20.2.18
+        - name: RELATED_IMAGE_COCKROACH_v20_2_19
+          value: cockroachdb/cockroach:v20.2.19
+        - name: RELATED_IMAGE_COCKROACH_v21_1_0
+          value: cockroachdb/cockroach:v21.1.0
+        - name: RELATED_IMAGE_COCKROACH_v21_1_1
+          value: cockroachdb/cockroach:v21.1.1
+        - name: RELATED_IMAGE_COCKROACH_v21_1_2
+          value: cockroachdb/cockroach:v21.1.2
+        - name: RELATED_IMAGE_COCKROACH_v21_1_3
+          value: cockroachdb/cockroach:v21.1.3
+        - name: RELATED_IMAGE_COCKROACH_v21_1_4
+          value: cockroachdb/cockroach:v21.1.4
+        - name: RELATED_IMAGE_COCKROACH_v21_1_5
+          value: cockroachdb/cockroach:v21.1.5
+        - name: RELATED_IMAGE_COCKROACH_v21_1_6
+          value: cockroachdb/cockroach:v21.1.6
+        - name: RELATED_IMAGE_COCKROACH_v21_1_7
+          value: cockroachdb/cockroach:v21.1.7
+        - name: RELATED_IMAGE_COCKROACH_v21_1_9
+          value: cockroachdb/cockroach:v21.1.9
+        - name: RELATED_IMAGE_COCKROACH_v21_1_10
+          value: cockroachdb/cockroach:v21.1.10
+        - name: RELATED_IMAGE_COCKROACH_v21_1_11
+          value: cockroachdb/cockroach:v21.1.11
+        - name: RELATED_IMAGE_COCKROACH_v21_1_12
+          value: cockroachdb/cockroach:v21.1.12
+        - name: RELATED_IMAGE_COCKROACH_v21_1_13
+          value: cockroachdb/cockroach:v21.1.13
+        - name: RELATED_IMAGE_COCKROACH_v21_1_14
+          value: cockroachdb/cockroach:v21.1.14
+        - name: RELATED_IMAGE_COCKROACH_v21_1_15
+          value: cockroachdb/cockroach:v21.1.15
+        - name: RELATED_IMAGE_COCKROACH_v21_1_16
+          value: cockroachdb/cockroach:v21.1.16
+        - name: RELATED_IMAGE_COCKROACH_v21_1_17
+          value: cockroachdb/cockroach:v21.1.17
+        - name: RELATED_IMAGE_COCKROACH_v21_1_18
+          value: cockroachdb/cockroach:v21.1.18
+        - name: RELATED_IMAGE_COCKROACH_v21_1_19
+          value: cockroachdb/cockroach:v21.1.19
+        - name: RELATED_IMAGE_COCKROACH_v21_2_0
+          value: cockroachdb/cockroach:v21.2.0
+        - name: RELATED_IMAGE_COCKROACH_v21_2_1
+          value: cockroachdb/cockroach:v21.2.1
+        - name: RELATED_IMAGE_COCKROACH_v21_2_2
+          value: cockroachdb/cockroach:v21.2.2
+        - name: RELATED_IMAGE_COCKROACH_v21_2_3
+          value: cockroachdb/cockroach:v21.2.3
+        - name: RELATED_IMAGE_COCKROACH_v21_2_4
+          value: cockroachdb/cockroach:v21.2.4
+        - name: RELATED_IMAGE_COCKROACH_v21_2_5
+          value: cockroachdb/cockroach:v21.2.5
+        - name: RELATED_IMAGE_COCKROACH_v21_2_7
+          value: cockroachdb/cockroach:v21.2.7
+        - name: RELATED_IMAGE_COCKROACH_v21_2_8
+          value: cockroachdb/cockroach:v21.2.8
+        - name: RELATED_IMAGE_COCKROACH_v21_2_9
+          value: cockroachdb/cockroach:v21.2.9
+        - name: RELATED_IMAGE_COCKROACH_v21_2_10
+          value: cockroachdb/cockroach:v21.2.10
+        - name: RELATED_IMAGE_COCKROACH_v21_2_11
+          value: cockroachdb/cockroach:v21.2.11
+        - name: RELATED_IMAGE_COCKROACH_v21_2_12
+          value: cockroachdb/cockroach:v21.2.12
+        - name: RELATED_IMAGE_COCKROACH_v21_2_13
+          value: cockroachdb/cockroach:v21.2.13
+        - name: RELATED_IMAGE_COCKROACH_v21_2_14
+          value: cockroachdb/cockroach:v21.2.14
+        - name: RELATED_IMAGE_COCKROACH_v21_2_15
+          value: cockroachdb/cockroach:v21.2.15
+        - name: RELATED_IMAGE_COCKROACH_v21_2_16
+          value: cockroachdb/cockroach:v21.2.16
+        - name: RELATED_IMAGE_COCKROACH_v22_1_0
+          value: cockroachdb/cockroach:v22.1.0
+        - name: RELATED_IMAGE_COCKROACH_v22_1_1
+          value: cockroachdb/cockroach:v22.1.1
+        - name: RELATED_IMAGE_COCKROACH_v22_1_2
+          value: cockroachdb/cockroach:v22.1.2
+        - name: RELATED_IMAGE_COCKROACH_v22_1_3
+          value: cockroachdb/cockroach:v22.1.3
+        - name: RELATED_IMAGE_COCKROACH_v22_1_4
+          value: cockroachdb/cockroach:v22.1.4
+        - name: RELATED_IMAGE_COCKROACH_v22_1_5
+          value: cockroachdb/cockroach:v22.1.5
+        - name: RELATED_IMAGE_COCKROACH_v22_1_7
+          value: cockroachdb/cockroach:v22.1.7
+        - name: RELATED_IMAGE_COCKROACH_v22_1_8
+          value: cockroachdb/cockroach:v22.1.8
+        - name: OPERATOR_NAME
+          value: cockroachdb
+        - name: WATCH_NAMESPACE
+          value: tfs-ccdb
+        - name: POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
+        - name: NAMESPACE
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.namespace
+        image: cockroachdb/cockroach-operator:v2.8.0
+        imagePullPolicy: IfNotPresent
+        name: cockroach-operator
+        resources:
+          requests:
+            cpu: 10m
+            memory: 32Mi
+      serviceAccountName: cockroach-operator-sa
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: MutatingWebhookConfiguration
+metadata:
+  creationTimestamp: null
+  name: cockroach-operator-mutating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: cockroach-operator-webhook-service
+      namespace: cockroach-operator-system
+      path: /mutate-crdb-cockroachlabs-com-v1alpha1-crdbcluster
+  failurePolicy: Fail
+  name: mcrdbcluster.kb.io
+  rules:
+  - apiGroups:
+    - crdb.cockroachlabs.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - crdbclusters
+  sideEffects: None
+---
+apiVersion: admissionregistration.k8s.io/v1
+kind: ValidatingWebhookConfiguration
+metadata:
+  creationTimestamp: null
+  name: cockroach-operator-validating-webhook-configuration
+webhooks:
+- admissionReviewVersions:
+  - v1
+  clientConfig:
+    service:
+      name: cockroach-operator-webhook-service
+      namespace: cockroach-operator-system
+      path: /validate-crdb-cockroachlabs-com-v1alpha1-crdbcluster
+  failurePolicy: Fail
+  name: vcrdbcluster.kb.io
+  rules:
+  - apiGroups:
+    - crdb.cockroachlabs.com
+    apiVersions:
+    - v1alpha1
+    operations:
+    - CREATE
+    - UPDATE
+    resources:
+    - crdbclusters
+  sideEffects: None
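Note that the Deployment above pins WATCH_NAMESPACE to tfs-ccdb, so the operator only reconciles CrdbCluster objects in that namespace. Before creating clusters it can help to confirm the operator is up; a hedged sketch follows (the Deployment name and namespace match the manifest, the polling loop is an assumption):

```python
# Hedged sketch: wait until the operator Deployment defined above is ready.
import time
from kubernetes import client, config

config.load_kube_config()
apps = client.AppsV1Api()
for _ in range(30):  # poll for up to ~60 seconds
    dep = apps.read_namespaced_deployment(
        name='cockroach-operator-manager', namespace='cockroach-operator-system')
    if (dep.status.ready_replicas or 0) >= 1:
        print('cockroach-operator is ready')
        break
    time.sleep(2)
else:
    raise RuntimeError('cockroach-operator did not become ready in time')
```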
diff --git a/manifests/contextservice.yaml b/manifests/contextservice.yaml
index 5c07971a328a389473899375f2d2aad9031f473e..8201aed3ed85cf1796be4380d702b90508c52e1d 100644
--- a/manifests/contextservice.yaml
+++ b/manifests/contextservice.yaml
@@ -46,6 +46,8 @@ spec:
         - containerPort: 1010
         - containerPort: 8080
         env:
+        - name: CCDB_URL
+          value: "cockroachdb://tfs:tfs123@cockroachdb-public.cockroachdb.svc.cluster.local:26257/tfs?sslmode=require"
         - name: DB_BACKEND
           value: "redis"
         - name: MB_BACKEND
@@ -54,8 +56,6 @@ spec:
           value: "0"
         - name: LOG_LEVEL
           value: "INFO"
-        - name: POPULATE_FAKE_DATA
-          value: "false"
         readinessProbe:
           exec:
             command: ["/bin/grpc_health_probe", "-addr=:1010"]
diff --git a/src/context/Config.py b/src/context/Config.py
index 6f5d1dc0b347dc5db27a2cfae973a4e5bdf7b4cc..70a33251242c51f49140e596b8208a19dd5245f7 100644
--- a/src/context/Config.py
+++ b/src/context/Config.py
@@ -12,5 +12,3 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Autopopulate the component with fake data for testing purposes?
-POPULATE_FAKE_DATA = False
diff --git a/src/context/requirements.in b/src/context/requirements.in
index 6e07456fce9c4200d33dbece8ca05ee5177b1fc1..6c68d692d6745e93f23fac8ca04be492a262365f 100644
--- a/src/context/requirements.in
+++ b/src/context/requirements.in
@@ -1,7 +1,8 @@
 Flask==2.1.3
 Flask-RESTful==0.3.9
+psycopg2-binary==2.9.3
 redis==4.1.2
 requests==2.27.1
-sqlalchemy==1.4.40
-sqlalchemy-cockroachdb
-psycopg2-binary
+SQLAlchemy==1.4.40
+sqlalchemy-cockroachdb==1.4.3
+SQLAlchemy-Utils==0.38.3
diff --git a/src/context/service/grpc_server/Constants.py b/src/context/service/Constants.py
similarity index 100%
rename from src/context/service/grpc_server/Constants.py
rename to src/context/service/Constants.py
diff --git a/src/context/service/grpc_server/ContextService.py b/src/context/service/ContextService.py
similarity index 86%
rename from src/context/service/grpc_server/ContextService.py
rename to src/context/service/ContextService.py
index efede01dea7c387abb2a06482959de5c23594779..c4881ccf59ec3fae216bd536eed6a338503adaf9 100644
--- a/src/context/service/grpc_server/ContextService.py
+++ b/src/context/service/ContextService.py
@@ -12,15 +12,13 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import logging, sqlalchemy
 from common.Constants import ServiceNameEnum
 from common.Settings import get_service_port_grpc
 from common.message_broker.MessageBroker import MessageBroker
 from common.proto.context_pb2_grpc import add_ContextServiceServicer_to_server
 from common.proto.context_policy_pb2_grpc import add_ContextPolicyServiceServicer_to_server
 from common.tools.service.GenericGrpcService import GenericGrpcService
-from sqlalchemy.orm import Session
-import logging
-
 from .ContextServiceServicerImpl import ContextServiceServicerImpl
 
 # Custom gRPC settings
@@ -28,10 +26,12 @@ GRPC_MAX_WORKERS = 200 # multiple clients might keep connections alive for Get*E
 LOGGER = logging.getLogger(__name__)
 
 class ContextService(GenericGrpcService):
-    def __init__(self, session : Session, messagebroker : MessageBroker, cls_name: str = __name__) -> None:
+    def __init__(
+        self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker, cls_name: str = __name__
+    ) -> None:
         port = get_service_port_grpc(ServiceNameEnum.CONTEXT)
         super().__init__(port, max_workers=GRPC_MAX_WORKERS, cls_name=cls_name)
-        self.context_servicer = ContextServiceServicerImpl(session, messagebroker)
+        self.context_servicer = ContextServiceServicerImpl(db_engine, messagebroker)
 
     def install_servicers(self):
         add_ContextServiceServicer_to_server(self.context_servicer, self.server)
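With this signature change, the component entry point is expected to build the SQLAlchemy engine itself rather than a Session. A hedged wiring sketch using the CCDB_URL variable injected by manifests/contextservice.yaml; the messagebroker object is assumed to be constructed exactly as before:

```python
# Hedged wiring sketch: build the Engine handed to ContextService from the
# CCDB_URL env var added in manifests/contextservice.yaml.
import os, sqlalchemy
from context.service.ContextService import ContextService

db_engine = sqlalchemy.create_engine(os.environ['CCDB_URL'], echo=False)
# messagebroker is assumed to be constructed by the entry point as before:
# grpc_service = ContextService(db_engine, messagebroker); grpc_service.start()
```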
diff --git a/src/context/service/ContextServiceServicerImpl.py b/src/context/service/ContextServiceServicerImpl.py
new file mode 100644
index 0000000000000000000000000000000000000000..b5725f00741a2d24575cd77b210aa41b4343287e
--- /dev/null
+++ b/src/context/service/ContextServiceServicerImpl.py
@@ -0,0 +1,1195 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import grpc, json, logging, operator, sqlalchemy, threading, uuid
+from sqlalchemy.orm import Session, contains_eager, selectinload, sessionmaker
+from sqlalchemy.dialects.postgresql import UUID, insert
+from sqlalchemy_cockroachdb import run_transaction
+from typing import Dict, Iterator, List, Optional, Set, Tuple, Union
+from common.message_broker.MessageBroker import MessageBroker
+from common.orm.backend.Tools import key_to_str
+from common.proto.context_pb2 import (
+    Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
+    Context, ContextEvent, ContextId, ContextIdList, ContextList,
+    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
+    Empty, EventTypeEnum,
+    Link, LinkEvent, LinkId, LinkIdList, LinkList,
+    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
+    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
+    Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList,
+    ConfigActionEnum, Constraint)
+from common.proto.policy_pb2 import PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule
+from common.proto.context_pb2_grpc import ContextServiceServicer
+from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer
+from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
+from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException, NotFoundException
+from common.tools.grpc.Tools import grpc_message_to_json, grpc_message_to_json_string
+from context.service.Database import Database
+from context.service.database.ConfigModel import (
+    ConfigModel, ORM_ConfigActionEnum, ConfigRuleModel, grpc_config_rules_to_raw, update_config)
+from context.service.database.ConnectionModel import ConnectionModel, set_path
+from context.service.database.ConstraintModel import (
+    ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS, set_constraints)
+from context.service.database.ContextModel import ContextModel
+from context.service.database.DeviceModel import (
+    DeviceModel, grpc_to_enum__device_operational_status, set_drivers, grpc_to_enum__device_driver, DriverModel)
+from context.service.database.EndPointModel import EndPointModel, KpiSampleTypeModel, set_kpi_sample_types
+from context.service.database.Events import notify_event
+from context.service.database.KpiSampleType import grpc_to_enum__kpi_sample_type
+from context.service.database.LinkModel import LinkModel
+from context.service.database.PolicyRuleModel import PolicyRuleModel
+from context.service.database.RelationModels import (
+    ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel,
+    SliceSubSliceModel, TopologyDeviceModel, TopologyLinkModel)
+from context.service.database.ServiceModel import (
+    ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type)
+from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status
+from context.service.database.TopologyModel import TopologyModel
+from .Constants import (
+    CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE,
+    TOPIC_TOPOLOGY)
+
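+# Note: several imports above (threading, operator, selectinload, contains_eager, key_to_str, ...) are used
+# only by the commented-out legacy implementations kept further below during the CockroachDB migration; they
+# can be pruned once that code is removed or re-enabled.
+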
+LOGGER = logging.getLogger(__name__)
+
+SERVICE_NAME = 'Context'
+METHOD_NAMES = [
+    'ListConnectionIds', 'ListConnections', 'GetConnection', 'SetConnection', 'RemoveConnection', 'GetConnectionEvents',
+    'ListContextIds',    'ListContexts',    'GetContext',    'SetContext',    'RemoveContext',    'GetContextEvents',
+    'ListTopologyIds',   'ListTopologies',  'GetTopology',   'SetTopology',   'RemoveTopology',   'GetTopologyEvents',
+    'ListDeviceIds',     'ListDevices',     'GetDevice',     'SetDevice',     'RemoveDevice',     'GetDeviceEvents',
+    'ListLinkIds',       'ListLinks',       'GetLink',       'SetLink',       'RemoveLink',       'GetLinkEvents',
+    'ListServiceIds',    'ListServices',    'GetService',    'SetService',    'RemoveService',    'GetServiceEvents',
+    'ListSliceIds',      'ListSlices',      'GetSlice',      'SetSlice',      'RemoveSlice',      'GetSliceEvents',
+    'ListPolicyRuleIds', 'ListPolicyRules', 'GetPolicyRule', 'SetPolicyRule', 'RemovePolicyRule',
+    'UnsetService',      'UnsetSlice',
+]
+METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
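+
+# safe_and_metered_rpc_method() decorates each RPC below; broadly, it uses METRICS to count and time the
+# calls, logs requests/responses, and maps ServiceExceptions (InvalidArgumentException, NotFoundException,
+# ...) onto the corresponding gRPC status codes. See common.rpc_method_wrapper.Decorator for the details.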
+
+class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer):
+    def __init__(self, db_engine : sqlalchemy.engine.Engine, messagebroker : MessageBroker) -> None:
+        LOGGER.debug('Creating Servicer...')
+        self.db_engine = db_engine
+        #self.lock = threading.Lock()
+        #session = sessionmaker(bind=db_engine, expire_on_commit=False)
+        #self.session = session
+        #self.database = Database(session)
+        self.messagebroker = messagebroker
+        LOGGER.debug('Servicer Created')
+
+    # ----- Context ----------------------------------------------------------------------------------------------------
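+    # Transaction pattern used by the RPCs below: build a short-lived Session factory bound to the engine
+    # and hand the unit of work to sqlalchemy_cockroachdb.run_transaction(), which opens the transaction,
+    # retries it on CockroachDB serialization conflicts, and commits it. A minimal sketch (names are
+    # illustrative, not part of the class):
+    #
+    #     def callback(session : Session) -> List[Dict]:
+    #         return [obj.dump() for obj in session.query(ContextModel).all()]
+    #     result = run_transaction(sessionmaker(bind=self.db_engine), callback)
+    #
+    # Callbacks must not call session.commit() themselves, and should return plain dicts rather than ORM
+    # instances: the session is closed once run_transaction() returns.
+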
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList:
+        def callback(session : Session) -> List[Dict]:
+            obj_list : List[ContextModel] = session.query(ContextModel).all()
+            return [obj.dump_id() for obj in obj_list]
+        return ContextIdList(context_ids=run_transaction(sessionmaker(bind=self.db_engine), callback))
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList:
+        def callback(session : Session) -> List[Dict]:
+            obj_list : List[ContextModel] = session.query(ContextModel).all()
+            return [obj.dump() for obj in obj_list]
+        return ContextList(contexts=run_transaction(sessionmaker(bind=self.db_engine), callback))
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context:
+        context_uuid = str(uuid.uuid5(uuid.NAMESPACE_OID, request.context_uuid.uuid))
+        def callback(session : Session) -> Optional[Dict]:
+            obj : Optional[ContextModel] = \
+                session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none()
+            return None if obj is None else obj.dump()
+        obj = run_transaction(sessionmaker(bind=self.db_engine), callback)
+        if obj is None: raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid)
+        return Context(**obj)
+
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId:
+        context_uuid = str(uuid.uuid5(uuid.NAMESPACE_OID, request.context_id.context_uuid.uuid))
+        context_name = request.context_id.context_uuid.uuid
+
+        for i, topology_id in enumerate(request.topology_ids):
+            topology_context_uuid = topology_id.context_id.context_uuid.uuid
+            if topology_context_uuid != context_uuid:
+                raise InvalidArgumentException(
+                    'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid,
+                    ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)])
+
+        for i, service_id in enumerate(request.service_ids):
+            service_context_uuid = service_id.context_id.context_uuid.uuid
+            if service_context_uuid != context_uuid:
+                raise InvalidArgumentException(
+                    'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid,
+                    ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)])
+
+        def callback(session : Session) -> Tuple[Dict, bool]:
+            obj : Optional[ContextModel] = \
+                session.query(ContextModel).with_for_update().filter_by(context_uuid=context_uuid).one_or_none()
+            updated = obj is not None
+            obj = session.merge(ContextModel(context_uuid=context_uuid, context_name=context_name))
+            session.flush()  # run_transaction() commits (and retries) the enclosing transaction; no commit here
+            return obj.dump_id(), updated
+
+        obj_id, updated = run_transaction(sessionmaker(bind=self.db_engine), callback)
+
+        #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+        #notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': obj_id})
+        return ContextId(**obj_id)
+
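+    # In Get/Set/RemoveContext, the uuid field received over gRPC is treated as a human-readable name: the
+    # row key is derived deterministically from it via uuid5, so repeated calls with the same name always
+    # address the same row. Illustrative only:
+    #     str(uuid.uuid5(uuid.NAMESPACE_OID, 'admin'))  # same input always yields the same UUID
+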
+    @safe_and_metered_rpc_method(METRICS, LOGGER)
+    def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty:
+        context_uuid = str(uuid.uuid5(uuid.NAMESPACE_OID, request.context_uuid.uuid))
+
+        def callback(session : Session) -> bool:
+            num_deleted = session.query(ContextModel).filter_by(context_uuid=context_uuid).delete()
+            return num_deleted > 0
+
+        deleted = run_transaction(sessionmaker(bind=self.db_engine), callback)
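+        # delete() returns the number of matching rows, so removing a non-existent context is a no-op rather
+        # than an error; 'deleted' is only needed by the (currently disabled) event notification below.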
+        #if deleted:
+        #    notify_event(self.messagebroker, TOPIC_CONTEXT, EventTypeEnum.EVENTTYPE_REMOVE, {'context_id': request})
+        return Empty()
+
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]:
+#        for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT):
+#            yield ContextEvent(**json.loads(message.content))
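+
+# NOTE: the notify_event() calls and the Get*Events() streaming RPCs are kept commented out throughout this
+# file while the backend is migrated to the CockroachDB engine; the MessageBroker is still injected in the
+# constructor so the event streams can be re-enabled once the migration settles.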
+
+
+    # ----- Topology ---------------------------------------------------------------------------------------------------
+
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList:
+#        context_uuid = request.context_uuid.uuid
+#
+#        with self.session() as session:
+#            result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none()
+#            if not result:
+#                raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid)
+#
+#            db_topologies = result.topology
+#            return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList:
+#        context_uuid = request.context_uuid.uuid
+#
+#        with self.session() as session:
+#            result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(
+#                context_uuid=context_uuid).one_or_none()
+#            if not result:
+#                raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid)
+#
+#            db_topologies = result.topology
+#            return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology:
+#        topology_uuid = request.topology_uuid.uuid
+#
+#        result, dump = self.database.get_object(TopologyModel, topology_uuid, True)
+#        with self.session() as session:
+#            devs = None
+#            links = None
+#
+#            filt = {'topology_uuid': topology_uuid}
+#            topology_devices = session.query(TopologyDeviceModel).filter_by(**filt).all()
+#            if topology_devices:
+#                devs = []
+#                for td in topology_devices:
+#                    filt = {'device_uuid': td.device_uuid}
+#                    devs.append(session.query(DeviceModel).filter_by(**filt).one())
+#
+#            filt = {'topology_uuid': topology_uuid}
+#            topology_links = session.query(TopologyLinkModel).filter_by(**filt).all()
+#            if topology_links:
+#                links = []
+#                for tl in topology_links:
+#                    filt = {'link_uuid': tl.link_uuid}
+#                    links.append(session.query(LinkModel).filter_by(**filt).one())
+#
+#            return Topology(**result.dump(devs, links))
+#
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId:
+#        context_uuid = request.topology_id.context_id.context_uuid.uuid
+#        topology_uuid = request.topology_id.topology_uuid.uuid
+#        with self.session() as session:
+#            topology_add = TopologyModel(topology_uuid=topology_uuid, context_uuid=context_uuid)
+#            updated = True
+#            db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none()
+#            if not db_topology:
+#                updated = False
+#            session.merge(topology_add)
+#            session.commit()
+#            db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none()
+#
+#            for device_id in request.device_ids:
+#                device_uuid = device_id.device_uuid.uuid
+#                td = TopologyDeviceModel(topology_uuid=topology_uuid, device_uuid=device_uuid)
+#                result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(td)
+#
+#
+#            for link_id in request.link_ids:
+#                link_uuid = link_id.link_uuid.uuid
+#                db_link = session.query(LinkModel).filter(
+#                        LinkModel.link_uuid == link_uuid).one_or_none()
+#                tl = TopologyLinkModel(topology_uuid=topology_uuid, link_uuid=link_uuid)
+#                result: Tuple[TopologyLinkModel, bool] = self.database.create_or_update(tl)
+#
+#
+#
+#            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+#            dict_topology_id = db_topology.dump_id()
+#            notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id})
+#            return TopologyId(**dict_topology_id)
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty:
+#        context_uuid = request.context_id.context_uuid.uuid
+#        topology_uuid = request.topology_uuid.uuid
+#
+#        with self.session() as session:
+#            result = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).one_or_none()
+#            if not result:
+#                return Empty()
+#            dict_topology_id = result.dump_id()
+#
+#            session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).delete()
+#            session.commit()
+#            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+#            notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id})
+#            return Empty()
+#
+##    @safe_and_metered_rpc_method(METRICS, LOGGER)
+##    def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]:
+##        for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT):
+##            yield TopologyEvent(**json.loads(message.content))
+#
+#
+#    # ----- Device -----------------------------------------------------------------------------------------------------
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList:
+#        with self.session() as session:
+#            result = session.query(DeviceModel).all()
+#            return DeviceIdList(device_ids=[device.dump_id() for device in result])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList:
+#        with self.session() as session:
+#            result = session.query(DeviceModel).all()
+#            return DeviceList(devices=[device.dump() for device in result])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device:
+#        device_uuid = request.device_uuid.uuid
+#        with self.session() as session:
+#            result = session.query(DeviceModel).filter(DeviceModel.device_uuid == device_uuid).one_or_none()
+#            if not result:
+#                raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid)
+#
+#            rd = result.dump(include_config_rules=True, include_drivers=True, include_endpoints=True)
+#
+#            rt = Device(**rd)
+#
+#            return rt
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId:
+#        with self.session() as session:
+#            device_uuid = request.device_id.device_uuid.uuid
+#
+#            for i, endpoint in enumerate(request.device_endpoints):
+#                endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
+#                if len(endpoint_device_uuid) == 0:
+#                    endpoint_device_uuid = device_uuid
+#                if device_uuid != endpoint_device_uuid:
+#                    raise InvalidArgumentException(
+#                        'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid,
+#                        ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)])
+#
+#            config_rules = grpc_config_rules_to_raw(request.device_config.config_rules)
+#            running_config_result = self.update_config(session, device_uuid, 'device', config_rules)
+#            db_running_config = running_config_result[0][0]
+#            config_uuid = db_running_config.config_uuid
+#
+#            new_obj = DeviceModel(**{
+#                'device_uuid'               : device_uuid,
+#                'device_type'               : request.device_type,
+#                'device_operational_status' : grpc_to_enum__device_operational_status(request.device_operational_status),
+#                'device_config_uuid'        : config_uuid,
+#            })
+#            result: Tuple[DeviceModel, bool] = self.database.create_or_update(new_obj)
+#            db_device, updated = result
+#
+#            self.set_drivers(db_device, request.device_drivers)
+#
+#            for i, endpoint in enumerate(request.device_endpoints):
+#                endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
+#                # endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
+#                # if len(endpoint_device_uuid) == 0:
+#                #     endpoint_device_uuid = device_uuid
+#
+#                endpoint_attributes = {
+#                    'device_uuid'  : db_device.device_uuid,
+#                    'endpoint_uuid': endpoint_uuid,
+#                    'endpoint_type': endpoint.endpoint_type,
+#                }
+#
+#                endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid
+#                endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid
+#                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
+#                    # str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
+#
+#                    db_topology, topo_dump = self.database.get_object(TopologyModel, endpoint_topology_uuid)
+#
+#                    topology_device = TopologyDeviceModel(
+#                        topology_uuid=endpoint_topology_uuid,
+#                        device_uuid=db_device.device_uuid)
+#                    self.database.create_or_update(topology_device)
+#
+#                    endpoint_attributes['topology_uuid'] = db_topology.topology_uuid
+#                new_endpoint = EndPointModel(**endpoint_attributes)
+#                result : Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint)
+#                db_endpoint, updated = result
+#
+#                self.set_kpi_sample_types(db_endpoint, endpoint.kpi_sample_types)
+#
+#            # event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+#            dict_device_id = db_device.dump_id()
+#            # notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id})
+#
+#            return DeviceId(**dict_device_id)
+#
+#    def set_kpi_sample_types(self, db_endpoint: EndPointModel, grpc_endpoint_kpi_sample_types):
+#        db_endpoint_pk = db_endpoint.endpoint_uuid
+#        for kpi_sample_type in grpc_endpoint_kpi_sample_types:
+#            orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type)
+#            # str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name])
+#            data = {'endpoint_uuid': db_endpoint_pk,
+#                    'kpi_sample_type': orm_kpi_sample_type.name,
+#                    'kpi_uuid': str(uuid.uuid4())}
+#            db_endpoint_kpi_sample_type = KpiSampleTypeModel(**data)
+#            self.database.create(db_endpoint_kpi_sample_type)
+#
+#    def set_drivers(self, db_device: DeviceModel, grpc_device_drivers):
+#        db_device_pk = db_device.device_uuid
+#        for driver in grpc_device_drivers:
+#            orm_driver = grpc_to_enum__device_driver(driver)
+#            str_device_driver_key = key_to_str([db_device_pk, orm_driver.name])
+#            driver_config = {
+#                # "driver_uuid": str(uuid.uuid4()),
+#                "device_uuid": db_device_pk,
+#                "driver": orm_driver.name
+#            }
+#            db_device_driver = DriverModel(**driver_config)
+#            db_device_driver.device_fk = db_device
+#            db_device_driver.driver = orm_driver
+#
+#            self.database.create_or_update(db_device_driver)
+#
+#    def update_config(
+#            self, session, db_parent_pk: str, config_name: str,
+#            raw_config_rules: List[Tuple[ORM_ConfigActionEnum, str, str]]
+#    ) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]:
+#
+#        created = False
+#
+#        db_config = session.query(ConfigModel).filter_by(**{ConfigModel.main_pk_name(): db_parent_pk}).one_or_none()
+#        if not db_config:
+#            db_config = ConfigModel()
+#            setattr(db_config, ConfigModel.main_pk_name(), db_parent_pk)
+#            session.add(db_config)
+#            session.commit()
+#            created = True
+#
+#        LOGGER.info('UPDATED-CONFIG: {}'.format(db_config.dump()))
+#
+#        db_objects: List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)]
+#
+#        for position, (action, resource_key, resource_value) in enumerate(raw_config_rules):
+#            if action == ORM_ConfigActionEnum.SET:
+#                result : Tuple[ConfigRuleModel, bool] = self.set_config_rule(
+#                    db_config, position, resource_key, resource_value)
+#                db_config_rule, updated = result
+#                db_objects.append((db_config_rule, updated))
+#            elif action == ORM_ConfigActionEnum.DELETE:
+#                self.delete_config_rule(db_config, resource_key)
+#            else:
+#                msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})'
+#                raise AttributeError(
+#                    msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value)))
+#
+#        return db_objects
+#
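+#    # For reference, update_config() consumes the raw triples produced by grpc_config_rules_to_raw();
+#    # illustrative values only:
+#    #     [(ORM_ConfigActionEnum.SET,    '/interface[eth0]', '{"enabled": true}'),
+#    #      (ORM_ConfigActionEnum.DELETE, '/interface[eth1]', '')]
+#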
+#    def set_config_rule(self, db_config: ConfigModel, position: int, resource_key: str, resource_value: str,
+#    ):  # -> Tuple[ConfigRuleModel, bool]:
+#
+#        from context.service.database.Tools import fast_hasher
+#        str_rule_key_hash = fast_hasher(resource_key)
+#        str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':')
+#        pk = str(uuid.uuid5(uuid.UUID('9566448d-e950-425e-b2ae-7ead656c7e47'), str_config_rule_key))
+#        data = {'config_rule_uuid': pk, 'config_uuid': db_config.config_uuid, 'position': position,
+#                'action': ORM_ConfigActionEnum.SET, 'key': resource_key, 'value': resource_value}
+#        to_add = ConfigRuleModel(**data)
+#
+#        result, updated = self.database.create_or_update(to_add)
+#        return result, updated
+#
+#    def delete_config_rule(
+#            self, db_config: ConfigModel, resource_key: str
+#    ) -> None:
+#
+#        from context.service.database.Tools import fast_hasher
+#        str_rule_key_hash = fast_hasher(resource_key)
+#        str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':')
+#
+#        db_config_rule = self.database.get_object(ConfigRuleModel, str_config_rule_key, raise_if_not_found=False)
+#
+#        if db_config_rule is None:
+#            return
+#        db_config_rule.delete()
+#
+#    def delete_all_config_rules(self, db_config: ConfigModel) -> None:
+#
+#        db_config_rule_pks = db_config.references(ConfigRuleModel)
+#        for pk, _ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete()
+#
+#        """
+#        for position, (action, resource_key, resource_value) in enumerate(raw_config_rules):
+#            if action == ORM_ConfigActionEnum.SET:
+#                result: Tuple[ConfigRuleModel, bool] = set_config_rule(
+#                    database, db_config, position, resource_key, resource_value)
+#                db_config_rule, updated = result
+#                db_objects.append((db_config_rule, updated))
+#            elif action == ORM_ConfigActionEnum.DELETE:
+#                delete_config_rule(database, db_config, resource_key)
+#            else:
+#                msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})'
+#                raise AttributeError(
+#                    msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value)))
+#
+#        return db_objects
+#        """
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty:
+#        device_uuid = request.device_uuid.uuid
+#
+#        with self.session() as session:
+#            db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none()
+#            if not db_device:
+#                return Empty()
+#            dict_device_id = db_device.dump_id()
+#
+#            # remove dependent rows before deleting the device row itself
+#            session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete()
+#            session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete()
+#            session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete()
+#
+#            session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete()
+#            session.commit()
+#            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+#            notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id})
+#            return Empty()
+#
+##    @safe_and_metered_rpc_method(METRICS, LOGGER)
+##    def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
+##        for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT):
+##            yield DeviceEvent(**json.loads(message.content))
+#
+#
+#
+#
+#    # ----- Link -------------------------------------------------------------------------------------------------------
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList:
+#        with self.session() as session:
+#            result = session.query(LinkModel).all()
+#            return LinkIdList(link_ids=[db_link.dump_id() for db_link in result])
+#
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList:
+#        with self.session() as session:
+#            link_list = LinkList()
+#
+#            db_links = session.query(LinkModel).all()
+#
+#            for db_link in db_links:
+#                link_uuid = db_link.link_uuid
+#                filt = {'link_uuid': link_uuid}
+#                link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all()
+#                if link_endpoints:
+#                    eps = []
+#                    for lep in link_endpoints:
+#                        filt = {'endpoint_uuid': lep.endpoint_uuid}
+#                        eps.append(session.query(EndPointModel).filter_by(**filt).one())
+#                    link_list.links.append(Link(**db_link.dump(eps)))
+#
+#            return link_list
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link:
+#        link_uuid = request.link_uuid.uuid
+#        with self.session() as session:
+#            result = session.query(LinkModel).filter(LinkModel.link_uuid == link_uuid).one_or_none()
+#            if not result:
+#                raise NotFoundException(LinkModel.__name__.replace('Model', ''), link_uuid)
+#
+#            filt = {'link_uuid': link_uuid}
+#            link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all()
+#            if link_endpoints:
+#                eps = []
+#                for lep in link_endpoints:
+#                    filt = {'endpoint_uuid': lep.endpoint_uuid}
+#                    eps.append(session.query(EndPointModel).filter_by(**filt).one())
+#                return Link(**result.dump(eps))
+#
+#            rd = result.dump()
+#            rt = Link(**rd)
+#
+#            return rt
+#
+#
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId:
+#        link_uuid = request.link_id.link_uuid.uuid
+#
+#        new_link = LinkModel(**{
+#            'link_uuid': link_uuid
+#        })
+#        result: Tuple[LinkModel, bool] = self.database.create_or_update(new_link)
+#        db_link, updated = result
+#
+#        for endpoint_id in request.link_endpoint_ids:
+#            endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
+#            endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
+#            endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
+#            endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+#
+#
+#            db_topology = None
+#            if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
+#                db_topology: TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid)
+#                # check device is in topology
+#                self.database.get_object(TopologyDeviceModel, endpoint_device_uuid)
+#
+#
+#            link_endpoint = LinkEndPointModel(link_uuid=link_uuid, endpoint_uuid=endpoint_uuid)
+#            result: Tuple[LinkEndPointModel, bool] = self.database.create_or_update(link_endpoint)
+#
+#            if db_topology is not None:
+#                topology_link = TopologyLinkModel(topology_uuid=endpoint_topology_uuid, link_uuid=link_uuid)
+#                result: Tuple[TopologyLinkModel, bool] = self.database.create_or_update(topology_link)
+#
+#        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+#        dict_link_id = db_link.dump_id()
+#        notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id})
+#        return LinkId(**dict_link_id)
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty:
+#        with self.session() as session:
+#            link_uuid = request.link_uuid.uuid
+#
+#            session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete()
+#            session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete()
+#
+#            result = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none()
+#            if not result:
+#                return Empty()
+#            dict_link_id = result.dump_id()
+#
+#            session.query(LinkModel).filter_by(link_uuid=link_uuid).delete()
+#            session.commit()
+#            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+#            notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id})
+#            return Empty()
+#
+##    @safe_and_metered_rpc_method(METRICS, LOGGER)
+##    def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]:
+##        for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT):
+##            yield LinkEvent(**json.loads(message.content))
+#
+#
+#    # ----- Service ----------------------------------------------------------------------------------------------------
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList:
+#        context_uuid = request.context_uuid.uuid
+#
+#        with self.session() as session:
+#            db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
+#            return ServiceIdList(service_ids=[db_service.dump_id() for db_service in db_services])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList:
+#        context_uuid = request.context_uuid.uuid
+#
+#        with self.session() as session:
+#            db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
+#            return ServiceList(services=[db_service.dump() for db_service in db_services])
+#
+#
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service:
+#        service_uuid = request.service_uuid.uuid
+#        with self.session() as session:
+#            result = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none()
+#
+#        if not result:
+#            raise NotFoundException(ServiceModel.__name__.replace('Model', ''), service_uuid)
+#
+#        return Service(**result.dump())
+#
+#    def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int
+#    ) -> Tuple[Union_ConstraintModel, bool]:
+#        with self.session() as session:
+#
+#            grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint'))
+#
+#            parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind)
+#            if parser is None:
+#                raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format(
+#                    grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint)))
+#
+#            # create specific constraint
+#            constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint)
+#            str_constraint_id = str(uuid.uuid4())
+#            LOGGER.info('str_constraint_id: {}'.format(str_constraint_id))
+#            # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id]))
+#            # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':')
+#
+#            # result : Tuple[Union_ConstraintModel, bool] = update_or_create_object(
+#            #     database, constraint_class, str_constraint_key, constraint_data)
+#            constraint_data[constraint_class.main_pk_name()] = str_constraint_id
+#            db_new_constraint = constraint_class(**constraint_data)
+#            result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint)
+#            db_specific_constraint, updated = result
+#
+#            # create generic constraint
+#            # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value)
+#            constraint_data = {
+#                'constraints_uuid': db_constraints.constraints_uuid, 'position': position, 'kind': constraint_kind
+#            }
+#
+#            db_new_constraint = ConstraintModel(**constraint_data)
+#            result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint)
+#            db_constraint, updated = result
+#
+#            return db_constraint, updated
+#
+#    def set_constraints(self, service_uuid: str, constraints_name : str, grpc_constraints
+#    ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]:
+#        with self.session() as session:
+#            # str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':')
+#            # result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key)
+#            db_constraints = session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none()
+#            created = db_constraints is None
+#            if created:
+#                db_constraints = ConstraintsModel(constraints_uuid=service_uuid)
+#                session.add(db_constraints)
+#
+#            db_objects = [(db_constraints, created)]
+#
+#            for position,grpc_constraint in enumerate(grpc_constraints):
+#                result : Tuple[ConstraintModel, bool] = self.set_constraint(
+#                    db_constraints, grpc_constraint, position)
+#                db_constraint, updated = result
+#                db_objects.append((db_constraint, updated))
+#
+#            return db_objects
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId:
+#        with self.lock:
+#            with self.session() as session:
+#
+#                context_uuid = request.service_id.context_id.context_uuid.uuid
+#                # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
+#                db_context = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none()
+#
+#                for i,endpoint_id in enumerate(request.service_endpoint_ids):
+#                    endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+#                    if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
+#                        raise InvalidArgumentException(
+#                            'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
+#                            endpoint_topology_context_uuid,
+#                            ['should be == {:s}({:s})'.format(
+#                                'request.service_id.context_id.context_uuid.uuid', context_uuid)])
+#
+#                service_uuid = request.service_id.service_uuid.uuid
+#                str_service_key = key_to_str([context_uuid, service_uuid])
+#
+#                constraints_result = self.set_constraints(service_uuid, 'constraints', request.service_constraints)
+#                db_constraints = constraints_result[0][0]
+#
+#                config_rules = grpc_config_rules_to_raw(request.service_config.config_rules)
+#                running_config_result = update_config(self.database, str_service_key, 'running', config_rules)
+#                db_running_config = running_config_result[0][0]
+#
+#                result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, {
+#                    'context_fk'            : db_context,
+#                    'service_uuid'          : service_uuid,
+#                    'service_type'          : grpc_to_enum__service_type(request.service_type),
+#                    'service_constraints_fk': db_constraints,
+#                    'service_status'        : grpc_to_enum__service_status(request.service_status.service_status),
+#                    'service_config_fk'     : db_running_config,
+#                })
+#                db_service, updated = result
+#
+#                for i,endpoint_id in enumerate(request.service_endpoint_ids):
+#                    endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
+#                    endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
+#                    endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
+#                    endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+#
+#                    str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
+#                    if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
+#                        str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
+#                        str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
+#
+#                    db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
+#
+#                    str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--')
+#                    result : Tuple[ServiceEndPointModel, bool] = get_or_create_object(
+#                        self.database, ServiceEndPointModel, str_service_endpoint_key, {
+#                            'service_fk': db_service, 'endpoint_fk': db_endpoint})
+#                    #db_service_endpoint, service_endpoint_created = result
+#
+#                event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+#                dict_service_id = db_service.dump_id()
+#                notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id})
+#                return ServiceId(**dict_service_id)
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty:
+#        with self.lock:
+#            context_uuid = request.context_id.context_uuid.uuid
+#            service_uuid = request.service_uuid.uuid
+#            db_service = ServiceModel(self.database, key_to_str([context_uuid, service_uuid]), auto_load=False)
+#            found = db_service.load()
+#            if not found: return Empty()
+#
+#            dict_service_id = db_service.dump_id()
+#            db_service.delete()
+#
+#            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+#            notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id})
+#            return Empty()
+#
+##    @safe_and_metered_rpc_method(METRICS, LOGGER)
+##    def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
+##        for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT):
+##            yield ServiceEvent(**json.loads(message.content))
+#
+#
+#    # ----- Slice ----------------------------------------------------------------------------------------------------
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList:
+#        with self.lock:
+#            db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid)
+#            db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel)
+#            db_slices = sorted(db_slices, key=operator.attrgetter('pk'))
+#            return SliceIdList(slice_ids=[db_slice.dump_id() for db_slice in db_slices])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList:
+#        with self.lock:
+#            db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid)
+#            db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel)
+#            db_slices = sorted(db_slices, key=operator.attrgetter('pk'))
+#            return SliceList(slices=[db_slice.dump() for db_slice in db_slices])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def GetSlice(self, request: SliceId, context : grpc.ServicerContext) -> Slice:
+#        with self.lock:
+#            str_key = key_to_str([request.context_id.context_uuid.uuid, request.slice_uuid.uuid])
+#            db_slice : SliceModel = get_object(self.database, SliceModel, str_key)
+#            return Slice(**db_slice.dump(
+#                include_endpoint_ids=True, include_constraints=True, include_config_rules=True,
+#                include_service_ids=True, include_subslice_ids=True))
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId:
+#        with self.lock:
+#            context_uuid = request.slice_id.context_id.context_uuid.uuid
+#            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
+#
+#            for i,endpoint_id in enumerate(request.slice_endpoint_ids):
+#                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+#                if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
+#                    raise InvalidArgumentException(
+#                        'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
+#                        endpoint_topology_context_uuid,
+#                        ['should be == {:s}({:s})'.format(
+#                            'request.slice_id.context_id.context_uuid.uuid', context_uuid)])
+#
+#            slice_uuid = request.slice_id.slice_uuid.uuid
+#            str_slice_key = key_to_str([context_uuid, slice_uuid])
+#
+#            constraints_result = set_constraints(
+#                self.database, str_slice_key, 'slice', request.slice_constraints)
+#            db_constraints = constraints_result[0][0]
+#
+#            running_config_rules = update_config(
+#                self.database, str_slice_key, 'slice', request.slice_config.config_rules)
+#            db_running_config = running_config_rules[0][0]
+#
+#            result : Tuple[SliceModel, bool] = update_or_create_object(self.database, SliceModel, str_slice_key, {
+#                'context_fk'          : db_context,
+#                'slice_uuid'          : slice_uuid,
+#                'slice_constraints_fk': db_constraints,
+#                'slice_status'        : grpc_to_enum__slice_status(request.slice_status.slice_status),
+#                'slice_config_fk'     : db_running_config,
+#                'slice_owner_uuid'    : request.slice_owner.owner_uuid.uuid,
+#                'slice_owner_string'  : request.slice_owner.owner_string,
+#            })
+#            db_slice, updated = result
+#
+#            for i,endpoint_id in enumerate(request.slice_endpoint_ids):
+#                endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
+#                endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
+#                endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
+#                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+#
+#                str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
+#                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
+#                    str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
+#                    str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
+#
+#                db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
+#
+#                str_slice_endpoint_key = key_to_str([str_slice_key, str_endpoint_key], separator='--')
+#                result : Tuple[SliceEndPointModel, bool] = get_or_create_object(
+#                    self.database, SliceEndPointModel, str_slice_endpoint_key, {
+#                        'slice_fk': db_slice, 'endpoint_fk': db_endpoint})
+#                #db_slice_endpoint, slice_endpoint_created = result
+#
+#            for i,service_id in enumerate(request.slice_service_ids):
+#                service_uuid         = service_id.service_uuid.uuid
+#                service_context_uuid = service_id.context_id.context_uuid.uuid
+#                str_service_key = key_to_str([service_context_uuid, service_uuid])
+#                db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key)
+#
+#                str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--')
+#                result : Tuple[SliceServiceModel, bool] = get_or_create_object(
+#                    self.database, SliceServiceModel, str_slice_service_key, {
+#                        'slice_fk': db_slice, 'service_fk': db_service})
+#                #db_slice_service, slice_service_created = result
+#
+#            for i,subslice_id in enumerate(request.slice_subslice_ids):
+#                subslice_uuid         = subslice_id.slice_uuid.uuid
+#                subslice_context_uuid = subslice_id.context_id.context_uuid.uuid
+#                str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid])
+#                db_subslice : SliceModel = get_object(self.database, SliceModel, str_subslice_key)
+#
+#                str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--')
+#                result : Tuple[SliceSubSliceModel, bool] = get_or_create_object(
+#                    self.database, SliceSubSliceModel, str_slice_subslice_key, {
+#                        'slice_fk': db_slice, 'sub_slice_fk': db_subslice})
+#                #db_slice_subslice, slice_subslice_created = result
+#
+#            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+#            dict_slice_id = db_slice.dump_id()
+#            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id})
+#            return SliceId(**dict_slice_id)
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def UnsetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId:
+#        with self.lock:
+#            context_uuid = request.slice_id.context_id.context_uuid.uuid
+#            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
+#
+#            for i,endpoint_id in enumerate(request.slice_endpoint_ids):
+#                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
+#                if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
+#                    raise InvalidArgumentException(
+#                        'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
+#                        endpoint_topology_context_uuid,
+#                        ['should be == {:s}({:s})'.format(
+#                            'request.slice_id.context_id.context_uuid.uuid', context_uuid)])
+#
+#            slice_uuid = request.slice_id.slice_uuid.uuid
+#            str_slice_key = key_to_str([context_uuid, slice_uuid])
+#
+#            if len(request.slice_constraints) > 0:
+#                raise NotImplementedError('UnsetSlice: removal of constraints')
+#            if len(request.slice_config.config_rules) > 0:
+#                raise NotImplementedError('UnsetSlice: removal of config rules')
+#            if len(request.slice_endpoint_ids) > 0:
+#                raise NotImplementedError('UnsetSlice: removal of endpoints')
+#
+#            updated = False
+#
+#            for service_id in request.slice_service_ids:
+#                service_uuid         = service_id.service_uuid.uuid
+#                service_context_uuid = service_id.context_id.context_uuid.uuid
+#                str_service_key = key_to_str([service_context_uuid, service_uuid])
+#                str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--')
+#                SliceServiceModel(self.database, str_slice_service_key).delete()
+#                updated = True
+#
+#            for subslice_id in request.slice_subslice_ids:
+#                subslice_uuid         = subslice_id.slice_uuid.uuid
+#                subslice_context_uuid = subslice_id.context_id.context_uuid.uuid
+#                str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid])
+#                str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--')
+#                SliceSubSliceModel(self.database, str_slice_subslice_key).delete()
+#                updated = True
+#
+#            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+#            db_slice : SliceModel = get_object(self.database, SliceModel, str_slice_key)
+#            dict_slice_id = db_slice.dump_id()
+#            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id})
+#            return SliceId(**dict_slice_id)
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty:
+#        with self.lock:
+#            context_uuid = request.context_id.context_uuid.uuid
+#            slice_uuid = request.slice_uuid.uuid
+#            db_slice = SliceModel(self.database, key_to_str([context_uuid, slice_uuid]), auto_load=False)
+#            found = db_slice.load()
+#            if not found: return Empty()
+#
+#            dict_slice_id = db_slice.dump_id()
+#            db_slice.delete()
+#
+#            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+#            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id})
+#            return Empty()
+#
+##    @safe_and_metered_rpc_method(METRICS, LOGGER)
+##    def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
+##        for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT):
+##            yield SliceEvent(**json.loads(message.content))
+#
+#
+#    # ----- Connection -------------------------------------------------------------------------------------------------
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListConnectionIds(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionIdList:
+#        with self.lock:
+#            str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid])
+#            db_service : ServiceModel = get_object(self.database, ServiceModel, str_key)
+#            db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel)
+#            db_connections = sorted(db_connections, key=operator.attrgetter('pk'))
+#            return ConnectionIdList(connection_ids=[db_connection.dump_id() for db_connection in db_connections])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListConnections(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionList:
+#        with self.lock:
+#            str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid])
+#            db_service : ServiceModel = get_object(self.database, ServiceModel, str_key)
+#            db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel)
+#            db_connections = sorted(db_connections, key=operator.attrgetter('pk'))
+#            return ConnectionList(connections=[db_connection.dump() for db_connection in db_connections])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection:
+#        with self.lock:
+#            db_connection : ConnectionModel = get_object(self.database, ConnectionModel, request.connection_uuid.uuid)
+#            return Connection(**db_connection.dump(include_path=True, include_sub_service_ids=True))
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId:
+#        with self.lock:
+#            connection_uuid = request.connection_id.connection_uuid.uuid
+#
+#            connection_attributes = {'connection_uuid': connection_uuid}
+#
+#            service_context_uuid = request.service_id.context_id.context_uuid.uuid
+#            service_uuid = request.service_id.service_uuid.uuid
+#            if len(service_context_uuid) > 0 and len(service_uuid) > 0:
+#                str_service_key = key_to_str([service_context_uuid, service_uuid])
+#                db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key)
+#                connection_attributes['service_fk'] = db_service
+#
+#            path_hops_result = set_path(self.database, connection_uuid, request.path_hops_endpoint_ids, path_name = '')
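+#            # set_path() presumably returns the path object first, followed by per-hop entries;
+#            # only the first element is used below.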
+#            db_path = path_hops_result[0]
+#            connection_attributes['path_fk'] = db_path
+#
+#            result : Tuple[ConnectionModel, bool] = update_or_create_object(
+#                self.database, ConnectionModel, connection_uuid, connection_attributes)
+#            db_connection, updated = result
+#
+#            for sub_service_id in request.sub_service_ids:
+#                sub_service_uuid         = sub_service_id.service_uuid.uuid
+#                sub_service_context_uuid = sub_service_id.context_id.context_uuid.uuid
+#                str_sub_service_key = key_to_str([sub_service_context_uuid, sub_service_uuid])
+#                db_service : ServiceModel = get_object(self.database, ServiceModel, str_sub_service_key)
+#
+#                str_connection_sub_service_key = key_to_str([connection_uuid, str_sub_service_key], separator='--')
+#                result : Tuple[ConnectionSubServiceModel, bool] = get_or_create_object(
+#                    self.database, ConnectionSubServiceModel, str_connection_sub_service_key, {
+#                        'connection_fk': db_connection, 'sub_service_fk': db_service})
+#                #db_connection_sub_service, connection_sub_service_created = result
+#
+#            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+#            dict_connection_id = db_connection.dump_id()
+#            notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id})
+#            return ConnectionId(**dict_connection_id)
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty:
+#        with self.lock:
+#            db_connection = ConnectionModel(self.database, request.connection_uuid.uuid, auto_load=False)
+#            found = db_connection.load()
+#            if not found: return Empty()
+#
+#            dict_connection_id = db_connection.dump_id()
+#            db_connection.delete()
+#
+#            event_type = EventTypeEnum.EVENTTYPE_REMOVE
+#            notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id})
+#            return Empty()
+#
+##    @safe_and_metered_rpc_method(METRICS, LOGGER)
+##    def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
+##        for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT):
+##            yield ConnectionEvent(**json.loads(message.content))
+#
+#
+#    # ----- Policy -----------------------------------------------------------------------------------------------------
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList:
+#        with self.lock:
+#            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
+#            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
+#            return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList:
+#        with self.lock:
+#            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
+#            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
+#            return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules])
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule:
+#        with self.lock:
+#            policy_rule_uuid = request.uuid.uuid
+#            db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid)
+#            return PolicyRule(**db_policy_rule.dump())
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId:
+#        with self.lock:
+#            policy_rule_type = request.WhichOneof('policy_rule')
+#            policy_rule_json = grpc_message_to_json(request)
+#            policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid']
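+#            # e.g., if the oneof set is 'device' (hypothetical value), the UUID is read from
+#            #   policy_rule_json['device']['policyRuleBasic']['policyRuleId']['uuid']['uuid']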
+#            result: Tuple[PolicyRuleModel, bool] = update_or_create_object(
+#                self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)})
+#            db_policy, updated = result # pylint: disable=unused-variable
+#
+#            #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
+#            dict_policy_id = db_policy.dump_id()
+#            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
+#            return PolicyRuleId(**dict_policy_id)
+#
+#    @safe_and_metered_rpc_method(METRICS, LOGGER)
+#    def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty:
+#        with self.lock:
+#            policy_uuid = request.uuid.uuid
+#            db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False)
+#            found = db_policy.load()
+#            if not found: return Empty()
+#
+#            dict_policy_id = db_policy.dump_id()
+#            db_policy.delete()
+#            #event_type = EventTypeEnum.EVENTTYPE_REMOVE
+#            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
+#            return Empty()
+#
\ No newline at end of file
diff --git a/src/context/service/Database.py b/src/context/service/Database.py
index 2b699203a7d668ae6062c05cb129c2207b05ac2a..8aa5682390e79146f3cec661af645ac277534c88 100644
--- a/src/context/service/Database.py
+++ b/src/context/service/Database.py
@@ -2,7 +2,7 @@ from typing import Tuple, List
 
 from sqlalchemy import MetaData
 from sqlalchemy.orm import Session, joinedload
-from context.service.database.Base import Base
+from context.service.database._Base import Base
 import logging
 from common.orm.backend.Tools import key_to_str
 
diff --git a/src/context/service/Engine.py b/src/context/service/Engine.py
new file mode 100644
index 0000000000000000000000000000000000000000..7944d86012d6d7bd76539aee6dc3b282c718fd03
--- /dev/null
+++ b/src/context/service/Engine.py
@@ -0,0 +1,40 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, sqlalchemy, sqlalchemy_utils
+from common.Settings import get_setting
+
+LOGGER = logging.getLogger(__name__)
+
+APP_NAME = 'tfs'
+
+class Engine:
+    def get_engine(self) -> sqlalchemy.engine.Engine:
+        ccdb_url = get_setting('CCDB_URL')
+
+        try:
+            engine = sqlalchemy.create_engine(
+                ccdb_url, connect_args={'application_name': APP_NAME}, echo=False, future=True)
+        except: # pylint: disable=bare-except
+            LOGGER.exception('Failed to connect to database: {:s}'.format(ccdb_url))
+            return None
+
+        try:
+            if not sqlalchemy_utils.database_exists(engine.url):
+                sqlalchemy_utils.create_database(engine.url)
+        except: # pylint: disable=bare-except
+            LOGGER.exception('Failed to check/create database: {:s}'.format(ccdb_url))
+            return None
+
+        return engine
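+
+# Minimal usage sketch (assumes CCDB_URL is set in the environment, e.g. a URL of the form
+# 'cockroachdb://<user>:<password>@<host>:26257/<database>?sslmode=require'):
+#   db_engine = Engine().get_engine()
+#   if db_engine is None: raise Exception('Unable to get SQLAlchemy engine for CCDB_URL')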
diff --git a/src/context/service/__main__.py b/src/context/service/__main__.py
index 34942ec8297a97e683549c541d8cd50934fb02fa..c5bbcc3f27aef2a55fa6614adad34c8004f4fdd7 100644
--- a/src/context/service/__main__.py
+++ b/src/context/service/__main__.py
@@ -14,85 +14,52 @@
 
 import logging, signal, sys, threading
 from prometheus_client import start_http_server
-from common.Settings import get_log_level, get_metrics_port, get_setting
+from common.Settings import get_log_level, get_metrics_port
 from common.message_broker.Factory import get_messagebroker_backend
 from common.message_broker.MessageBroker import MessageBroker
-from context.Config import POPULATE_FAKE_DATA
-from sqlalchemy.orm import sessionmaker, declarative_base
-from context.service.database.Base import Base
-from .grpc_server.ContextService import ContextService
-from .rest_server.Resources import RESOURCES
-from .rest_server.RestServer import RestServer
-from .Populate import populate
-# from models import Device, EndPoint, EndPointId, DeviceDriverEnum, DeviceOperationalStatusEnum, ConfigActionEnum, \
-#     ConfigRule, KpiSampleType, Base
-from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+from .database import rebuild_database
+from .ContextService import ContextService
+from .Engine import Engine
+
+LOG_LEVEL = get_log_level()
+logging.basicConfig(level=LOG_LEVEL, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+LOGGER = logging.getLogger(__name__)
 
 terminate = threading.Event()
-LOGGER = None
 
 def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
     LOGGER.warning('Terminate signal received')
     terminate.set()
 
 def main():
-    global LOGGER # pylint: disable=global-statement
-
-    log_level = get_log_level()
-    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
-    LOGGER = logging.getLogger(__name__)
-
+    LOGGER.info('Starting...')
     signal.signal(signal.SIGINT,  signal_handler)
     signal.signal(signal.SIGTERM, signal_handler)
 
-    LOGGER.info('Starting...')
-
     # Start metrics server
     metrics_port = get_metrics_port()
     start_http_server(metrics_port)
 
-    # Get database instance
-    db_uri = 'cockroachdb://root@10.152.183.111:26257/defaultdb?sslmode=disable'
-    LOGGER.debug('Connecting to DB: {}'.format(db_uri))
-
-    # engine = create_engine(db_uri, echo=False)
-
-    try:
-        engine = create_engine(db_uri)
-    except Exception as e:
-        LOGGER.error("Failed to connect to database.")
-        LOGGER.error(f"{e}")
-        return 1
-
-    Base.metadata.create_all(engine)
-    session = sessionmaker(bind=engine, expire_on_commit=False)
+    db_engine = Engine().get_engine()
+    rebuild_database(db_engine, drop_if_exists=False)
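+    # NOTE: drop_if_exists=True would (as its name suggests) recreate the schema from scratch,
+    #       wiping existing data; keep it False so restarts preserve database contents.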
 
     # Get message broker instance
     messagebroker = MessageBroker(get_messagebroker_backend())
 
     # Starting context service
-    grpc_service = ContextService(session, messagebroker)
+    grpc_service = ContextService(db_engine, messagebroker)
     grpc_service.start()
 
-    rest_server = RestServer()
-    for endpoint_name, resource_class, resource_url in RESOURCES:
-        rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(session,))
-    rest_server.start()
-
-    populate_fake_data = get_setting('POPULATE_FAKE_DATA', default=POPULATE_FAKE_DATA)
-    if isinstance(populate_fake_data, str): populate_fake_data = (populate_fake_data.upper() in {'T', '1', 'TRUE'})
-    if populate_fake_data:
-        LOGGER.info('Populating fake data...')
-        populate(host='127.0.0.1', port=grpc_service.bind_port)
-        LOGGER.info('Fake Data populated')
-
     # Wait for Ctrl+C or termination signal
     while not terminate.wait(timeout=0.1): pass
 
     LOGGER.info('Terminating...')
     grpc_service.stop()
-    rest_server.shutdown()
-    rest_server.join()
 
     LOGGER.info('Bye')
     return 0
diff --git a/src/context/service/rest_server/__init__.py b/src/context/service/_old_code/Config.py
similarity index 86%
rename from src/context/service/rest_server/__init__.py
rename to src/context/service/_old_code/Config.py
index 70a33251242c51f49140e596b8208a19dd5245f7..6f5d1dc0b347dc5db27a2cfae973a4e5bdf7b4cc 100644
--- a/src/context/service/rest_server/__init__.py
+++ b/src/context/service/_old_code/Config.py
@@ -12,3 +12,5 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Autopopulate the component with fake data for testing purposes?
+POPULATE_FAKE_DATA = False
diff --git a/src/context/service/Populate.py b/src/context/service/_old_code/Populate.py
similarity index 100%
rename from src/context/service/Populate.py
rename to src/context/service/_old_code/Populate.py
diff --git a/src/context/service/rest_server/Resources.py b/src/context/service/_old_code/Resources.py
similarity index 100%
rename from src/context/service/rest_server/Resources.py
rename to src/context/service/_old_code/Resources.py
diff --git a/src/context/service/rest_server/RestServer.py b/src/context/service/_old_code/RestServer.py
similarity index 100%
rename from src/context/service/rest_server/RestServer.py
rename to src/context/service/_old_code/RestServer.py
diff --git a/src/context/service/grpc_server/__init__.py b/src/context/service/_old_code/__init__.py
similarity index 100%
rename from src/context/service/grpc_server/__init__.py
rename to src/context/service/_old_code/__init__.py
diff --git a/src/context/service/_old_code/__main__.py b/src/context/service/_old_code/__main__.py
new file mode 100644
index 0000000000000000000000000000000000000000..69d3f5cbef08d4301891a6ad1cbf2d8a247f7c40
--- /dev/null
+++ b/src/context/service/_old_code/__main__.py
@@ -0,0 +1,85 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging, signal, sys, threading
+from prometheus_client import start_http_server
+from common.Settings import get_log_level, get_metrics_port, get_setting
+from common.orm.Database import Database
+from common.orm.Factory import get_database_backend
+from common.message_broker.Factory import get_messagebroker_backend
+from common.message_broker.MessageBroker import MessageBroker
+from context.service.grpc_server.ContextService import ContextService
+from .Config import POPULATE_FAKE_DATA
+from .Populate import populate
+from .Resources import RESOURCES
+from .RestServer import RestServer
+
+terminate = threading.Event()
+LOGGER = None
+
+def signal_handler(signal, frame): # pylint: disable=redefined-outer-name
+    LOGGER.warning('Terminate signal received')
+    terminate.set()
+
+def main():
+    global LOGGER # pylint: disable=global-statement
+
+    log_level = get_log_level()
+    logging.basicConfig(level=log_level, format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
+    LOGGER = logging.getLogger(__name__)
+
+    signal.signal(signal.SIGINT,  signal_handler)
+    signal.signal(signal.SIGTERM, signal_handler)
+
+    LOGGER.info('Starting...')
+
+    # Start metrics server
+    metrics_port = get_metrics_port()
+    start_http_server(metrics_port)
+
+    # Get database instance
+    database = Database(get_database_backend())
+
+    # Get message broker instance
+    messagebroker = MessageBroker(get_messagebroker_backend())
+
+    # Starting context service
+    grpc_service = ContextService(database, messagebroker)
+    grpc_service.start()
+
+    rest_server = RestServer()
+    for endpoint_name, resource_class, resource_url in RESOURCES:
+        rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,))
+    rest_server.start()
+
+    populate_fake_data = get_setting('POPULATE_FAKE_DATA', default=POPULATE_FAKE_DATA)
+    if isinstance(populate_fake_data, str): populate_fake_data = (populate_fake_data.upper() in {'T', '1', 'TRUE'})
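+    # e.g. POPULATE_FAKE_DATA='true', 'T' or '1' enables population; any other string disables it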
+    if populate_fake_data:
+        LOGGER.info('Populating fake data...')
+        populate(host='127.0.0.1', port=grpc_service.bind_port)
+        LOGGER.info('Fake Data populated')
+
+    # Wait for Ctrl+C or termination signal
+    while not terminate.wait(timeout=0.1): pass
+
+    LOGGER.info('Terminating...')
+    grpc_service.stop()
+    rest_server.shutdown()
+    rest_server.join()
+
+    LOGGER.info('Bye')
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/src/context/service/_old_code/test_unitary.py b/src/context/service/_old_code/test_unitary.py
new file mode 100644
index 0000000000000000000000000000000000000000..04e054aad022a916d443feac8916b7eb436cafe2
--- /dev/null
+++ b/src/context/service/_old_code/test_unitary.py
@@ -0,0 +1,1450 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# pylint: disable=too-many-lines
+import copy, grpc, logging, os, pytest, requests, time, urllib
+from typing import Tuple
+from common.Constants import DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID, ServiceNameEnum
+from common.Settings import (
+    ENVVAR_SUFIX_SERVICE_HOST, ENVVAR_SUFIX_SERVICE_PORT_GRPC, ENVVAR_SUFIX_SERVICE_PORT_HTTP, get_env_var_name,
+    get_service_baseurl_http, get_service_port_grpc, get_service_port_http)
+from context.service.Database import Database
+from common.message_broker.Factory import get_messagebroker_backend, BackendEnum as MessageBrokerBackendEnum
+from common.message_broker.MessageBroker import MessageBroker
+from common.proto.context_pb2 import (
+    Connection, ConnectionEvent, ConnectionId, Context, ContextEvent, ContextId, Device, DeviceEvent, DeviceId,
+    DeviceOperationalStatusEnum, Empty, EventTypeEnum, Link, LinkEvent, LinkId, Service, ServiceEvent, ServiceId,
+    ServiceStatusEnum, ServiceTypeEnum, Topology, TopologyEvent, TopologyId)
+from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule)
+from common.type_checkers.Assertions import (
+    validate_connection, validate_connection_ids, validate_connections, validate_context, validate_context_ids,
+    validate_contexts, validate_device, validate_device_ids, validate_devices, validate_link, validate_link_ids,
+    validate_links, validate_service, validate_service_ids, validate_services, validate_topologies, validate_topology,
+    validate_topology_ids)
+from context.client.ContextClient import ContextClient
+from context.client.EventsCollector import EventsCollector
+from context.service.database.Tools import (
+    FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher)
+from context.service.grpc_server.ContextService import ContextService
+from context.service._old_code.Populate import populate
+from context.service.rest_server.RestServer import RestServer
+from context.service.rest_server.Resources import RESOURCES
+from requests import Session
+from sqlalchemy import create_engine
+from sqlalchemy.orm import sessionmaker
+from context.service.database._Base import Base
+
+from .Objects import (
+    CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID,
+    DEVICE_R1_UUID, DEVICE_R2, DEVICE_R2_ID, DEVICE_R2_UUID, DEVICE_R3, DEVICE_R3_ID, DEVICE_R3_UUID, LINK_R1_R2,
+    LINK_R1_R2_ID, LINK_R1_R2_UUID, SERVICE_R1_R2, SERVICE_R1_R2_ID, SERVICE_R1_R2_UUID, SERVICE_R1_R3,
+    SERVICE_R1_R3_ID, SERVICE_R1_R3_UUID, SERVICE_R2_R3, SERVICE_R2_R3_ID, SERVICE_R2_R3_UUID, TOPOLOGY, TOPOLOGY_ID,
+    POLICY_RULE, POLICY_RULE_ID, POLICY_RULE_UUID)
+
+LOGGER = logging.getLogger(__name__)
+LOGGER.setLevel(logging.DEBUG)
+
+LOCAL_HOST = '127.0.0.1'
+GRPC_PORT = 10000 + int(get_service_port_grpc(ServiceNameEnum.CONTEXT))   # avoid privileged ports
+HTTP_PORT = 10000 + int(get_service_port_http(ServiceNameEnum.CONTEXT))   # avoid privileged ports
+
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_HOST     )] = str(LOCAL_HOST)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_GRPC)] = str(GRPC_PORT)
+os.environ[get_env_var_name(ServiceNameEnum.CONTEXT, ENVVAR_SUFIX_SERVICE_PORT_HTTP)] = str(HTTP_PORT)
+
+DEFAULT_REDIS_SERVICE_HOST = LOCAL_HOST
+DEFAULT_REDIS_SERVICE_PORT = 6379
+DEFAULT_REDIS_DATABASE_ID  = 0
+
+REDIS_CONFIG = {
+    'REDIS_SERVICE_HOST': os.environ.get('REDIS_SERVICE_HOST', DEFAULT_REDIS_SERVICE_HOST),
+    'REDIS_SERVICE_PORT': os.environ.get('REDIS_SERVICE_PORT', DEFAULT_REDIS_SERVICE_PORT),
+    'REDIS_DATABASE_ID' : os.environ.get('REDIS_DATABASE_ID',  DEFAULT_REDIS_DATABASE_ID ),
+}
+
+SCENARIOS = [
+    ('all_sqlalchemy', {}, MessageBrokerBackendEnum.INMEMORY, {}),
+    # The old 5-field scenarios below rely on DatabaseBackendEnum, which is no longer imported:
+#    ('all_inmemory', DatabaseBackendEnum.INMEMORY, {},           MessageBrokerBackendEnum.INMEMORY, {}          ),
+#    ('all_redis',    DatabaseBackendEnum.REDIS,    REDIS_CONFIG, MessageBrokerBackendEnum.REDIS,    REDIS_CONFIG),
+]
+
+@pytest.fixture(scope='session', ids=[str(scenario[0]) for scenario in SCENARIOS], params=SCENARIOS)
+def context_s_mb(request) -> Tuple[Session, MessageBroker]:
+    name,db_session,mb_backend,mb_settings = request.param
+    msg = 'Running scenario {:s} db_session={:s}, mb_backend={:s}, mb_settings={:s}...'
+    LOGGER.info(msg.format(str(name), str(db_session), str(mb_backend.value), str(mb_settings)))
+
+    db_uri = 'cockroachdb://root@10.152.183.111:26257/defaultdb?sslmode=disable'
+    LOGGER.debug('Connecting to DB: {}'.format(db_uri))
+
+    try:
+        engine = create_engine(db_uri)
+    except Exception:
+        LOGGER.exception('Failed to connect to database: {:s}'.format(db_uri))
+        raise
+
+    Base.metadata.create_all(engine)
+    _session = sessionmaker(bind=engine, expire_on_commit=False)
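+    # NOTE: sessionmaker() returns a session *factory*, not a live session; the tests below
+    #       instantiate sessions from it (e.g. Database(...) wraps it, or 'with _session() as s:').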
+
+    _message_broker = MessageBroker(get_messagebroker_backend(backend=mb_backend, **mb_settings))
+    yield _session, _message_broker
+    _message_broker.terminate()
+
+@pytest.fixture(scope='session')
+def context_service_grpc(context_s_mb : Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name
+    _service = ContextService(context_s_mb[0], context_s_mb[1])
+    _service.start()
+    yield _service
+    _service.stop()
+
+@pytest.fixture(scope='session')
+def context_service_rest(context_s_mb : Tuple[Session, MessageBroker]): # pylint: disable=redefined-outer-name
+    database = context_s_mb[0]
+    _rest_server = RestServer()
+    for endpoint_name, resource_class, resource_url in RESOURCES:
+        _rest_server.add_resource(resource_class, resource_url, endpoint=endpoint_name, resource_class_args=(database,))
+    _rest_server.start()
+    time.sleep(1) # give the server time to start
+    yield _rest_server
+    _rest_server.shutdown()
+    _rest_server.join()
+
+@pytest.fixture(scope='session')
+def context_client_grpc(context_service_grpc : ContextService): # pylint: disable=redefined-outer-name
+    _client = ContextClient()
+    yield _client
+    _client.close()
+"""
+def do_rest_request(url : str):
+    base_url = get_service_baseurl_http(ServiceNameEnum.CONTEXT)
+    request_url = 'http://{:s}:{:s}{:s}{:s}'.format(str(LOCAL_HOST), str(HTTP_PORT), str(base_url), url)
+    LOGGER.warning('Request: GET {:s}'.format(str(request_url)))
+    reply = requests.get(request_url)
+    LOGGER.warning('Reply: {:s}'.format(str(reply.text)))
+    assert reply.status_code == 200, 'Reply failed with code {}'.format(reply.status_code)
+    return reply.json()
+"""
+
+"""# ----- Test gRPC methods ----------------------------------------------------------------------------------------------
+def test_grpc_context(
+    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
+    context_s_mb : Tuple[Session, MessageBroker]):    # pylint: disable=redefined-outer-name
+    session = context_s_mb[0]
+
+    database = Database(session)
+
+    # ----- Clean the database -----------------------------------------------------------------------------------------
+    database.clear()
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    events_collector = EventsCollector(context_client_grpc)
+    events_collector.start()
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        context_client_grpc.GetContext(ContextId(**CONTEXT_ID))
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    assert e.value.details() == 'Context({:s}) not found'.format(DEFAULT_CONTEXT_UUID)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client_grpc.ListContextIds(Empty())
+    assert len(response.context_ids) == 0
+
+    response = context_client_grpc.ListContexts(Empty())
+    assert len(response.contexts) == 0
+
+    # ----- Dump state of database before create the object ------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info('  [{:>4s}] {:40s} :: {:s}'.format(*db_entry)) # pragma: no cover
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetContext(Context(**CONTEXT))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    wrong_uuid = 'c97c4185-e1d1-4ea7-b6b9-afbf76cb61f4'
+    with pytest.raises(grpc.RpcError) as e:
+        WRONG_TOPOLOGY_ID = copy.deepcopy(TOPOLOGY_ID)
+        WRONG_TOPOLOGY_ID['context_id']['context_uuid']['uuid'] = wrong_uuid
+        WRONG_CONTEXT = copy.deepcopy(CONTEXT)
+        WRONG_CONTEXT['topology_ids'].append(WRONG_TOPOLOGY_ID)
+        context_client_grpc.SetContext(Context(**WRONG_CONTEXT))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg = 'request.topology_ids[0].context_id.context_uuid.uuid({}) is invalid; '\
+          'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID)
+    assert e.value.details() == msg
+
+    with pytest.raises(grpc.RpcError) as e:
+        WRONG_SERVICE_ID = copy.deepcopy(SERVICE_R1_R2_ID)
+        WRONG_SERVICE_ID['context_id']['context_uuid']['uuid'] = wrong_uuid
+        WRONG_CONTEXT = copy.deepcopy(CONTEXT)
+        WRONG_CONTEXT['service_ids'].append(WRONG_SERVICE_ID)
+        context_client_grpc.SetContext(Context(**WRONG_CONTEXT))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg = 'request.service_ids[0].context_id.context_uuid.uuid({}) is invalid; '\
+          'should be == request.context_id.context_uuid.uuid({})'.format(wrong_uuid, DEFAULT_CONTEXT_UUID)
+    assert e.value.details() == msg
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    event = events_collector.get_event(block=True)
+    assert isinstance(event, ContextEvent)
+    assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetContext(Context(**CONTEXT))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    event = events_collector.get_event(block=True)
+    assert isinstance(event, ContextEvent)
+    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Dump state of database after create/update the object ------------------------------------------------------
+    db_entries = database.dump_all()
+
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 1
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetContext(ContextId(**CONTEXT_ID))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert len(response.topology_ids) == 0
+    assert len(response.service_ids) == 0
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client_grpc.ListContextIds(Empty())
+    assert len(response.context_ids) == 1
+    assert response.context_ids[0].context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    response = context_client_grpc.ListContexts(Empty())
+    assert len(response.contexts) == 1
+    assert response.contexts[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert len(response.contexts[0].topology_ids) == 0
+    assert len(response.contexts[0].service_ids) == 0
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    # event = events_collector.get_event(block=True)
+    # assert isinstance(event, ContextEvent)
+    # assert event.event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    events_collector.stop()
+
+    # ----- Dump state of database after remove the object -------------------------------------------------------------
+    db_entries = database.dump_all()
+
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+
+def test_grpc_topology(
+    context_client_grpc: ContextClient,  # pylint: disable=redefined-outer-name
+    context_s_mb: Tuple[Session, MessageBroker]):  # pylint: disable=redefined-outer-name
+    session = context_s_mb[0]
+
+    database = Database(session)
+
+    # ----- Clean the database -----------------------------------------------------------------------------------------
+    database.clear()
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    events_collector = EventsCollector(context_client_grpc)
+    events_collector.start()
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client_grpc.SetContext(Context(**CONTEXT))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # event = events_collector.get_event(block=True)
+    # assert isinstance(event, ContextEvent)
+    # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert event.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    # assert e.value.details() == 'Topology({:s}/{:s}) not found'.format(DEFAULT_CONTEXT_UUID, DEFAULT_TOPOLOGY_UUID)
+    assert e.value.details() == 'Topology({:s}) not found'.format(DEFAULT_TOPOLOGY_UUID)
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 0
+    response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == 0
+
+    # ----- Dump state of database before create the object ------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 1
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    CONTEXT_WITH_TOPOLOGY = copy.deepcopy(CONTEXT)
+    CONTEXT_WITH_TOPOLOGY['topology_ids'].append(TOPOLOGY_ID)
+    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_TOPOLOGY))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    # events = events_collector.get_events(block=True, count=2)
+
+    # assert isinstance(events[0], TopologyEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # assert isinstance(events[1], ContextEvent)
+    # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    # event = events_collector.get_event(block=True)
+    # assert isinstance(event, TopologyEvent)
+    # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    # assert event.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # assert event.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # ----- Dump state of database after create/update the object ------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 2
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+    assert len(response.device_ids) == 0
+    assert len(response.link_ids) == 0
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client_grpc.ListTopologyIds(ContextId(**CONTEXT_ID))
+    assert len(response.topology_ids) == 1
+    assert response.topology_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_ids[0].topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    response = context_client_grpc.ListTopologies(ContextId(**CONTEXT_ID))
+    assert len(response.topologies) == 1
+    assert response.topologies[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topologies[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+    assert len(response.topologies[0].device_ids) == 0
+    assert len(response.topologies[0].link_ids) == 0
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    # events = events_collector.get_events(block=True, count=2)
+
+    # assert isinstance(events[0], TopologyEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[0].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # assert events[0].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # assert isinstance(events[1], ContextEvent)
+    # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    # events_collector.stop()
+
+    # ----- Dump state of database after remove the object -------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+
+def test_grpc_device(
+    context_client_grpc: ContextClient,             # pylint: disable=redefined-outer-name
+    context_s_mb: Tuple[Session, MessageBroker]):   # pylint: disable=redefined-outer-name
+    session = context_s_mb[0]
+
+    database = Database(session)
+
+    # ----- Clean the database -----------------------------------------------------------------------------------------
+    database.clear()
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    events_collector = EventsCollector(context_client_grpc)
+    events_collector.start()
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client_grpc.SetContext(Context(**CONTEXT))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    events = events_collector.get_events(block=True, count=2)
+
+    assert isinstance(events[0], ContextEvent)
+    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    assert isinstance(events[1], TopologyEvent)
+    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID))
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    assert e.value.details() == 'Device({:s}) not found'.format(DEVICE_R1_UUID)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client_grpc.ListDeviceIds(Empty())
+    assert len(response.device_ids) == 0
+
+    response = context_client_grpc.ListDevices(Empty())
+    assert len(response.devices) == 0
+
+    # ----- Dump state of database before create the object ------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 2
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        WRONG_DEVICE = copy.deepcopy(DEVICE_R1)
+        WRONG_DEVICE_UUID = '3f03c76d-31fb-47f5-9c1d-bc6b6bfa2d08'
+        WRONG_DEVICE['device_endpoints'][0]['endpoint_id']['device_id']['device_uuid']['uuid'] = WRONG_DEVICE_UUID
+        context_client_grpc.SetDevice(Device(**WRONG_DEVICE))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg = 'request.device_endpoints[0].device_id.device_uuid.uuid({}) is invalid; '\
+          'should be == request.device_id.device_uuid.uuid({})'.format(WRONG_DEVICE_UUID, DEVICE_R1_UUID)
+    assert e.value.details() == msg
+    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
+    assert response.device_uuid.uuid == DEVICE_R1_UUID
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    # event = events_collector.get_event(block=True)
+    # assert isinstance(event, DeviceEvent)
+    # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
+    assert response.device_uuid.uuid == DEVICE_R1_UUID
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    # event = events_collector.get_event(block=True)
+    # assert isinstance(event, DeviceEvent)
+    # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    # assert event.device_id.device_uuid.uuid == DEVICE_R1_UUID
+
+    # ----- Dump state of database after create/update the object ------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 47
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetDevice(DeviceId(**DEVICE_R1_ID))
+    assert response.device_id.device_uuid.uuid == DEVICE_R1_UUID
+    assert response.device_type == 'packet-router'
+    assert len(response.device_config.config_rules) == 3
+    assert response.device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
+    assert len(response.device_drivers) == 1
+    assert len(response.device_endpoints) == 3
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client_grpc.ListDeviceIds(Empty())
+    assert len(response.device_ids) == 1
+    assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID
+
+    response = context_client_grpc.ListDevices(Empty())
+    assert len(response.devices) == 1
+    assert response.devices[0].device_id.device_uuid.uuid == DEVICE_R1_UUID
+    assert response.devices[0].device_type == 'packet-router'
+    assert len(response.devices[0].device_config.config_rules) == 3
+    assert response.devices[0].device_operational_status == DeviceOperationalStatusEnum.DEVICEOPERATIONALSTATUS_DISABLED
+    assert len(response.devices[0].device_drivers) == 1
+    assert len(response.devices[0].device_endpoints) == 3
+
+    # ----- Create object relation -------------------------------------------------------------------------------------
+    TOPOLOGY_WITH_DEVICE = copy.deepcopy(TOPOLOGY)
+    TOPOLOGY_WITH_DEVICE['device_ids'].append(DEVICE_R1_ID)
+    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_DEVICE))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    # event = events_collector.get_event(block=True)
+    # assert isinstance(event, TopologyEvent)
+    # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # ----- Check relation was created ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+    assert len(response.device_ids) == 1
+    assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID
+    assert len(response.link_ids) == 0
+
+    # ----- Dump state of database after creating the object relation --------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 47
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID))
+    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    # events = events_collector.get_events(block=True, count=3)
+
+    # assert isinstance(events[0], DeviceEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[0].device_id.device_uuid.uuid == DEVICE_R1_UUID
+
+    # assert isinstance(events[1], TopologyEvent)
+    # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # assert isinstance(events[2], ContextEvent)
+    # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[2].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    # events_collector.stop()
+
+    # ----- Dump state of database after remove the object -------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+
+def test_grpc_link(
+    context_client_grpc: ContextClient,             # pylint: disable=redefined-outer-name
+    context_s_mb: Tuple[Session, MessageBroker]):   # pylint: disable=redefined-outer-name
+    session = context_s_mb[0]
+
+    database = Database(session)
+
+    # ----- Clean the database -----------------------------------------------------------------------------------------
+    database.clear()
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    events_collector = EventsCollector(context_client_grpc)
+    events_collector.start()
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client_grpc.SetContext(Context(**CONTEXT))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
+    assert response.device_uuid.uuid == DEVICE_R1_UUID
+
+    response = context_client_grpc.SetDevice(Device(**DEVICE_R2))
+    assert response.device_uuid.uuid == DEVICE_R2_UUID
+    # events = events_collector.get_events(block=True, count=4)
+
+    # assert isinstance(events[0], ContextEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    #
+    # assert isinstance(events[1], TopologyEvent)
+    # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+    #
+    # assert isinstance(events[2], DeviceEvent)
+    # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID
+    #
+    # assert isinstance(events[3], DeviceEvent)
+    # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID))
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    assert e.value.details() == 'Link({:s}) not found'.format(LINK_R1_R2_UUID)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client_grpc.ListLinkIds(Empty())
+    assert len(response.link_ids) == 0
+
+    response = context_client_grpc.ListLinks(Empty())
+    assert len(response.links) == 0
+
+    # ----- Dump state of database before create the object ------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 80
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetLink(Link(**LINK_R1_R2))
+    assert response.link_uuid.uuid == LINK_R1_R2_UUID
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    # event = events_collector.get_event(block=True)
+    # assert isinstance(event, LinkEvent)
+    # assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetLink(Link(**LINK_R1_R2))
+    assert response.link_uuid.uuid == LINK_R1_R2_UUID
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    # event = events_collector.get_event(block=True)
+    # assert isinstance(event, LinkEvent)
+    # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    # assert event.link_id.link_uuid.uuid == LINK_R1_R2_UUID
+
+    # ----- Dump state of database after create/update the object ------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 88
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetLink(LinkId(**LINK_R1_R2_ID))
+    assert response.link_id.link_uuid.uuid == LINK_R1_R2_UUID
+    assert len(response.link_endpoint_ids) == 2
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client_grpc.ListLinkIds(Empty())
+    assert len(response.link_ids) == 1
+    assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID
+
+    response = context_client_grpc.ListLinks(Empty())
+    assert len(response.links) == 1
+    assert response.links[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID
+
+    assert len(response.links[0].link_endpoint_ids) == 2
+
+    # ----- Create object relation -------------------------------------------------------------------------------------
+    TOPOLOGY_WITH_LINK = copy.deepcopy(TOPOLOGY)
+    TOPOLOGY_WITH_LINK['link_ids'].append(LINK_R1_R2_ID)
+    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY_WITH_LINK))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    # event = events_collector.get_event(block=True)
+    # assert isinstance(event, TopologyEvent)
+    # assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    # assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    # ----- Check relation was created ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetTopology(TopologyId(**TOPOLOGY_ID))
+    assert response.topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+    assert len(response.device_ids) == 2
+    # assert response.device_ids[0].device_uuid.uuid == DEVICE_R1_UUID
+    # assert response.device_ids[1].device_uuid.uuid == DEVICE_R2_UUID
+    assert len(response.link_ids) == 1
+    assert response.link_ids[0].link_uuid.uuid == LINK_R1_R2_UUID
+
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 88
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client_grpc.RemoveLink(LinkId(**LINK_R1_R2_ID))
+    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID))
+    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID))
+    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    # events = events_collector.get_events(block=True, count=5)
+    #
+    # assert isinstance(events[0], LinkEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[0].link_id.link_uuid.uuid == LINK_R1_R2_UUID
+    #
+    # assert isinstance(events[1], DeviceEvent)
+    # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID
+    #
+    # assert isinstance(events[2], DeviceEvent)
+    # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID
+    #
+    # assert isinstance(events[3], TopologyEvent)
+    # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+    #
+    # assert isinstance(events[4], ContextEvent)
+    # assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    events_collector.stop()
+
+    # ----- Dump state of database after remove the object -------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+"""
+
+def test_grpc_service(
+    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
+    context_s_mb : Tuple[Session, MessageBroker]):     # pylint: disable=redefined-outer-name
+    session = context_s_mb[0]
+
+    # ----- Clean the database -----------------------------------------------------------------------------------------
+    database = Database(session)
+    database.clear()
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    events_collector = EventsCollector(context_client_grpc)
+    events_collector.start()
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client_grpc.SetContext(Context(**CONTEXT))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
+    assert response.device_uuid.uuid == DEVICE_R1_UUID
+
+    response = context_client_grpc.SetDevice(Device(**DEVICE_R2))
+    assert response.device_uuid.uuid == DEVICE_R2_UUID
+    # events = events_collector.get_events(block=True, count=4)
+    #
+    # assert isinstance(events[0], ContextEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    #
+    # assert isinstance(events[1], TopologyEvent)
+    # assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    # assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+    #
+    # assert isinstance(events[2], DeviceEvent)
+    # assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID
+    #
+    # assert isinstance(events[3], DeviceEvent)
+    # assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID
+    LOGGER.info('----------------')
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID))
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    assert e.value.details() == 'Service({:s}) not found'.format(SERVICE_R1_R2_UUID)
+    LOGGER.info('----------------')
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID))
+    assert len(response.service_ids) == 0
+    LOGGER.info('----------------')
+
+    response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 0
+    LOGGER.info('----------------')
+
+    # ----- Dump state of database before create the object ------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 80
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        WRONG_SERVICE = copy.deepcopy(SERVICE_R1_R2)
+        WRONG_SERVICE['service_endpoint_ids'][0]\
+            ['topology_id']['context_id']['context_uuid']['uuid'] = 'ca1ea172-728f-441d-972c-feeae8c9bffc'
+        context_client_grpc.SetService(Service(**WRONG_SERVICE))
+    assert e.value.code() == grpc.StatusCode.INVALID_ARGUMENT
+    msg = 'request.service_endpoint_ids[0].topology_id.context_id.context_uuid.uuid(ca1ea172-728f-441d-972c-feeae8c9bffc) is invalid; '\
+          'should be == request.service_id.context_id.context_uuid.uuid({:s})'.format(DEFAULT_CONTEXT_UUID)
+    assert e.value.details() == msg
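+    # NOTE: SetService cross-checks the context_uuid embedded in each
+    # service_endpoint_id against the service's own context before persisting;
+    # a mismatch is rejected with INVALID_ARGUMENT, as asserted above.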
+
+    response = context_client_grpc.SetService(Service(**SERVICE_R1_R2))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.service_uuid.uuid == SERVICE_R1_R2_UUID
+
+    CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT)
+    CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID)
+    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    events = events_collector.get_events(block=True, count=2)
+
+    assert isinstance(events[0], ServiceEvent)
+    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
+
+    assert isinstance(events[1], ContextEvent)
+    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    assert events[1].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetService(Service(**SERVICE_R1_R2))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.service_uuid.uuid == SERVICE_R1_R2_UUID
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    event = events_collector.get_event(block=True)
+    assert isinstance(event, ServiceEvent)
+    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    assert event.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert event.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
+
+    # ----- Dump state of database after create/update the object ------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 108
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetService(ServiceId(**SERVICE_R1_R2_ID))
+    assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
+    assert response.service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+    assert len(response.service_endpoint_ids) == 2
+    assert len(response.service_constraints) == 2
+    assert response.service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED
+    assert len(response.service_config.config_rules) == 3
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client_grpc.ListServiceIds(ContextId(**CONTEXT_ID))
+    assert len(response.service_ids) == 1
+    assert response.service_ids[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.service_ids[0].service_uuid.uuid == SERVICE_R1_R2_UUID
+
+    response = context_client_grpc.ListServices(ContextId(**CONTEXT_ID))
+    assert len(response.services) == 1
+    assert response.services[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.services[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
+    assert response.services[0].service_type == ServiceTypeEnum.SERVICETYPE_L3NM
+    assert len(response.services[0].service_endpoint_ids) == 2
+    assert len(response.services[0].service_constraints) == 2
+    assert response.services[0].service_status.service_status == ServiceStatusEnum.SERVICESTATUS_PLANNED
+    assert len(response.services[0].service_config.config_rules) == 3
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID))
+    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID))
+    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID))
+    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    events = events_collector.get_events(block=True, count=5)
+
+    assert isinstance(events[0], ServiceEvent)
+    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[0].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[0].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
+
+    assert isinstance(events[1], DeviceEvent)
+    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[1].device_id.device_uuid.uuid == DEVICE_R1_UUID
+
+    assert isinstance(events[2], DeviceEvent)
+    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[2].device_id.device_uuid.uuid == DEVICE_R2_UUID
+
+    assert isinstance(events[3], TopologyEvent)
+    assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[3].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[3].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    assert isinstance(events[4], ContextEvent)
+    assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[4].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    events_collector.stop()
+
+    # ----- Dump state of database after remove the object -------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+
+"""
+
+def test_grpc_connection(
+    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
+    context_s_mb : Tuple[Session, MessageBroker]):      # pylint: disable=redefined-outer-name
+    session = context_s_mb[0]
+
+    database = Database(session)
+
+    # ----- Clean the database -----------------------------------------------------------------------------------------
+    database.clear()
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    events_collector = EventsCollector(context_client_grpc)
+    events_collector.start()
+
+    # ----- Prepare dependencies for the test and capture related events -----------------------------------------------
+    response = context_client_grpc.SetContext(Context(**CONTEXT))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    response = context_client_grpc.SetTopology(Topology(**TOPOLOGY))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    response = context_client_grpc.SetDevice(Device(**DEVICE_R1))
+    assert response.device_uuid.uuid == DEVICE_R1_UUID
+
+    response = context_client_grpc.SetDevice(Device(**DEVICE_R2))
+    assert response.device_uuid.uuid == DEVICE_R2_UUID
+
+    response = context_client_grpc.SetDevice(Device(**DEVICE_R3))
+    assert response.device_uuid.uuid == DEVICE_R3_UUID
+
+    response = context_client_grpc.SetService(Service(**SERVICE_R1_R2))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.service_uuid.uuid == SERVICE_R1_R2_UUID
+
+    CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT)
+    CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R2_ID)
+    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    response = context_client_grpc.SetService(Service(**SERVICE_R2_R3))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.service_uuid.uuid == SERVICE_R2_R3_UUID
+
+    CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT)
+    CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R2_R3_ID)
+    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    response = context_client_grpc.SetService(Service(**SERVICE_R1_R3))
+    assert response.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.service_uuid.uuid == SERVICE_R1_R3_UUID
+
+    CONTEXT_WITH_SERVICE = copy.deepcopy(CONTEXT)
+    CONTEXT_WITH_SERVICE['service_ids'].append(SERVICE_R1_R3_ID)
+    response = context_client_grpc.SetContext(Context(**CONTEXT_WITH_SERVICE))
+    assert response.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
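+    # 11 events expected: 1 context create, 1 topology create, 3 device creates,
+    # and 3 pairs of (service create, context update) from the blocks above.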
+    events = events_collector.get_events(block=True, count=11)
+
+    assert isinstance(events[0], ContextEvent)
+    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[0].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    assert isinstance(events[1], TopologyEvent)
+    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[1].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[1].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    assert isinstance(events[2], DeviceEvent)
+    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[2].device_id.device_uuid.uuid == DEVICE_R1_UUID
+
+    assert isinstance(events[3], DeviceEvent)
+    assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[3].device_id.device_uuid.uuid == DEVICE_R2_UUID
+
+    assert isinstance(events[4], DeviceEvent)
+    assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[4].device_id.device_uuid.uuid == DEVICE_R3_UUID
+
+    assert isinstance(events[5], ServiceEvent)
+    assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[5].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[5].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
+
+    assert isinstance(events[6], ContextEvent)
+    assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    assert events[6].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    assert isinstance(events[7], ServiceEvent)
+    assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[7].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[7].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID
+
+    assert isinstance(events[8], ContextEvent)
+    assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    assert isinstance(events[9], ServiceEvent)
+    assert events[9].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert events[9].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[9].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID
+
+    assert isinstance(events[10], ContextEvent)
+    assert events[10].event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    assert events[10].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID))
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    assert e.value.details() == 'Connection({:s}) not found'.format(CONNECTION_R1_R3_UUID)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connection_ids) == 0
+
+    response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connections) == 0
+
+    # ----- Dump state of database before create the object ------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 187
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    with pytest.raises(grpc.RpcError) as e:
+        WRONG_CONNECTION = copy.deepcopy(CONNECTION_R1_R3)
+        WRONG_CONNECTION['path_hops_endpoint_ids'][0]\
+            ['topology_id']['context_id']['context_uuid']['uuid'] = 'wrong-context-uuid'
+        context_client_grpc.SetConnection(Connection(**WRONG_CONNECTION))
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    # TODO: should we check that all endpoints belong to same topology?
+    # TODO: should we check that endpoints form links over the topology?
+    msg = 'EndPoint({:s}/{:s}:wrong-context-uuid/{:s}) not found'.format(
+        DEVICE_R1_UUID, WRONG_CONNECTION['path_hops_endpoint_ids'][0]['endpoint_uuid']['uuid'], DEFAULT_TOPOLOGY_UUID)
+    assert e.value.details() == msg
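+    # A possible shape for the TODO checks above (illustrative sketch only; it
+    # reuses the request structure already exercised in this test):
+    #   topology_uuids = {ep['topology_id']['topology_uuid']['uuid']
+    #                     for ep in CONNECTION_R1_R3['path_hops_endpoint_ids']}
+    #   assert len(topology_uuids) == 1   # all path hops lie in one topology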
+
+    response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3))
+    assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    event = events_collector.get_event(block=True)
+    assert isinstance(event, ConnectionEvent)
+    assert event.event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetConnection(Connection(**CONNECTION_R1_R3))
+    assert response.connection_uuid.uuid == CONNECTION_R1_R3_UUID
+
+    # ----- Check update event -----------------------------------------------------------------------------------------
+    event = events_collector.get_event(block=True)
+    assert isinstance(event, ConnectionEvent)
+    assert event.event.event_type == EventTypeEnum.EVENTTYPE_UPDATE
+    assert event.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
+
+    # ----- Dump state of database after create/update the object ------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 203
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetConnection(ConnectionId(**CONNECTION_R1_R3_ID))
+    assert response.connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
+    assert response.service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert response.service_id.service_uuid.uuid == SERVICE_R1_R3_UUID
+    assert len(response.path_hops_endpoint_ids) == 6
+    assert len(response.sub_service_ids) == 2
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client_grpc.ListConnectionIds(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connection_ids) == 1
+    assert response.connection_ids[0].connection_uuid.uuid == CONNECTION_R1_R3_UUID
+
+    response = context_client_grpc.ListConnections(ServiceId(**SERVICE_R1_R3_ID))
+    assert len(response.connections) == 1
+    assert response.connections[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
+    assert len(response.connections[0].path_hops_endpoint_ids) == 6
+    assert len(response.connections[0].sub_service_ids) == 2
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client_grpc.RemoveConnection(ConnectionId(**CONNECTION_R1_R3_ID))
+    context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R3_ID))
+    context_client_grpc.RemoveService(ServiceId(**SERVICE_R2_R3_ID))
+    context_client_grpc.RemoveService(ServiceId(**SERVICE_R1_R2_ID))
+    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R1_ID))
+    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R2_ID))
+    context_client_grpc.RemoveDevice(DeviceId(**DEVICE_R3_ID))
+    context_client_grpc.RemoveTopology(TopologyId(**TOPOLOGY_ID))
+    context_client_grpc.RemoveContext(ContextId(**CONTEXT_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
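+    # 9 events expected: 1 connection, 3 services, 3 devices, 1 topology, 1 context.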
+    events = events_collector.get_events(block=True, count=9)
+
+    assert isinstance(events[0], ConnectionEvent)
+    assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[0].connection_id.connection_uuid.uuid == CONNECTION_R1_R3_UUID
+
+    assert isinstance(events[1], ServiceEvent)
+    assert events[1].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[1].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[1].service_id.service_uuid.uuid == SERVICE_R1_R3_UUID
+
+    assert isinstance(events[2], ServiceEvent)
+    assert events[2].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[2].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[2].service_id.service_uuid.uuid == SERVICE_R2_R3_UUID
+
+    assert isinstance(events[3], ServiceEvent)
+    assert events[3].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[3].service_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[3].service_id.service_uuid.uuid == SERVICE_R1_R2_UUID
+
+    assert isinstance(events[4], DeviceEvent)
+    assert events[4].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[4].device_id.device_uuid.uuid == DEVICE_R1_UUID
+
+    assert isinstance(events[5], DeviceEvent)
+    assert events[5].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[5].device_id.device_uuid.uuid == DEVICE_R2_UUID
+
+    assert isinstance(events[6], DeviceEvent)
+    assert events[6].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[6].device_id.device_uuid.uuid == DEVICE_R3_UUID
+
+    assert isinstance(events[7], TopologyEvent)
+    assert events[7].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[7].topology_id.context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+    assert events[7].topology_id.topology_uuid.uuid == DEFAULT_TOPOLOGY_UUID
+
+    assert isinstance(events[8], ContextEvent)
+    assert events[8].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    assert events[8].context_id.context_uuid.uuid == DEFAULT_CONTEXT_UUID
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    events_collector.stop()
+
+    # ----- Dump state of database after remove the object -------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+
+def test_grpc_policy(
+    context_client_grpc : ContextClient,                # pylint: disable=redefined-outer-name
+    context_s_mb : Tuple[Session, MessageBroker]):      # pylint: disable=redefined-outer-name
+    session = context_s_mb[0]
+
+    # ----- Clean the database -----------------------------------------------------------------------------------------
+    database = Database(session)
+    database.clear()
+
+    # ----- Initialize the EventsCollector -----------------------------------------------------------------------------
+    #events_collector = EventsCollector(context_client_grpc)
+    #events_collector.start()
+
+    # ----- Get when the object does not exist -------------------------------------------------------------------------
+    POLICY_ID = 'no-uuid'
+    DEFAULT_POLICY_ID = {'uuid': {'uuid': POLICY_ID}}
+
+    with pytest.raises(grpc.RpcError) as e:
+        context_client_grpc.GetPolicyRule(PolicyRuleId(**DEFAULT_POLICY_ID))
+
+    assert e.value.code() == grpc.StatusCode.NOT_FOUND
+    assert e.value.details() == 'PolicyRule({:s}) not found'.format(POLICY_ID)
+
+    # ----- List when the object does not exist ------------------------------------------------------------------------
+    response = context_client_grpc.ListPolicyRuleIds(Empty())
+    assert len(response.policyRuleIdList) == 0
+
+    response = context_client_grpc.ListPolicyRules(Empty())
+    assert len(response.policyRules) == 0
+
+    # ----- Dump state of database before create the object ------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+    # ----- Create the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE))
+    assert response.uuid.uuid == POLICY_RULE_UUID
+
+    # ----- Check create event -----------------------------------------------------------------------------------------
+    # events = events_collector.get_events(block=True, count=1)
+    # assert isinstance(events[0], PolicyEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_CREATE
+    # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID
+
+    # ----- Update the object ------------------------------------------------------------------------------------------
+    response = context_client_grpc.SetPolicyRule(PolicyRule(**POLICY_RULE))
+    assert response.uuid.uuid == POLICY_RULE_UUID
+
+    # ----- Dump state of database after create/update the object ------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 2
+
+    # ----- Get when the object exists ---------------------------------------------------------------------------------
+    response = context_client_grpc.GetPolicyRule(PolicyRuleId(**POLICY_RULE_ID))
+    assert response.device.policyRuleBasic.policyRuleId.uuid.uuid == POLICY_RULE_UUID
+
+    # ----- List when the object exists --------------------------------------------------------------------------------
+    response = context_client_grpc.ListPolicyRuleIds(Empty())
+    assert len(response.policyRuleIdList) == 1
+    assert response.policyRuleIdList[0].uuid.uuid == POLICY_RULE_UUID
+
+    response = context_client_grpc.ListPolicyRules(Empty())
+    assert len(response.policyRules) == 1
+
+    # ----- Remove the object ------------------------------------------------------------------------------------------
+    context_client_grpc.RemovePolicyRule(PolicyRuleId(**POLICY_RULE_ID))
+
+    # ----- Check remove event -----------------------------------------------------------------------------------------
+    # events = events_collector.get_events(block=True, count=2)
+
+    # assert isinstance(events[0], PolicyEvent)
+    # assert events[0].event.event_type == EventTypeEnum.EVENTTYPE_REMOVE
+    # assert events[0].policy_id.uuid.uuid == POLICY_RULE_UUID
+
+
+    # ----- Stop the EventsCollector -----------------------------------------------------------------------------------
+    # events_collector.stop()
+
+    # ----- Dump state of database after remove the object -------------------------------------------------------------
+    db_entries = database.dump_all()
+    LOGGER.info('----- Database Dump [{:3d} entries] -------------------------'.format(len(db_entries)))
+    for db_entry in db_entries:
+        LOGGER.info(db_entry)
+    LOGGER.info('-----------------------------------------------------------')
+    assert len(db_entries) == 0
+
+
+
+# ----- Test REST API methods ------------------------------------------------------------------------------------------
+
+def test_rest_populate_database(
+    context_s_mb : Tuple[Session, MessageBroker], # pylint: disable=redefined-outer-name
+    context_service_grpc : ContextService         # pylint: disable=redefined-outer-name
+    ):
+    database = Database(context_s_mb[0])
+    database.clear()
+    populate(LOCAL_HOST, GRPC_PORT)
+
+def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/context_ids')
+    validate_context_ids(reply)
+
+def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/contexts')
+    validate_contexts(reply)
+
+def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    reply = do_rest_request('/context/{:s}'.format(context_uuid))
+    validate_context(reply)
+
+def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid))
+    validate_topology_ids(reply)
+
+def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid))
+    validate_topologies(reply)
+
+def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID)
+    reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid))
+    validate_topology(reply, num_devices=3, num_links=3)
+
+def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid))
+    validate_service_ids(reply)
+
+def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    reply = do_rest_request('/context/{:s}/services'.format(context_uuid))
+    validate_services(reply)
+
+def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='')
+    reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid))
+    validate_service(reply)
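+
+# NOTE: quote(..., safe='') also percent-encodes '/', preventing UUIDs from
+# injecting extra path segments; the default safe='/' leaves '/' intact.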
+
+def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid))
+    #validate_slice_ids(reply)
+
+def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    reply = do_rest_request('/context/{:s}/slices'.format(context_uuid))
+    #validate_slices(reply)
+
+#def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+#    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+#    slice_uuid = urllib.parse.quote(SLICE_R1_R2_UUID, safe='')
+#    reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid))
+#    #validate_slice(reply)
+
+def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/device_ids')
+    validate_device_ids(reply)
+
+def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/devices')
+    validate_devices(reply)
+
+def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='')
+    reply = do_rest_request('/device/{:s}'.format(device_uuid))
+    validate_device(reply)
+
+def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/link_ids')
+    validate_link_ids(reply)
+
+def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/links')
+    validate_links(reply)
+
+def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='')
+    reply = do_rest_request('/link/{:s}'.format(link_uuid))
+    validate_link(reply)
+
+def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
+    reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid))
+    validate_connection_ids(reply)
+
+def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
+    service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
+    reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid))
+    validate_connections(reply)
+
+def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='')
+    reply = do_rest_request('/connection/{:s}'.format(connection_uuid))
+    validate_connection(reply)
+
+def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/policyrule_ids')
+    #validate_policyrule_ids(reply)
+
+def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+    reply = do_rest_request('/policyrules')
+    #validate_policyrules(reply)
+
+#def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
+#    policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='')
+#    reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid))
+#    #validate_policyrule(reply)
+
+
+# ----- Test misc. Context internal tools ------------------------------------------------------------------------------
+
+def test_tools_fast_string_hasher():
+    with pytest.raises(TypeError) as e:
+        fast_hasher(27)
+    assert str(e.value) == "data(27) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'int'>"
+
+    with pytest.raises(TypeError) as e:
+        fast_hasher({27})
+    assert str(e.value) == "data({27}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>"
+
+    with pytest.raises(TypeError) as e:
+        fast_hasher({'27'})
+    assert str(e.value) == "data({'27'}) must be " + FASTHASHER_DATA_ACCEPTED_FORMAT + ", found <class 'set'>"
+
+    with pytest.raises(TypeError) as e:
+        fast_hasher([27])
+    assert str(e.value) == "data[0](27) must be " + FASTHASHER_ITEM_ACCEPTED_FORMAT + ", found <class 'int'>"
+
+    fast_hasher('hello-world')
+    fast_hasher('hello-world'.encode('UTF-8'))
+    fast_hasher(['hello', 'world'])
+    fast_hasher(('hello', 'world'))
+    fast_hasher(['hello'.encode('UTF-8'), 'world'.encode('UTF-8')])
+    fast_hasher(('hello'.encode('UTF-8'), 'world'.encode('UTF-8')))
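+
+    # Accepted inputs, per the assertions above: a str or bytes value, or a
+    # list/tuple whose items are str or bytes; ints and sets raise TypeError.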
+"""
\ No newline at end of file
diff --git a/src/context/service/database/Base.py b/src/context/service/database/Base.py
deleted file mode 100644
index c64447da1a151a8c428cf0db6c4474ac781c34b5..0000000000000000000000000000000000000000
--- a/src/context/service/database/Base.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from sqlalchemy.ext.declarative import declarative_base
-Base = declarative_base()
diff --git a/src/context/service/database/ConfigModel.py b/src/context/service/database/ConfigModel.py
index 0de91c2df34aa63bcbc073b7de5970c97af99bb6..5f71119819a8f72cbb994c2b19f0bb5cbde57da4 100644
--- a/src/context/service/database/ConfigModel.py
+++ b/src/context/service/database/ConfigModel.py
@@ -19,7 +19,7 @@ from common.proto.context_pb2 import ConfigActionEnum
 from common.tools.grpc.Tools import grpc_message_to_json_string
 from sqlalchemy import Column, ForeignKey, INTEGER, CheckConstraint, Enum, String
 from sqlalchemy.dialects.postgresql import UUID, ARRAY
-from context.service.database.Base import Base
+from context.service.database._Base import _Base as Base
 from sqlalchemy.orm import relationship
 from context.service.Database import Database
 
diff --git a/src/context/service/database/ConnectionModel.py b/src/context/service/database/ConnectionModel.py
index 1147f3859af73234e6b6f7c001f632c38ea74913..e780ccb681101919dd7065ab6424dcb9033f521b 100644
--- a/src/context/service/database/ConnectionModel.py
+++ b/src/context/service/database/ConnectionModel.py
@@ -36,7 +36,7 @@ from .ConstraintModel import ConstraintsModel
 from .ContextModel import ContextModel
 from .Tools import grpc_to_enum
 from sqlalchemy.dialects.postgresql import UUID
-from context.service.database.Base import Base
+from context.service.database._Base import _Base as Base
 import enum
 LOGGER = logging.getLogger(__name__)
 
diff --git a/src/context/service/database/ConstraintModel.py b/src/context/service/database/ConstraintModel.py
index cf3b5f0d770ee8c7c59d7b1b115a18cb26688fed..30d9003009f2f804a168a3b709bb15565dae96ed 100644
--- a/src/context/service/database/ConstraintModel.py
+++ b/src/context/service/database/ConstraintModel.py
@@ -22,7 +22,7 @@ from .EndPointModel import EndPointModel
 from .Tools import fast_hasher, remove_dict_key
 from sqlalchemy import Column, ForeignKey, String, Float, CheckConstraint, Integer, Boolean, Enum
 from sqlalchemy.dialects.postgresql import UUID
-from context.service.database.Base import Base
+from context.service.database._Base import _Base as Base
 import enum
 
 LOGGER = logging.getLogger(__name__)
diff --git a/src/context/service/database/ContextModel.py b/src/context/service/database/ContextModel.py
index cde774fe49430bf9d4c347533747315562dcdccc..46f0741e5ce05e3489c36da1fe9a1cd448a075f2 100644
--- a/src/context/service/database/ContextModel.py
+++ b/src/context/service/database/ContextModel.py
@@ -13,29 +13,27 @@
 # limitations under the License.
 
 import logging
-from typing import Dict, List
-from sqlalchemy import Column
+from typing import Dict
+from sqlalchemy import Column, String
 from sqlalchemy.dialects.postgresql import UUID
-from context.service.database.Base import Base
-from sqlalchemy.orm import relationship
-
+from ._Base import _Base
+#from sqlalchemy.orm import relationship
 
 LOGGER = logging.getLogger(__name__)
 
-
-class ContextModel(Base):
-    __tablename__ = 'Context'
+class ContextModel(_Base):
+    __tablename__ = 'context'
     context_uuid = Column(UUID(as_uuid=False), primary_key=True)
+    context_name = Column(String(), nullable=False)
 
-    # Relationships
-    topology = relationship("TopologyModel", back_populates="context")
+    #topology = relationship('TopologyModel', back_populates='context')
 
     def dump_id(self) -> Dict:
         return {'context_uuid': {'uuid': self.context_uuid}}
 
-    @staticmethod
-    def main_pk_name():
-        return 'context_uuid'
+    #@staticmethod
+    #def main_pk_name():
+    #    return 'context_uuid'
 
     """    
     def dump_service_ids(self) -> List[Dict]:
@@ -50,8 +48,7 @@ class ContextModel(Base):
     """
 
     def dump(self, include_services=True, include_topologies=True) -> Dict:  # pylint: disable=arguments-differ
-        result = {'context_id': self.dump_id()}
+        result = {'context_id': self.dump_id(), 'name': self.context_name}
         # if include_services: result['service_ids'] = self.dump_service_ids()
         # if include_topologies: result['topology_ids'] = self.dump_topology_ids()
         return result
-
diff --git a/src/context/service/database/DeviceModel.py b/src/context/service/database/DeviceModel.py
index cb4517e68004614a9a630128c6ea69923da483c4..cb568e123f7f67b84e614845180001165f3e0172 100644
--- a/src/context/service/database/DeviceModel.py
+++ b/src/context/service/database/DeviceModel.py
@@ -20,7 +20,7 @@ from common.orm.backend.Tools import key_to_str
 from common.proto.context_pb2 import DeviceDriverEnum, DeviceOperationalStatusEnum
 from sqlalchemy import Column, ForeignKey, String, Enum
 from sqlalchemy.dialects.postgresql import UUID, ARRAY
-from context.service.database.Base import Base
+from context.service.database._Base import _Base as Base
 from sqlalchemy.orm import relationship
 from .Tools import grpc_to_enum
 
diff --git a/src/context/service/database/EndPointModel.py b/src/context/service/database/EndPointModel.py
index 540453970602374dfc213ad4effadfaee5c2a02e..38214aa9bd57556f617568c112786964d1b87a67 100644
--- a/src/context/service/database/EndPointModel.py
+++ b/src/context/service/database/EndPointModel.py
@@ -21,7 +21,7 @@ from common.proto.context_pb2 import EndPointId
 from .KpiSampleType import ORM_KpiSampleTypeEnum, grpc_to_enum__kpi_sample_type
 from sqlalchemy import Column, ForeignKey, String, Enum, ForeignKeyConstraint
 from sqlalchemy.dialects.postgresql import UUID
-from context.service.database.Base import Base
+from context.service.database._Base import _Base as Base
 from sqlalchemy.orm import relationship
 LOGGER = logging.getLogger(__name__)
 
diff --git a/src/context/service/database/LinkModel.py b/src/context/service/database/LinkModel.py
index 025709dfd7df6399793b7aeb05d53e98aa8f9168..6b768d1b7ba8e4eca5e35ec774d40f2d66aeac63 100644
--- a/src/context/service/database/LinkModel.py
+++ b/src/context/service/database/LinkModel.py
@@ -16,7 +16,7 @@ import logging, operator
 from typing import Dict, List
 from sqlalchemy import Column, ForeignKey
 from sqlalchemy.dialects.postgresql import UUID
-from context.service.database.Base import Base
+from context.service.database._Base import _Base as Base
 from sqlalchemy.orm import relationship
 
 LOGGER = logging.getLogger(__name__)
diff --git a/src/context/service/database/RelationModels.py b/src/context/service/database/RelationModels.py
index e69feadc4d6e6a259e1b15b8aa369ceadc705f68..61e05db0e4f02f342bd42fad413c279427e8fa9c 100644
--- a/src/context/service/database/RelationModels.py
+++ b/src/context/service/database/RelationModels.py
@@ -15,7 +15,7 @@
 import logging
 from sqlalchemy import Column, ForeignKey
 from sqlalchemy.dialects.postgresql import UUID
-from context.service.database.Base import Base
+from context.service.database._Base import _Base as Base
 
 LOGGER = logging.getLogger(__name__)
 #
diff --git a/src/context/service/database/ServiceModel.py b/src/context/service/database/ServiceModel.py
index 8f358be526091b5b99ed216843d4a2549c5cf287..20e10ddd5a9ecb5b62e22c4c082b3b3144a5a509 100644
--- a/src/context/service/database/ServiceModel.py
+++ b/src/context/service/database/ServiceModel.py
@@ -22,7 +22,7 @@ from .ConstraintModel import ConstraintsModel
 from .ContextModel import ContextModel
 from .Tools import grpc_to_enum
 from sqlalchemy.dialects.postgresql import UUID
-from context.service.database.Base import Base
+from context.service.database._Base import _Base as Base
 import enum
 LOGGER = logging.getLogger(__name__)
 
diff --git a/src/context/service/database/TopologyModel.py b/src/context/service/database/TopologyModel.py
index 063a1f51139ed05746b7b198b66cc7c957070417..0a56981638d51b6a4fced1620abd63618bf0e716 100644
--- a/src/context/service/database/TopologyModel.py
+++ b/src/context/service/database/TopologyModel.py
@@ -17,7 +17,7 @@ from typing import Dict, List
 from sqlalchemy.orm import relationship
 from sqlalchemy import Column, ForeignKey
 from sqlalchemy.dialects.postgresql import UUID
-from context.service.database.Base import Base
+from context.service.database._Base import _Base as Base
 LOGGER = logging.getLogger(__name__)
 
 class TopologyModel(Base):
diff --git a/src/context/service/database/_Base.py b/src/context/service/database/_Base.py
new file mode 100644
index 0000000000000000000000000000000000000000..49269be08f17bc6954da50e9169990d9a438eefe
--- /dev/null
+++ b/src/context/service/database/_Base.py
@@ -0,0 +1,27 @@
+# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import sqlalchemy
+from sqlalchemy.orm import declarative_base
+
+_Base = declarative_base()
+
+def rebuild_database(db_engine : sqlalchemy.engine.Engine, drop_if_exists : bool = False):
+    if drop_if_exists: _Base.metadata.drop_all(db_engine)
+    _Base.metadata.create_all(db_engine)
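+
+# Illustrative usage sketch (assumptions: the connection URL is a placeholder
+# and the sqlalchemy-cockroachdb dialect is installed):
+#   engine = sqlalchemy.create_engine('cockroachdb://tfs:tfs123@cockroachdb-public:26257/tfs_context')
+#   rebuild_database(engine, drop_if_exists=False)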
diff --git a/src/context/service/database/__init__.py b/src/context/service/database/__init__.py
index 70a33251242c51f49140e596b8208a19dd5245f7..27b5f5dd22d6a16809e219ebaa6526d249e5c2a8 100644
--- a/src/context/service/database/__init__.py
+++ b/src/context/service/database/__init__.py
@@ -12,3 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+from ._Base import _Base, rebuild_database
diff --git a/src/context/service/grpc_server/ContextServiceServicerImpl.py b/src/context/service/grpc_server/ContextServiceServicerImpl.py
deleted file mode 100644
index 4d7f06463edb4e319c23ddfbcccc31a9a846ed1f..0000000000000000000000000000000000000000
--- a/src/context/service/grpc_server/ContextServiceServicerImpl.py
+++ /dev/null
@@ -1,1213 +0,0 @@
-# Copyright 2021-2023 H2020 TeraFlow (https://www.teraflow-h2020.eu/)
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-import uuid
-
-import grpc, json, logging, operator, threading
-from typing import Iterator, List, Set, Tuple, Union
-from common.message_broker.MessageBroker import MessageBroker
-from context.service.Database import Database
-from common.tools.grpc.Tools import grpc_message_to_json_string
-
-from common.proto.context_pb2 import (
-    Connection, ConnectionEvent, ConnectionId, ConnectionIdList, ConnectionList,
-    Context, ContextEvent, ContextId, ContextIdList, ContextList,
-    Device, DeviceEvent, DeviceId, DeviceIdList, DeviceList,
-    Empty, EventTypeEnum,
-    Link, LinkEvent, LinkId, LinkIdList, LinkList,
-    Service, ServiceEvent, ServiceId, ServiceIdList, ServiceList,
-    Slice, SliceEvent, SliceId, SliceIdList, SliceList,
-    Topology, TopologyEvent, TopologyId, TopologyIdList, TopologyList,
-    ConfigActionEnum, Constraint)
-from common.proto.policy_pb2 import (PolicyRuleIdList, PolicyRuleId, PolicyRuleList, PolicyRule)
-from common.proto.context_pb2_grpc import ContextServiceServicer
-from common.proto.context_policy_pb2_grpc import ContextPolicyServiceServicer
-from common.rpc_method_wrapper.Decorator import create_metrics, safe_and_metered_rpc_method
-from common.rpc_method_wrapper.ServiceExceptions import InvalidArgumentException
-from sqlalchemy.orm import Session, contains_eager, selectinload
-from common.rpc_method_wrapper.ServiceExceptions import NotFoundException
-from context.service.database.ConfigModel import grpc_config_rules_to_raw
-from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers, grpc_to_enum__device_driver, DriverModel
-from context.service.database.ConfigModel import ConfigModel, ORM_ConfigActionEnum, ConfigRuleModel
-
-from common.orm.backend.Tools import key_to_str
-
-from ..database.KpiSampleType import grpc_to_enum__kpi_sample_type
-
-"""
-from context.service.database.ConnectionModel import ConnectionModel, set_path
-from context.service.database.ConstraintModel import set_constraints
-from common.tools.grpc.Tools import grpc_message_to_json
-from context.service.database.ConfigModel import update_config
-from context.service.database.ConnectionModel import ConnectionModel, set_path
-from context.service.database.ConstraintModel import set_constraints
-from context.service.database.ContextModel import ContextModel
-from context.service.database.PolicyRuleModel import PolicyRuleModel
-from context.service.database.DeviceModel import DeviceModel, grpc_to_enum__device_operational_status, set_drivers
-from context.service.database.EndPointModel import EndPointModel, set_kpi_sample_types
-from context.service.database.Events import notify_event
-from context.service.database.RelationModels import (
-    ConnectionSubServiceModel, LinkEndPointModel, ServiceEndPointModel, SliceEndPointModel, SliceServiceModel,
-    SliceSubSliceModel, TopologyDeviceModel, TopologyLinkModel)
-from context.service.database.ServiceModel import (
-    ServiceModel, grpc_to_enum__service_status, grpc_to_enum__service_type)
-from context.service.database.SliceModel import SliceModel, grpc_to_enum__slice_status
-from context.service.database.TopologyModel import TopologyModel
-"""
-from context.service.database.ContextModel import ContextModel
-from context.service.database.TopologyModel import TopologyModel
-from context.service.database.Events import notify_event
-from context.service.database.EndPointModel import EndPointModel
-from context.service.database.EndPointModel import KpiSampleTypeModel
-from context.service.database.LinkModel import LinkModel
-from context.service.database.ServiceModel import ServiceModel
-from context.service.database.ConstraintModel import ConstraintModel, ConstraintsModel, Union_ConstraintModel, CONSTRAINT_PARSERS
-from context.service.database.RelationModels import (TopologyDeviceModel, TopologyLinkModel, LinkEndPointModel)
-
-from .Constants import (
-    CONSUME_TIMEOUT, TOPIC_CONNECTION, TOPIC_CONTEXT, TOPIC_DEVICE, TOPIC_LINK, TOPIC_SERVICE, TOPIC_SLICE,
-    TOPIC_TOPOLOGY)
-
-LOGGER = logging.getLogger(__name__)
-
-SERVICE_NAME = 'Context'
-METHOD_NAMES = [
-    'ListConnectionIds', 'ListConnections', 'GetConnection', 'SetConnection', 'RemoveConnection', 'GetConnectionEvents',
-    'ListContextIds',    'ListContexts',    'GetContext',    'SetContext',    'RemoveContext',    'GetContextEvents',
-    'ListTopologyIds',   'ListTopologies',  'GetTopology',   'SetTopology',   'RemoveTopology',   'GetTopologyEvents',
-    'ListDeviceIds',     'ListDevices',     'GetDevice',     'SetDevice',     'RemoveDevice',     'GetDeviceEvents',
-    'ListLinkIds',       'ListLinks',       'GetLink',       'SetLink',       'RemoveLink',       'GetLinkEvents',
-    'ListServiceIds',    'ListServices',    'GetService',    'SetService',    'RemoveService',    'GetServiceEvents',
-    'ListSliceIds',      'ListSlices',      'GetSlice',      'SetSlice',      'RemoveSlice',      'GetSliceEvents',
-    'ListPolicyRuleIds', 'ListPolicyRules', 'GetPolicyRule', 'SetPolicyRule', 'RemovePolicyRule',
-    'UnsetService',      'UnsetSlice',
-]
-METRICS = create_metrics(SERVICE_NAME, METHOD_NAMES)
-
-class ContextServiceServicerImpl(ContextServiceServicer, ContextPolicyServiceServicer):
-    #def __init__(self, session : Session, messagebroker : MessageBroker):
-    def __init__(self, database : Database, messagebroker : MessageBroker):
-        LOGGER.debug('Creating Servicer...')
-        self.lock = threading.Lock()
-        self.session = session
-        self.database = Database(session)
-        self.messagebroker = messagebroker
-        LOGGER.debug('Servicer Created')
-
-
-    # ----- Context ----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListContextIds(self, request: Empty, context : grpc.ServicerContext) -> ContextIdList:
-        with self.session() as session:
-            result = session.query(ContextModel).all()
-
-        return ContextIdList(context_ids=[row.dump_id() for row in result])
-
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListContexts(self, request: Empty, context : grpc.ServicerContext) -> ContextList:
-        with self.session() as session:
-            result = session.query(ContextModel).all()
-
-        return ContextList(contexts=[row.dump() for row in result])
-
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetContext(self, request: ContextId, context : grpc.ServicerContext) -> Context:
-        context_uuid = request.context_uuid.uuid
-        with self.session() as session:
-            result = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none()
-
-        if not result:
-            raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid)
-
-        return Context(**result.dump())
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def SetContext(self, request: Context, context : grpc.ServicerContext) -> ContextId:
-        context_uuid = request.context_id.context_uuid.uuid
-
-        for i, topology_id in enumerate(request.topology_ids):
-            topology_context_uuid = topology_id.context_id.context_uuid.uuid
-            if topology_context_uuid != context_uuid:
-                raise InvalidArgumentException(
-                    'request.topology_ids[{:d}].context_id.context_uuid.uuid'.format(i), topology_context_uuid,
-                    ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)])
-
-        for i, service_id in enumerate(request.service_ids):
-            service_context_uuid = service_id.context_id.context_uuid.uuid
-            if service_context_uuid != context_uuid:
-                raise InvalidArgumentException(
-                    'request.service_ids[{:d}].context_id.context_uuid.uuid'.format(i), service_context_uuid,
-                    ['should be == {:s}({:s})'.format('request.context_id.context_uuid.uuid', context_uuid)])
-
-        context_add = ContextModel(context_uuid=context_uuid)
-
-        updated = True
-        with self.session() as session:
-            result = session.query(ContextModel).filter_by(context_uuid=context_uuid).all()
-            if not result:
-                updated = False
-            session.merge(context_add)
-            session.commit()
-
-
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        dict_context_id = context_add.dump_id()
-        notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': dict_context_id})
-        return ContextId(**context_add.dump_id())
-
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RemoveContext(self, request: ContextId, context : grpc.ServicerContext) -> Empty:
-        context_uuid = request.context_uuid.uuid
-
-        with self.session() as session:
-            result = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none()
-            if not result:
-                return Empty()
-            session.query(ContextModel).filter_by(context_uuid=context_uuid).delete()
-            session.commit()
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_CONTEXT, event_type, {'context_id': result.dump_id()})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetContextEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ContextEvent]:
-        for message in self.messagebroker.consume({TOPIC_CONTEXT}, consume_timeout=CONSUME_TIMEOUT):
-            yield ContextEvent(**json.loads(message.content))
-
-
-    # ----- Topology ---------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListTopologyIds(self, request: ContextId, context : grpc.ServicerContext) -> TopologyIdList:
-        context_uuid = request.context_uuid.uuid
-
-        with self.session() as session:
-            result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(context_uuid=context_uuid).one_or_none()
-            if not result:
-                raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid)
-
-            db_topologies = result.topology
-            return TopologyIdList(topology_ids=[db_topology.dump_id() for db_topology in db_topologies])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListTopologies(self, request: ContextId, context : grpc.ServicerContext) -> TopologyList:
-        context_uuid = request.context_uuid.uuid
-
-        with self.session() as session:
-            result = session.query(ContextModel).options(selectinload(ContextModel.topology)).filter_by(
-                context_uuid=context_uuid).one_or_none()
-            if not result:
-                raise NotFoundException(ContextModel.__name__.replace('Model', ''), context_uuid)
-
-            db_topologies = result.topology
-            return TopologyList(topologies=[db_topology.dump() for db_topology in db_topologies])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Topology:
-        topology_uuid = request.topology_uuid.uuid
-
-        result, dump = self.database.get_object(TopologyModel, topology_uuid, True)
-        with self.session() as session:
-            devs = None
-            links = None
-
-            filt = {'topology_uuid': topology_uuid}
-            topology_devices = session.query(TopologyDeviceModel).filter_by(**filt).all()
-            if topology_devices:
-                devs = []
-                for td in topology_devices:
-                    filt = {'device_uuid': td.device_uuid}
-                    devs.append(session.query(DeviceModel).filter_by(**filt).one())
-
-            filt = {'topology_uuid': topology_uuid}
-            topology_links = session.query(TopologyLinkModel).filter_by(**filt).all()
-            if topology_links:
-                links = []
-                for tl in topology_links:
-                    filt = {'link_uuid': tl.link_uuid}
-                    links.append(session.query(LinkModel).filter_by(**filt).one())
-
-            return Topology(**result.dump(devs, links))
-
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def SetTopology(self, request: Topology, context : grpc.ServicerContext) -> TopologyId:
-        context_uuid = request.topology_id.context_id.context_uuid.uuid
-        topology_uuid = request.topology_id.topology_uuid.uuid
-        with self.session() as session:
-            topology_add = TopologyModel(topology_uuid=topology_uuid, context_uuid=context_uuid)
-            updated = True
-            db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none()
-            if not db_topology:
-                updated = False
-            session.merge(topology_add)
-            session.commit()
-            db_topology = session.query(TopologyModel).join(TopologyModel.context).filter(TopologyModel.topology_uuid==topology_uuid).one_or_none()
-
-            for device_id in request.device_ids:
-                device_uuid = device_id.device_uuid.uuid
-                td = TopologyDeviceModel(topology_uuid=topology_uuid, device_uuid=device_uuid)
-                result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(td)
-
-
-            for link_id in request.link_ids:
-                link_uuid = link_id.link_uuid.uuid
-                db_link = session.query(LinkModel).filter(
-                        LinkModel.link_uuid == link_uuid).one_or_none()
-                tl = TopologyLinkModel(topology_uuid=topology_uuid, link_uuid=link_uuid)
-                result: Tuple[TopologyDeviceModel, bool] = self.database.create_or_update(tl)
-
-
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_topology_id = db_topology.dump_id()
-            notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id})
-            return TopologyId(**dict_topology_id)
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RemoveTopology(self, request: TopologyId, context : grpc.ServicerContext) -> Empty:
-        context_uuid = request.context_id.context_uuid.uuid
-        topology_uuid = request.topology_uuid.uuid
-
-        with self.session() as session:
-            result = session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).one_or_none()
-            if not result:
-                return Empty()
-            dict_topology_id = result.dump_id()
-
-            session.query(TopologyModel).filter_by(topology_uuid=topology_uuid, context_uuid=context_uuid).delete()
-            session.commit()
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_TOPOLOGY, event_type, {'topology_id': dict_topology_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetTopologyEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[TopologyEvent]:
-        for message in self.messagebroker.consume({TOPIC_TOPOLOGY}, consume_timeout=CONSUME_TIMEOUT):
-            yield TopologyEvent(**json.loads(message.content))
-
-
-    # ----- Device -----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListDeviceIds(self, request: Empty, context : grpc.ServicerContext) -> DeviceIdList:
-        with self.session() as session:
-            result = session.query(DeviceModel).all()
-            return DeviceIdList(device_ids=[device.dump_id() for device in result])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListDevices(self, request: Empty, context : grpc.ServicerContext) -> DeviceList:
-        with self.session() as session:
-            result = session.query(DeviceModel).all()
-            return DeviceList(devices=[device.dump() for device in result])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Device:
-        device_uuid = request.device_uuid.uuid
-        with self.session() as session:
-            result = session.query(DeviceModel).filter(DeviceModel.device_uuid == device_uuid).one_or_none()
-            if not result:
-                raise NotFoundException(DeviceModel.__name__.replace('Model', ''), device_uuid)
-
-            rd = result.dump(include_config_rules=True, include_drivers=True, include_endpoints=True)
-
-            rt = Device(**rd)
-
-            return rt
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def SetDevice(self, request: Device, context : grpc.ServicerContext) -> DeviceId:
-        with self.session() as session:
-            device_uuid = request.device_id.device_uuid.uuid
-
-            for i, endpoint in enumerate(request.device_endpoints):
-                endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
-                if len(endpoint_device_uuid) == 0:
-                    endpoint_device_uuid = device_uuid
-                if device_uuid != endpoint_device_uuid:
-                    raise InvalidArgumentException(
-                        'request.device_endpoints[{:d}].device_id.device_uuid.uuid'.format(i), endpoint_device_uuid,
-                        ['should be == {:s}({:s})'.format('request.device_id.device_uuid.uuid', device_uuid)])
-
-            config_rules = grpc_config_rules_to_raw(request.device_config.config_rules)
-            running_config_result = self.update_config(session, device_uuid, 'device', config_rules)
-            db_running_config = running_config_result[0][0]
-            config_uuid = db_running_config.config_uuid
-            running_config_rules = update_config(
-                self.database, device_uuid, 'device', request.device_config.config_rules)
-            db_running_config = running_config_rules[0][0]
-
-            new_obj = DeviceModel(**{
-                'device_uuid'               : device_uuid,
-                'device_type'               : request.device_type,
-                'device_operational_status' : grpc_to_enum__device_operational_status(request.device_operational_status),
-                'device_config_uuid'        : config_uuid,
-            })
-            result: Tuple[DeviceModel, bool] = self.database.create_or_update(new_obj)
-            db_device, updated = result
-
-            self.set_drivers(db_device, request.device_drivers)
-
-            for i, endpoint in enumerate(request.device_endpoints):
-                endpoint_uuid = endpoint.endpoint_id.endpoint_uuid.uuid
-                # endpoint_device_uuid = endpoint.endpoint_id.device_id.device_uuid.uuid
-                # if len(endpoint_device_uuid) == 0:
-                #     endpoint_device_uuid = device_uuid
-
-                endpoint_attributes = {
-                    'device_uuid'  : db_device.device_uuid,
-                    'endpoint_uuid': endpoint_uuid,
-                    'endpoint_type': endpoint.endpoint_type,
-                }
-
-                endpoint_topology_context_uuid = endpoint.endpoint_id.topology_id.context_id.context_uuid.uuid
-                endpoint_topology_uuid = endpoint.endpoint_id.topology_id.topology_uuid.uuid
-                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                    # str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-
-                    db_topology, topo_dump = self.database.get_object(TopologyModel, endpoint_topology_uuid)
-
-                    topology_device = TopologyDeviceModel(
-                        topology_uuid=endpoint_topology_uuid,
-                        device_uuid=db_device.device_uuid)
-                    self.database.create_or_update(topology_device)
-
-                    endpoint_attributes['topology_uuid'] = db_topology.topology_uuid
-                result : Tuple[EndPointModel, bool] = update_or_create_object(
-                    self.database, EndPointModel, str_endpoint_key, endpoint_attributes)
-                db_endpoint, endpoint_updated = result # pylint: disable=unused-variable
-
-                new_endpoint = EndPointModel(**endpoint_attributes)
-                result: Tuple[EndPointModel, bool] = self.database.create_or_update(new_endpoint)
-                db_endpoint, updated = result
-
-                self.set_kpi_sample_types(db_endpoint, endpoint.kpi_sample_types)
-
-            # event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_device_id = db_device.dump_id()
-            # notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id})
-
-            return DeviceId(**dict_device_id)
-
-    def set_kpi_sample_types(self, db_endpoint: EndPointModel, grpc_endpoint_kpi_sample_types):
-        db_endpoint_pk = db_endpoint.endpoint_uuid
-        for kpi_sample_type in grpc_endpoint_kpi_sample_types:
-            orm_kpi_sample_type = grpc_to_enum__kpi_sample_type(kpi_sample_type)
-            # str_endpoint_kpi_sample_type_key = key_to_str([db_endpoint_pk, orm_kpi_sample_type.name])
-            data = {'endpoint_uuid': db_endpoint_pk,
-                    'kpi_sample_type': orm_kpi_sample_type.name,
-                    'kpi_uuid': str(uuid.uuid4())}
-            db_endpoint_kpi_sample_type = KpiSampleTypeModel(**data)
-            self.database.create(db_endpoint_kpi_sample_type)
-
-    def set_drivers(self, db_device: DeviceModel, grpc_device_drivers):
-        db_device_pk = db_device.device_uuid
-        for driver in grpc_device_drivers:
-            orm_driver = grpc_to_enum__device_driver(driver)
-            str_device_driver_key = key_to_str([db_device_pk, orm_driver.name])
-            driver_config = {
-                # "driver_uuid": str(uuid.uuid4()),
-                "device_uuid": db_device_pk,
-                "driver": orm_driver.name
-            }
-            db_device_driver = DriverModel(**driver_config)
-            db_device_driver.device_fk = db_device
-            db_device_driver.driver = orm_driver
-
-            self.database.create_or_update(db_device_driver)
-
-    def update_config(
-            self, session, db_parent_pk: str, config_name: str,
-            raw_config_rules: List[Tuple[ORM_ConfigActionEnum, str, str]]
-    ) -> List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]]:
-
-        created = False
-
-        db_config = session.query(ConfigModel).filter_by(**{ConfigModel.main_pk_name(): db_parent_pk}).one_or_none()
-        if not db_config:
-            db_config = ConfigModel()
-            setattr(db_config, ConfigModel.main_pk_name(), db_parent_pk)
-            session.add(db_config)
-            session.commit()
-            created = True
-
-        LOGGER.info('UPDATED-CONFIG: {}'.format(db_config.dump()))
-
-        db_objects: List[Tuple[Union[ConfigModel, ConfigRuleModel], bool]] = [(db_config, created)]
-
-        for position, (action, resource_key, resource_value) in enumerate(raw_config_rules):
-            if action == ORM_ConfigActionEnum.SET:
-                result : Tuple[ConfigRuleModel, bool] = self.set_config_rule(
-                    db_config, position, resource_key, resource_value)
-                db_config_rule, updated = result
-                db_objects.append((db_config_rule, updated))
-            elif action == ORM_ConfigActionEnum.DELETE:
-                self.delete_config_rule(db_config, resource_key)
-            else:
-                msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})'
-                raise AttributeError(
-                    msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value)))
-
-        return db_objects
-
-    def set_config_rule(self, db_config: ConfigModel, position: int, resource_key: str, resource_value: str,
-    ):  # -> Tuple[ConfigRuleModel, bool]:
-
-        from src.context.service.database.Tools import fast_hasher
-        str_rule_key_hash = fast_hasher(resource_key)
-        str_config_rule_key = key_to_str([db_config.config_uuid, str_rule_key_hash], separator=':')
-        pk = str(uuid.uuid5(uuid.UUID('9566448d-e950-425e-b2ae-7ead656c7e47'), str_config_rule_key))
-        data = {'config_rule_uuid': pk, 'config_uuid': db_config.config_uuid, 'position': position,
-                'action': ORM_ConfigActionEnum.SET, 'key': resource_key, 'value': resource_value}
-        to_add = ConfigRuleModel(**data)
-
-        result, updated = self.database.create_or_update(to_add)
-        return result, updated
-
-    def delete_config_rule(
-            self, db_config: ConfigModel, resource_key: str
-    ) -> None:
-
-        from src.context.service.database.Tools import fast_hasher
-        str_rule_key_hash = fast_hasher(resource_key)
-        str_config_rule_key = key_to_str([db_config.pk, str_rule_key_hash], separator=':')
-
-        db_config_rule = self.database.get_object(ConfigRuleModel, str_config_rule_key, raise_if_not_found=False)
-
-        if db_config_rule is None:
-            return
-        db_config_rule.delete()
-
-    def delete_all_config_rules(self, db_config: ConfigModel) -> None:
-
-        db_config_rule_pks = db_config.references(ConfigRuleModel)
-        for pk, _ in db_config_rule_pks: ConfigRuleModel(self.database, pk).delete()
-
-        """
-        for position, (action, resource_key, resource_value) in enumerate(raw_config_rules):
-            if action == ORM_ConfigActionEnum.SET:
-                result: Tuple[ConfigRuleModel, bool] = set_config_rule(
-                    database, db_config, position, resource_key, resource_value)
-                db_config_rule, updated = result
-                db_objects.append((db_config_rule, updated))
-            elif action == ORM_ConfigActionEnum.DELETE:
-                delete_config_rule(database, db_config, resource_key)
-            else:
-                msg = 'Unsupported action({:s}) for resource_key({:s})/resource_value({:s})'
-                raise AttributeError(
-                    msg.format(str(ConfigActionEnum.Name(action)), str(resource_key), str(resource_value)))
-
-        return db_objects
-        """
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RemoveDevice(self, request: DeviceId, context : grpc.ServicerContext) -> Empty:
-        device_uuid = request.device_uuid.uuid
-
-        with self.session() as session:
-            db_device = session.query(DeviceModel).filter_by(device_uuid=device_uuid).one_or_none()
-
-            session.query(TopologyDeviceModel).filter_by(device_uuid=device_uuid).delete()
-            session.query(ConfigRuleModel).filter_by(config_uuid=db_device.device_config_uuid).delete()
-            session.query(ConfigModel).filter_by(config_uuid=db_device.device_config_uuid).delete()
-
-            if not db_device:
-                return Empty()
-            dict_device_id = db_device.dump_id()
-
-            session.query(DeviceModel).filter_by(device_uuid=device_uuid).delete()
-            session.commit()
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_DEVICE, event_type, {'device_id': dict_device_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetDeviceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[DeviceEvent]:
-        for message in self.messagebroker.consume({TOPIC_DEVICE}, consume_timeout=CONSUME_TIMEOUT):
-            yield DeviceEvent(**json.loads(message.content))
-
-
-
-
-    # ----- Link -------------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListLinkIds(self, request: Empty, context : grpc.ServicerContext) -> LinkIdList:
-        with self.session() as session:
-            result = session.query(LinkModel).all()
-            return LinkIdList(link_ids=[db_link.dump_id() for db_link in result])
-
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListLinks(self, request: Empty, context : grpc.ServicerContext) -> LinkList:
-        with self.session() as session:
-            link_list = LinkList()
-
-            db_links = session.query(LinkModel).all()
-
-            for db_link in db_links:
-                link_uuid = db_link.link_uuid
-                filt = {'link_uuid': link_uuid}
-                link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all()
-                if link_endpoints:
-                    eps = []
-                    for lep in link_endpoints:
-                        filt = {'endpoint_uuid': lep.endpoint_uuid}
-                        eps.append(session.query(EndPointModel).filter_by(**filt).one())
-                    link_list.links.append(Link(**db_link.dump(eps)))
-
-            return link_list
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetLink(self, request: LinkId, context : grpc.ServicerContext) -> Link:
-        link_uuid = request.link_uuid.uuid
-        with self.session() as session:
-            result = session.query(LinkModel).filter(LinkModel.link_uuid == link_uuid).one_or_none()
-            if not result:
-                raise NotFoundException(LinkModel.__name__.replace('Model', ''), link_uuid)
-
-            filt = {'link_uuid': link_uuid}
-            link_endpoints = session.query(LinkEndPointModel).filter_by(**filt).all()
-            if link_endpoints:
-                eps = []
-                for lep in link_endpoints:
-                    filt = {'endpoint_uuid': lep.endpoint_uuid}
-                    eps.append(session.query(EndPointModel).filter_by(**filt).one())
-                return Link(**result.dump(eps))
-
-            rd = result.dump()
-            rt = Link(**rd)
-
-            return rt
-
-
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def SetLink(self, request: Link, context : grpc.ServicerContext) -> LinkId:
-        link_uuid = request.link_id.link_uuid.uuid
-
-        new_link = LinkModel(**{
-            'link_uuid': link_uuid
-        })
-        result: Tuple[LinkModel, bool] = self.database.create_or_update(new_link)
-        db_link, updated = result
-
-        for endpoint_id in request.link_endpoint_ids:
-            endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
-            endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
-            endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
-            endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-
-
-            db_topology = None
-            if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                db_topology: TopologyModel = self.database.get_object(TopologyModel, endpoint_topology_uuid)
-                # check device is in topology
-                self.database.get_object(TopologyDeviceModel, endpoint_device_uuid)
-
-
-            link_endpoint = LinkEndPointModel(link_uuid=link_uuid, endpoint_uuid=endpoint_uuid)
-            result: Tuple[LinkEndPointModel, bool] = self.database.create_or_update(link_endpoint)
-
-            if db_topology is not None:
-                topology_link = TopologyLinkModel(topology_uuid=endpoint_topology_uuid, link_uuid=link_uuid)
-                result: Tuple[TopologyLinkModel, bool] = self.database.create_or_update(topology_link)
-
-        event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-        dict_link_id = db_link.dump_id()
-        notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id})
-        return LinkId(**dict_link_id)
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RemoveLink(self, request: LinkId, context : grpc.ServicerContext) -> Empty:
-        with self.session() as session:
-            link_uuid = request.link_uuid.uuid
-
-            session.query(TopologyLinkModel).filter_by(link_uuid=link_uuid).delete()
-            session.query(LinkEndPointModel).filter_by(link_uuid=link_uuid).delete()
-
-            result = session.query(LinkModel).filter_by(link_uuid=link_uuid).one_or_none()
-            if not result:
-                return Empty()
-            dict_link_id = result.dump_id()
-
-            session.query(LinkModel).filter_by(link_uuid=link_uuid).delete()
-            session.commit()
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_LINK, event_type, {'link_id': dict_link_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetLinkEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[LinkEvent]:
-        for message in self.messagebroker.consume({TOPIC_LINK}, consume_timeout=CONSUME_TIMEOUT):
-            yield LinkEvent(**json.loads(message.content))
-
-
-    # ----- Service ----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListServiceIds(self, request: ContextId, context : grpc.ServicerContext) -> ServiceIdList:
-        context_uuid = request.context_uuid.uuid
-
-        with self.session() as session:
-            db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
-            return ServiceIdList(service_ids=[db_service.dump_id() for db_service in db_services])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListServices(self, request: ContextId, context : grpc.ServicerContext) -> ServiceList:
-        context_uuid = request.context_uuid.uuid
-
-        with self.session() as session:
-            db_services = session.query(ServiceModel).filter_by(context_uuid=context_uuid).all()
-            return ServiceList(services=[db_service.dump() for db_service in db_services])
-
-
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetService(self, request: ServiceId, context : grpc.ServicerContext) -> Service:
-        service_uuid = request.service_uuid.uuid
-        with self.session() as session:
-            result = session.query(ServiceModel).filter_by(service_uuid=service_uuid).one_or_none()
-
-        if not result:
-            raise NotFoundException(ServiceModel.__name__.replace('Model', ''), service_uuid)
-
-        return Service(**result.dump())
-
-    def set_constraint(self, db_constraints: ConstraintsModel, grpc_constraint: Constraint, position: int
-    ) -> Tuple[Union_ConstraintModel, bool]:
-        with self.session() as session:
-
-            grpc_constraint_kind = str(grpc_constraint.WhichOneof('constraint'))
-
-            parser = CONSTRAINT_PARSERS.get(grpc_constraint_kind)
-            if parser is None:
-                raise NotImplementedError('Constraint of kind {:s} is not implemented: {:s}'.format(
-                    grpc_constraint_kind, grpc_message_to_json_string(grpc_constraint)))
-
-            # create specific constraint
-            constraint_class, str_constraint_id, constraint_data, constraint_kind = parser(grpc_constraint)
-            str_constraint_id = str(uuid.uuid4())
-            LOGGER.info('str_constraint_id: {}'.format(str_constraint_id))
-            # str_constraint_key_hash = fast_hasher(':'.join([constraint_kind.value, str_constraint_id]))
-            # str_constraint_key = key_to_str([db_constraints.pk, str_constraint_key_hash], separator=':')
-
-            # result : Tuple[Union_ConstraintModel, bool] = update_or_create_object(
-            #     database, constraint_class, str_constraint_key, constraint_data)
-            constraint_data[constraint_class.main_pk_name()] = str_constraint_id
-            db_new_constraint = constraint_class(**constraint_data)
-            result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint)
-            db_specific_constraint, updated = result
-
-            # create generic constraint
-            # constraint_fk_field_name = 'constraint_uuid'.format(constraint_kind.value)
-            constraint_data = {
-                'constraints_uuid': db_constraints.constraints_uuid, 'position': position, 'kind': constraint_kind
-            }
-
-            db_new_constraint = ConstraintModel(**constraint_data)
-            result: Tuple[Union_ConstraintModel, bool] = self.database.create_or_update(db_new_constraint)
-            db_constraint, updated = result
-
-            return db_constraint, updated
-
-    def set_constraints(self, service_uuid: str, constraints_name : str, grpc_constraints
-    ) -> List[Tuple[Union[ConstraintsModel, ConstraintModel], bool]]:
-        with self.session() as session:
-            # str_constraints_key = key_to_str([db_parent_pk, constraints_name], separator=':')
-            # result : Tuple[ConstraintsModel, bool] = get_or_create_object(database, ConstraintsModel, str_constraints_key)
-            result = session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none()
-            created = None
-            if result:
-                created = True
-            session.query(ConstraintsModel).filter_by(constraints_uuid=service_uuid).one_or_none()
-            db_constraints = ConstraintsModel(constraints_uuid=service_uuid)
-            session.add(db_constraints)
-
-            db_objects = [(db_constraints, created)]
-
-            for position,grpc_constraint in enumerate(grpc_constraints):
-                result : Tuple[ConstraintModel, bool] = self.set_constraint(
-                    db_constraints, grpc_constraint, position)
-                db_constraint, updated = result
-                db_objects.append((db_constraint, updated))
-
-            return db_objects
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def SetService(self, request: Service, context : grpc.ServicerContext) -> ServiceId:
-        with self.lock:
-            with self.session() as session:
-
-                context_uuid = request.service_id.context_id.context_uuid.uuid
-                # db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-                db_context = session.query(ContextModel).filter_by(context_uuid=context_uuid).one_or_none()
-
-                for i,endpoint_id in enumerate(request.service_endpoint_ids):
-                    endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-                    if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
-                        raise InvalidArgumentException(
-                            'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
-                            endpoint_topology_context_uuid,
-                            ['should be == {:s}({:s})'.format(
-                                'request.service_id.context_id.context_uuid.uuid', context_uuid)])
-
-                service_uuid = request.service_id.service_uuid.uuid
-                # str_service_key = key_to_str([context_uuid, service_uuid])
-
-                constraints_result = self.set_constraints(service_uuid, 'constraints', request.service_constraints)
-                db_constraints = constraints_result[0][0]
-
-                config_rules = grpc_config_rules_to_raw(request.service_config.config_rules)
-                running_config_result = update_config(self.database, str_service_key, 'running', config_rules)
-                db_running_config = running_config_result[0][0]
-
-                result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, {
-                    'context_fk'            : db_context,
-                    'service_uuid'          : service_uuid,
-                    'service_type'          : grpc_to_enum__service_type(request.service_type),
-                    'service_constraints_fk': db_constraints,
-                    'service_status'        : grpc_to_enum__service_status(request.service_status.service_status),
-                    'service_config_fk'     : db_running_config,
-                })
-                db_service, updated = result
-
-                for i,endpoint_id in enumerate(request.service_endpoint_ids):
-                    endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
-                    endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
-                    endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
-                    endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-
-                    str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
-                    if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                        str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-                        str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-
-                    db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
-
-                    str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--')
-                    result : Tuple[ServiceEndPointModel, bool] = get_or_create_object(
-                        self.database, ServiceEndPointModel, str_service_endpoint_key, {
-                            'service_fk': db_service, 'endpoint_fk': db_endpoint})
-                    #db_service_endpoint, service_endpoint_created = result
-
-                event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-                dict_service_id = db_service.dump_id()
-                notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id})
-                return ServiceId(**dict_service_id)
-            context_uuid = request.service_id.context_id.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-
-            for i,endpoint_id in enumerate(request.service_endpoint_ids):
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-                if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
-                    raise InvalidArgumentException(
-                        'request.service_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
-                        endpoint_topology_context_uuid,
-                        ['should be == {:s}({:s})'.format(
-                            'request.service_id.context_id.context_uuid.uuid', context_uuid)])
-
-            service_uuid = request.service_id.service_uuid.uuid
-            str_service_key = key_to_str([context_uuid, service_uuid])
-
-            constraints_result = set_constraints(
-                self.database, str_service_key, 'service', request.service_constraints)
-            db_constraints = constraints_result[0][0]
-
-            running_config_rules = update_config(
-                self.database, str_service_key, 'service', request.service_config.config_rules)
-            db_running_config = running_config_rules[0][0]
-
-            result : Tuple[ServiceModel, bool] = update_or_create_object(self.database, ServiceModel, str_service_key, {
-                'context_fk'            : db_context,
-                'service_uuid'          : service_uuid,
-                'service_type'          : grpc_to_enum__service_type(request.service_type),
-                'service_constraints_fk': db_constraints,
-                'service_status'        : grpc_to_enum__service_status(request.service_status.service_status),
-                'service_config_fk'     : db_running_config,
-            })
-            db_service, updated = result
-
-            for i,endpoint_id in enumerate(request.service_endpoint_ids):
-                endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
-                endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
-                endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-
-                str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
-                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                    str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-                    str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-
-                db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
-
-                str_service_endpoint_key = key_to_str([service_uuid, str_endpoint_key], separator='--')
-                result : Tuple[ServiceEndPointModel, bool] = get_or_create_object(
-                    self.database, ServiceEndPointModel, str_service_endpoint_key, {
-                        'service_fk': db_service, 'endpoint_fk': db_endpoint})
-                #db_service_endpoint, service_endpoint_created = result
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_service_id = db_service.dump_id()
-            notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id})
-            return ServiceId(**dict_service_id)
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RemoveService(self, request: ServiceId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            context_uuid = request.context_id.context_uuid.uuid
-            service_uuid = request.service_uuid.uuid
-            db_service = ServiceModel(self.database, key_to_str([context_uuid, service_uuid]), auto_load=False)
-            found = db_service.load()
-            if not found: return Empty()
-
-            dict_service_id = db_service.dump_id()
-            db_service.delete()
-
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_SERVICE, event_type, {'service_id': dict_service_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetServiceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ServiceEvent]:
-        for message in self.messagebroker.consume({TOPIC_SERVICE}, consume_timeout=CONSUME_TIMEOUT):
-            yield ServiceEvent(**json.loads(message.content))
-
-
-    # ----- Slice ----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListSliceIds(self, request: ContextId, context : grpc.ServicerContext) -> SliceIdList:
-        with self.lock:
-            db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid)
-            db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel)
-            db_slices = sorted(db_slices, key=operator.attrgetter('pk'))
-            return SliceIdList(slice_ids=[db_slice.dump_id() for db_slice in db_slices])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListSlices(self, request: ContextId, context : grpc.ServicerContext) -> SliceList:
-        with self.lock:
-            db_context : ContextModel = get_object(self.database, ContextModel, request.context_uuid.uuid)
-            db_slices : Set[SliceModel] = get_related_objects(db_context, SliceModel)
-            db_slices = sorted(db_slices, key=operator.attrgetter('pk'))
-            return SliceList(slices=[db_slice.dump() for db_slice in db_slices])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetSlice(self, request: SliceId, context : grpc.ServicerContext) -> Slice:
-        with self.lock:
-            str_key = key_to_str([request.context_id.context_uuid.uuid, request.slice_uuid.uuid])
-            db_slice : SliceModel = get_object(self.database, SliceModel, str_key)
-            return Slice(**db_slice.dump(
-                include_endpoint_ids=True, include_constraints=True, include_config_rules=True,
-                include_service_ids=True, include_subslice_ids=True))
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def SetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId:
-        with self.lock:
-            context_uuid = request.slice_id.context_id.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-
-            for i,endpoint_id in enumerate(request.slice_endpoint_ids):
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-                if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
-                    raise InvalidArgumentException(
-                        'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
-                        endpoint_topology_context_uuid,
-                        ['should be == {:s}({:s})'.format(
-                            'request.slice_id.context_id.context_uuid.uuid', context_uuid)])
-
-            slice_uuid = request.slice_id.slice_uuid.uuid
-            str_slice_key = key_to_str([context_uuid, slice_uuid])
-
-            constraints_result = set_constraints(
-                self.database, str_slice_key, 'slice', request.slice_constraints)
-            db_constraints = constraints_result[0][0]
-
-            running_config_rules = update_config(
-                self.database, str_slice_key, 'slice', request.slice_config.config_rules)
-            db_running_config = running_config_rules[0][0]
-
-            result : Tuple[SliceModel, bool] = update_or_create_object(self.database, SliceModel, str_slice_key, {
-                'context_fk'          : db_context,
-                'slice_uuid'          : slice_uuid,
-                'slice_constraints_fk': db_constraints,
-                'slice_status'        : grpc_to_enum__slice_status(request.slice_status.slice_status),
-                'slice_config_fk'     : db_running_config,
-                'slice_owner_uuid'    : request.slice_owner.owner_uuid.uuid,
-                'slice_owner_string'  : request.slice_owner.owner_string,
-            })
-            db_slice, updated = result
-
-            for i,endpoint_id in enumerate(request.slice_endpoint_ids):
-                endpoint_uuid                  = endpoint_id.endpoint_uuid.uuid
-                endpoint_device_uuid           = endpoint_id.device_id.device_uuid.uuid
-                endpoint_topology_uuid         = endpoint_id.topology_id.topology_uuid.uuid
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-
-                str_endpoint_key = key_to_str([endpoint_device_uuid, endpoint_uuid])
-                if len(endpoint_topology_context_uuid) > 0 and len(endpoint_topology_uuid) > 0:
-                    str_topology_key = key_to_str([endpoint_topology_context_uuid, endpoint_topology_uuid])
-                    str_endpoint_key = key_to_str([str_endpoint_key, str_topology_key], separator=':')
-
-                db_endpoint : EndPointModel = get_object(self.database, EndPointModel, str_endpoint_key)
-
-                str_slice_endpoint_key = key_to_str([str_slice_key, str_endpoint_key], separator='--')
-                result : Tuple[SliceEndPointModel, bool] = get_or_create_object(
-                    self.database, SliceEndPointModel, str_slice_endpoint_key, {
-                        'slice_fk': db_slice, 'endpoint_fk': db_endpoint})
-                #db_slice_endpoint, slice_endpoint_created = result
-
-            for i,service_id in enumerate(request.slice_service_ids):
-                service_uuid         = service_id.service_uuid.uuid
-                service_context_uuid = service_id.context_id.context_uuid.uuid
-                str_service_key = key_to_str([service_context_uuid, service_uuid])
-                db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key)
-
-                str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--')
-                result : Tuple[SliceServiceModel, bool] = get_or_create_object(
-                    self.database, SliceServiceModel, str_slice_service_key, {
-                        'slice_fk': db_slice, 'service_fk': db_service})
-                #db_slice_service, slice_service_created = result
-
-            for i,subslice_id in enumerate(request.slice_subslice_ids):
-                subslice_uuid         = subslice_id.slice_uuid.uuid
-                subslice_context_uuid = subslice_id.context_id.context_uuid.uuid
-                str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid])
-                db_subslice : SliceModel = get_object(self.database, SliceModel, str_subslice_key)
-
-                str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--')
-                result : Tuple[SliceSubSliceModel, bool] = get_or_create_object(
-                    self.database, SliceSubSliceModel, str_slice_subslice_key, {
-                        'slice_fk': db_slice, 'sub_slice_fk': db_subslice})
-                #db_slice_subslice, slice_subslice_created = result
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_slice_id = db_slice.dump_id()
-            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id})
-            return SliceId(**dict_slice_id)
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def UnsetSlice(self, request: Slice, context : grpc.ServicerContext) -> SliceId:
-        with self.lock:
-            context_uuid = request.slice_id.context_id.context_uuid.uuid
-            db_context : ContextModel = get_object(self.database, ContextModel, context_uuid)
-
-            for i,endpoint_id in enumerate(request.slice_endpoint_ids):
-                endpoint_topology_context_uuid = endpoint_id.topology_id.context_id.context_uuid.uuid
-                if len(endpoint_topology_context_uuid) > 0 and context_uuid != endpoint_topology_context_uuid:
-                    raise InvalidArgumentException(
-                        'request.slice_endpoint_ids[{:d}].topology_id.context_id.context_uuid.uuid'.format(i),
-                        endpoint_topology_context_uuid,
-                        ['should be == {:s}({:s})'.format(
-                            'request.slice_id.context_id.context_uuid.uuid', context_uuid)])
-
-            slice_uuid = request.slice_id.slice_uuid.uuid
-            str_slice_key = key_to_str([context_uuid, slice_uuid])
-
-            if len(request.slice_constraints) > 0:
-                raise NotImplementedError('UnsetSlice: removal of constraints')
-            if len(request.slice_config.config_rules) > 0:
-                raise NotImplementedError('UnsetSlice: removal of config rules')
-            if len(request.slice_endpoint_ids) > 0:
-                raise NotImplementedError('UnsetSlice: removal of endpoints')
-
-            updated = False
-
-            for service_id in request.slice_service_ids:
-                service_uuid         = service_id.service_uuid.uuid
-                service_context_uuid = service_id.context_id.context_uuid.uuid
-                str_service_key = key_to_str([service_context_uuid, service_uuid])
-                str_slice_service_key = key_to_str([str_slice_key, str_service_key], separator='--')
-                SliceServiceModel(self.database, str_slice_service_key).delete()
-                updated = True
-
-            for subslice_id in request.slice_subslice_ids:
-                subslice_uuid         = subslice_id.slice_uuid.uuid
-                subslice_context_uuid = subslice_id.context_id.context_uuid.uuid
-                str_subslice_key = key_to_str([subslice_context_uuid, subslice_uuid])
-                str_slice_subslice_key = key_to_str([str_slice_key, str_subslice_key], separator='--')
-                SliceSubSliceModel(self.database, str_slice_subslice_key).delete()
-                updated = True
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            db_slice : SliceModel = get_object(self.database, SliceModel, str_slice_key)
-            dict_slice_id = db_slice.dump_id()
-            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id})
-            return SliceId(**dict_slice_id)
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RemoveSlice(self, request: SliceId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            context_uuid = request.context_id.context_uuid.uuid
-            slice_uuid = request.slice_uuid.uuid
-            db_slice = SliceModel(self.database, key_to_str([context_uuid, slice_uuid]), auto_load=False)
-            found = db_slice.load()
-            if not found: return Empty()
-
-            dict_slice_id = db_slice.dump_id()
-            db_slice.delete()
-
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_SLICE, event_type, {'slice_id': dict_slice_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetSliceEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[SliceEvent]:
-        for message in self.messagebroker.consume({TOPIC_SLICE}, consume_timeout=CONSUME_TIMEOUT):
-            yield SliceEvent(**json.loads(message.content))
-
-
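-            # The composite keys used throughout these methods are built with key_to_str().
-            # A minimal sketch of the assumed contract (the real helper lives in the common
-            # ORM tools; the default separator here is illustrative only):
-            #
-            #     from typing import List
-            #
-            #     def key_to_str(fields : List[str], separator : str = '/') -> str:
-            #         # Joins the key fields with the given separator. Relation models such
-            #         # as SliceSubSliceModel pass separator='--' so the two composite sides
-            #         # (each already containing '/') remain distinguishable.
-            #         return separator.join(fields)
-            #
-            # Example: key_to_str(['admin', 'slice-1']) -> 'admin/slice-1'; combining two
-            # such keys with separator='--' yields 'admin/slice-1--admin/slice-2'.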
-    # ----- Connection -------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListConnectionIds(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionIdList:
-        with self.lock:
-            str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid])
-            db_service : ServiceModel = get_object(self.database, ServiceModel, str_key)
-            db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel)
-            db_connections = sorted(db_connections, key=operator.attrgetter('pk'))
-            return ConnectionIdList(connection_ids=[db_connection.dump_id() for db_connection in db_connections])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListConnections(self, request: ServiceId, context : grpc.ServicerContext) -> ConnectionList:
-        with self.lock:
-            str_key = key_to_str([request.context_id.context_uuid.uuid, request.service_uuid.uuid])
-            db_service : ServiceModel = get_object(self.database, ServiceModel, str_key)
-            db_connections : Set[ConnectionModel] = get_related_objects(db_service, ConnectionModel)
-            db_connections = sorted(db_connections, key=operator.attrgetter('pk'))
-            return ConnectionList(connections=[db_connection.dump() for db_connection in db_connections])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Connection:
-        with self.lock:
-            db_connection : ConnectionModel = get_object(self.database, ConnectionModel, request.connection_uuid.uuid)
-            return Connection(**db_connection.dump(include_path=True, include_sub_service_ids=True))
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def SetConnection(self, request: Connection, context : grpc.ServicerContext) -> ConnectionId:
-        with self.lock:
-            connection_uuid = request.connection_id.connection_uuid.uuid
-
-            connection_attributes = {'connection_uuid': connection_uuid}
-
-            service_context_uuid = request.service_id.context_id.context_uuid.uuid
-            service_uuid = request.service_id.service_uuid.uuid
-            if len(service_context_uuid) > 0 and len(service_uuid) > 0:
-                str_service_key = key_to_str([service_context_uuid, service_uuid])
-                db_service : ServiceModel = get_object(self.database, ServiceModel, str_service_key)
-                connection_attributes['service_fk'] = db_service
-
-            path_hops_result = set_path(self.database, connection_uuid, request.path_hops_endpoint_ids, path_name = '')
-            db_path = path_hops_result[0]
-            connection_attributes['path_fk'] = db_path
-
-            result : Tuple[ConnectionModel, bool] = update_or_create_object(
-                self.database, ConnectionModel, connection_uuid, connection_attributes)
-            db_connection, updated = result
-
-            for sub_service_id in request.sub_service_ids:
-                sub_service_uuid         = sub_service_id.service_uuid.uuid
-                sub_service_context_uuid = sub_service_id.context_id.context_uuid.uuid
-                str_sub_service_key = key_to_str([sub_service_context_uuid, sub_service_uuid])
-                db_service : ServiceModel = get_object(self.database, ServiceModel, str_sub_service_key)
-
-                str_connection_sub_service_key = key_to_str([connection_uuid, str_sub_service_key], separator='--')
-                result : Tuple[ConnectionSubServiceModel, bool] = get_or_create_object(
-                    self.database, ConnectionSubServiceModel, str_connection_sub_service_key, {
-                        'connection_fk': db_connection, 'sub_service_fk': db_service})
-                #db_connection_sub_service, connection_sub_service_created = result
-
-            event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_connection_id = db_connection.dump_id()
-            notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id})
-            return ConnectionId(**dict_connection_id)
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RemoveConnection(self, request: ConnectionId, context : grpc.ServicerContext) -> Empty:
-        with self.lock:
-            db_connection = ConnectionModel(self.database, request.connection_uuid.uuid, auto_load=False)
-            found = db_connection.load()
-            if not found: return Empty()
-
-            dict_connection_id = db_connection.dump_id()
-            db_connection.delete()
-
-            event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            notify_event(self.messagebroker, TOPIC_CONNECTION, event_type, {'connection_id': dict_connection_id})
-            return Empty()
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetConnectionEvents(self, request: Empty, context : grpc.ServicerContext) -> Iterator[ConnectionEvent]:
-        for message in self.messagebroker.consume({TOPIC_CONNECTION}, consume_timeout=CONSUME_TIMEOUT):
-            yield ConnectionEvent(**json.loads(message.content))
-
-
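-    # SetConnection (and the other Set* methods) rely on the (object, updated) contract
-    # of update_or_create_object(). A dict-backed sketch of that upsert behavior, with
-    # hypothetical names, not the real implementation:
-    #
-    #     from typing import Any, Dict, Tuple
-    #
-    #     class FakeDatabase:
-    #         def __init__(self):
-    #             self.objects : Dict[str, Dict[str, Any]] = {}
-    #
-    #     def update_or_create_object(
-    #         database : FakeDatabase, model_class : type, key : str, attributes : Dict[str, Any]
-    #     ) -> Tuple[Dict[str, Any], bool]:
-    #         updated = key in database.objects       # True when the object already existed
-    #         obj = database.objects.setdefault(key, {})
-    #         obj.update(attributes)                  # merge the new attribute values
-    #         return obj, updated
-    #
-    # The boolean is what selects EVENTTYPE_UPDATE vs EVENTTYPE_CREATE in the
-    # notify_event() calls; get_or_create_object() is the variant that leaves an
-    # existing object untouched.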
-    # ----- Policy -----------------------------------------------------------------------------------------------------
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListPolicyRuleIds(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleIdList:
-        with self.lock:
-            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
-            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
-            return PolicyRuleIdList(policyRuleIdList=[db_policy_rule.dump_id() for db_policy_rule in db_policy_rules])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def ListPolicyRules(self, request: Empty, context: grpc.ServicerContext) -> PolicyRuleList:
-        with self.lock:
-            db_policy_rules: List[PolicyRuleModel] = get_all_objects(self.database, PolicyRuleModel)
-            db_policy_rules = sorted(db_policy_rules, key=operator.attrgetter('pk'))
-            return PolicyRuleList(policyRules=[db_policy_rule.dump() for db_policy_rule in db_policy_rules])
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def GetPolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> PolicyRule:
-        with self.lock:
-            policy_rule_uuid = request.uuid.uuid
-            db_policy_rule: PolicyRuleModel = get_object(self.database, PolicyRuleModel, policy_rule_uuid)
-            return PolicyRule(**db_policy_rule.dump())
-
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def SetPolicyRule(self, request: PolicyRule, context: grpc.ServicerContext) -> PolicyRuleId:
-        with self.lock:
-            policy_rule_type = request.WhichOneof('policy_rule')
-            policy_rule_json = grpc_message_to_json(request)
-            policy_rule_uuid = policy_rule_json[policy_rule_type]['policyRuleBasic']['policyRuleId']['uuid']['uuid']
-            result: Tuple[PolicyRuleModel, bool] = update_or_create_object(
-                self.database, PolicyRuleModel, policy_rule_uuid, {'value': json.dumps(policy_rule_json)})
-            db_policy, updated = result # pylint: disable=unused-variable
-
-            #event_type = EventTypeEnum.EVENTTYPE_UPDATE if updated else EventTypeEnum.EVENTTYPE_CREATE
-            dict_policy_id = db_policy.dump_id()
-            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
-            return PolicyRuleId(**dict_policy_id)
-
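-    # SetPolicyRule uses two helpers worth spelling out. WhichOneof() is standard
-    # protobuf API: it returns the name of the field currently set in a oneof group,
-    # or None. grpc_message_to_json() is assumed to wrap the stock protobuf JSON
-    # conversion, roughly:
-    #
-    #     from google.protobuf.json_format import MessageToDict
-    #
-    #     def grpc_message_to_json(message) -> dict:
-    #         # MessageToDict emits camelCase field names by default, which is why the
-    #         # code above indexes 'policyRuleBasic' / 'policyRuleId', not snake_case.
-    #         return MessageToDict(message)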
-    @safe_and_metered_rpc_method(METRICS, LOGGER)
-    def RemovePolicyRule(self, request: PolicyRuleId, context: grpc.ServicerContext) -> Empty:
-        with self.lock:
-            policy_uuid = request.uuid.uuid
-            db_policy = PolicyRuleModel(self.database, policy_uuid, auto_load=False)
-            found = db_policy.load()
-            if not found: return Empty()
-
-            dict_policy_id = db_policy.dump_id()
-            db_policy.delete()
-            #event_type = EventTypeEnum.EVENTTYPE_REMOVE
-            #notify_event(self.messagebroker, TOPIC_POLICY, event_type, {"policy_id": dict_policy_id})
-            return Empty()
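 # Every Set*/Remove* method in the removed servicer ends by publishing an event, and
 # every Get*Events method streams them back. A self-contained sketch of that
 # publish/consume contract, using a queue-backed stand-in for the real MessageBroker
 # (names and event shape are simplified assumptions):
 #
 #     import json, queue
 #     from typing import Iterator, Set
 #
 #     class FakeMessage:
 #         def __init__(self, topic : str, content : str):
 #             self.topic, self.content = topic, content
 #
 #     class FakeMessageBroker:
 #         def __init__(self):
 #             self._queue : 'queue.Queue[FakeMessage]' = queue.Queue()
 #         def publish(self, message : FakeMessage) -> None:
 #             self._queue.put(message)
 #         def consume(self, topics : Set[str], consume_timeout : float = 0.1) -> Iterator[FakeMessage]:
 #             # Yield matching messages until none arrives within the timeout.
 #             while True:
 #                 try:
 #                     message = self._queue.get(timeout=consume_timeout)
 #                 except queue.Empty:
 #                     break
 #                 if message.topic in topics:
 #                     yield message
 #
 #     def notify_event(messagebroker : FakeMessageBroker, topic : str, event_type : int, fields : dict) -> None:
 #         # Real events also carry a timestamp; this sketch keeps only the fields the
 #         # Get*Events methods unpack with json.loads(message.content).
 #         event = {'event': {'event_type': event_type}}
 #         event.update(fields)
 #         messagebroker.publish(FakeMessage(topic, json.dumps(event)))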
diff --git a/src/context/tests/test_unitary.py b/src/context/tests/test_unitary.py
index 67dd64fb33194d77d4596764a1e3a6c239fe475c..aaa8c7fbd0cad3015c911a77d925c215cf2c61fe 100644
--- a/src/context/tests/test_unitary.py
+++ b/src/context/tests/test_unitary.py
@@ -37,13 +37,13 @@ from context.client.EventsCollector import EventsCollector
 from context.service.database.Tools import (
     FASTHASHER_DATA_ACCEPTED_FORMAT, FASTHASHER_ITEM_ACCEPTED_FORMAT, fast_hasher)
 from context.service.grpc_server.ContextService import ContextService
-from context.service.Populate import populate
+from context.service._old_code.Populate import populate
 from context.service.rest_server.RestServer import RestServer
 from context.service.rest_server.Resources import RESOURCES
 from requests import Session
 from sqlalchemy import create_engine
 from sqlalchemy.orm import sessionmaker
-from context.service.database.Base import Base
+from context.service.database._Base import Base
 
 from .Objects import (
     CONNECTION_R1_R3, CONNECTION_R1_R3_ID, CONNECTION_R1_R3_UUID, CONTEXT, CONTEXT_ID, DEVICE_R1, DEVICE_R1_ID,
@@ -1294,134 +1294,6 @@ def test_grpc_policy(
 
 
 
-# ----- Test REST API methods ------------------------------------------------------------------------------------------
-
-def test_rest_populate_database(
-    context_db_mb : Tuple[Database, MessageBroker], # pylint: disable=redefined-outer-name
-    context_service_grpc : ContextService           # pylint: disable=redefined-outer-name
-    ):
-    database = context_db_mb[0]
-    database.clear_all()
-    populate(LOCAL_HOST, GRPC_PORT)
-
-def test_rest_get_context_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/context_ids')
-    validate_context_ids(reply)
-
-def test_rest_get_contexts(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/contexts')
-    validate_contexts(reply)
-
-def test_rest_get_context(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}'.format(context_uuid))
-    validate_context(reply)
-
-def test_rest_get_topology_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/topology_ids'.format(context_uuid))
-    validate_topology_ids(reply)
-
-def test_rest_get_topologies(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/topologies'.format(context_uuid))
-    validate_topologies(reply)
-
-def test_rest_get_topology(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    topology_uuid = urllib.parse.quote(DEFAULT_TOPOLOGY_UUID)
-    reply = do_rest_request('/context/{:s}/topology/{:s}'.format(context_uuid, topology_uuid))
-    validate_topology(reply, num_devices=3, num_links=3)
-
-def test_rest_get_service_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/service_ids'.format(context_uuid))
-    validate_service_ids(reply)
-
-def test_rest_get_services(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/services'.format(context_uuid))
-    validate_services(reply)
-
-def test_rest_get_service(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    service_uuid = urllib.parse.quote(SERVICE_R1_R2_UUID, safe='')
-    reply = do_rest_request('/context/{:s}/service/{:s}'.format(context_uuid, service_uuid))
-    validate_service(reply)
-
-def test_rest_get_slice_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/slice_ids'.format(context_uuid))
-    #validate_slice_ids(reply)
-
-def test_rest_get_slices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    reply = do_rest_request('/context/{:s}/slices'.format(context_uuid))
-    #validate_slices(reply)
-
-#def test_rest_get_slice(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-#    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-#    slice_uuid = urllib.parse.quote(SLICE_R1_R2_UUID, safe='')
-#    reply = do_rest_request('/context/{:s}/slice/{:s}'.format(context_uuid, slice_uuid))
-#    #validate_slice(reply)
-
-def test_rest_get_device_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/device_ids')
-    validate_device_ids(reply)
-
-def test_rest_get_devices(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/devices')
-    validate_devices(reply)
-
-def test_rest_get_device(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    device_uuid = urllib.parse.quote(DEVICE_R1_UUID, safe='')
-    reply = do_rest_request('/device/{:s}'.format(device_uuid))
-    validate_device(reply)
-
-def test_rest_get_link_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/link_ids')
-    validate_link_ids(reply)
-
-def test_rest_get_links(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/links')
-    validate_links(reply)
-
-def test_rest_get_link(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    link_uuid = urllib.parse.quote(LINK_R1_R2_UUID, safe='')
-    reply = do_rest_request('/link/{:s}'.format(link_uuid))
-    validate_link(reply)
-
-def test_rest_get_connection_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
-    reply = do_rest_request('/context/{:s}/service/{:s}/connection_ids'.format(context_uuid, service_uuid))
-    validate_connection_ids(reply)
-
-def test_rest_get_connections(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    context_uuid = urllib.parse.quote(DEFAULT_CONTEXT_UUID)
-    service_uuid = urllib.parse.quote(SERVICE_R1_R3_UUID, safe='')
-    reply = do_rest_request('/context/{:s}/service/{:s}/connections'.format(context_uuid, service_uuid))
-    validate_connections(reply)
-
-def test_rest_get_connection(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    connection_uuid = urllib.parse.quote(CONNECTION_R1_R3_UUID, safe='')
-    reply = do_rest_request('/connection/{:s}'.format(connection_uuid))
-    validate_connection(reply)
-
-def test_rest_get_policyrule_ids(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/policyrule_ids')
-    #validate_policyrule_ids(reply)
-
-def test_rest_get_policyrules(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-    reply = do_rest_request('/policyrules')
-    #validate_policyrules(reply)
-
-#def test_rest_get_policyrule(context_service_rest : RestServer): # pylint: disable=redefined-outer-name
-#    policyrule_uuid = urllib.parse.quote(POLICYRULE_UUID, safe='')
-#    reply = do_rest_request('/policyrule/{:s}'.format(policyrule_uuid))
-#    #validate_policyrule(reply)
-
-
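-# The removed REST tests above all go through the module's do_rest_request() helper.
-# A minimal sketch of what such a helper does; the host, port, and '/api' prefix
-# below are assumptions, not the project's real values:
-#
-#     import requests
-#
-#     RESTAPI_HOSTNAME = '127.0.0.1'
-#     RESTAPI_PORT     = 8080
-#     RESTAPI_PREFIX   = '/api'
-#
-#     def do_rest_request(url : str):
-#         request_url = 'http://{:s}:{:d}{:s}{:s}'.format(
-#             RESTAPI_HOSTNAME, RESTAPI_PORT, RESTAPI_PREFIX, url)
-#         reply = requests.get(request_url)
-#         assert reply.status_code == 200, 'Request failed with status {:d}'.format(reply.status_code)
-#         return reply.json()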
 # ----- Test misc. Context internal tools ------------------------------------------------------------------------------
 
 def test_tools_fast_string_hasher():