diff --git a/src/common/perf_eval_method_wrapper/tests/README.md b/src/common/perf_eval_method_wrapper/tests/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..93eba4d8130c5071bba3a26fcfa6c989d87a5308
--- /dev/null
+++ b/src/common/perf_eval_method_wrapper/tests/README.md
@@ -0,0 +1,32 @@
+# Performance Evaluation Method Wrapper
+
+- deploy as:
+```bash
+tfs@tfs-vm:~/tfs-ctrl$ source src/common/perf_eval_method_wrapper/tests/deploy_specs.sh
+tfs@tfs-vm:~/tfs-ctrl$ ./deploy.sh
+```
+
+- expose Prometheus and Grafana:
+terminal 1 (Prometheus UI):
+```bash
+kubectl port-forward -n monitoring service/prometheus-k8s --address 0.0.0.0 9090:9090
+```
+
+terminal 2 (Prometheus's built-in Grafana):
+```bash
+kubectl port-forward -n monitoring service/grafana --address 0.0.0.0 3000:3000
+```
+
+terminal 3 (Alertmanager):
+```bash
+kubectl port-forward -n monitoring service/alertmanager-main --address 0.0.0.0 9093:9093
+```
+
+- log into Grafana:
+  - http://127.0.0.1:3000
+  - default credentials: admin/admin
+  - upload dashboard_prometheus_histogram.json
+  - watch the dashboard update in real time
+
+- upload a topology through the WebUI and navigate
+  - you should see the histogram changing in Grafana
diff --git a/src/common/perf_eval_method_wrapper/tests/dashboard_prometheus_histogram.json b/src/common/perf_eval_method_wrapper/tests/dashboard_prometheus_histogram.json
new file mode 100644
index 0000000000000000000000000000000000000000..21aa9114685e4b03b6f8df0a4a56673601b52dde
--- /dev/null
+++ b/src/common/perf_eval_method_wrapper/tests/dashboard_prometheus_histogram.json
@@ -0,0 +1,110 @@
+{
+    "annotations": {
+      "list": [
+        {
+          "builtIn": 1,
+          "datasource": "-- Grafana --",
+          "enable": true,
+          "hide": true,
+          "iconColor": "rgba(0, 211, 255, 1)",
+          "name": "Annotations & Alerts",
+          "type": "dashboard"
+        }
+      ]
+    },
+    "editable": true,
+    "gnetId": null,
+    "graphTooltip": 0,
+    "id": 25,
+    "links": [],
+    "panels": [
+      {
+        "cards": {
+          "cardPadding": null,
+          "cardRound": null
+        },
+        "color": {
+          "cardColor": "#b4ff00",
+          "colorScale": "sqrt",
+          "colorScheme": "interpolateRdYlGn",
+          "exponent": 0.5,
+          "max": null,
+          "min": 0,
+          "mode": "opacity"
+        },
+        "dataFormat": "tsbuckets",
+        "datasource": "prometheus",
+        "fieldConfig": {
+          "defaults": {},
+          "overrides": []
+        },
+        "gridPos": {
+          "h": 11,
+          "w": 24,
+          "x": 0,
+          "y": 0
+        },
+        "heatmap": {},
+        "hideZeroBuckets": false,
+        "highlightCards": true,
+        "id": 2,
+        "legend": {
+          "show": true
+        },
+        "pluginVersion": "7.5.4",
+        "reverseYBuckets": false,
+        "targets": [
+          {
+            "exemplar": true,
+            "expr": "sum(increase(Context_GetDevice_histogram_duration_bucket[$__interval])) by (le)",
+            "format": "heatmap",
+            "instant": false,
+            "interval": "",
+            "intervalFactor": 1,
+            "legendFormat": "{{le}}",
+            "queryType": "randomWalk",
+            "refId": "A"
+          }
+        ],
+        "title": "TFS / Histogram Component RPCs",
+        "tooltip": {
+          "show": true,
+          "showHistogram": true
+        },
+        "type": "heatmap",
+        "xAxis": {
+          "show": true
+        },
+        "xBucketNumber": null,
+        "xBucketSize": null,
+        "yAxis": {
+          "decimals": null,
+          "format": "s",
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true,
+          "splitFactor": null
+        },
+        "yBucketBound": "auto",
+        "yBucketNumber": null,
+        "yBucketSize": null
+      }
+    ],
+    "refresh": "5s",
+    "schemaVersion": 27,
+    "style": "dark",
+    "tags": [],
+    "templating": {
+      "list": []
+    },
+    "time": {
+      "from": "now-30m",
+      "to": "now"
+    },
+    "timepicker": {},
+    "timezone": "",
+    "title": "TFS / Histogram Component RPCs",
+    "uid": "eAg-wsOVk",
+    "version": 2
+}
diff --git a/src/common/perf_eval_method_wrapper/tests/deploy_specs.sh b/src/common/perf_eval_method_wrapper/tests/deploy_specs.sh
new file mode 100644
index 0000000000000000000000000000000000000000..797b646e2eaa6d4aa5033df9e8512ed149a575a8
--- /dev/null
+++ b/src/common/perf_eval_method_wrapper/tests/deploy_specs.sh
@@ -0,0 +1,26 @@
+# Set the URL of your local Docker registry where the images will be uploaded to.
+export TFS_REGISTRY_IMAGE="http://localhost:32000/tfs/"
+
+# Set the list of components, separated by spaces, you want to build images for, and deploy.
+# Supported components are:
+#   context device automation policy service compute monitoring webui
+#   interdomain slice pathcomp dlt
+#   dbscanserving opticalattackmitigator opticalattackdetector
+#   l3_attackmitigator l3_centralizedattackdetector l3_distributedattackdetector
+export TFS_COMPONENTS="context device pathcomp service slice webui"
+
+# Set the tag you want to use for your images.
+export TFS_IMAGE_TAG="dev"
+
+# Set the name of the Kubernetes namespace to deploy to.
+export TFS_K8S_NAMESPACE="tfs"
+
+# Set additional manifest files to be applied after the deployment
+export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml src/common/perf_eval_method_wrapper/tests/servicemonitors.yaml"
+
+# Set the new Grafana admin password
+export TFS_GRAFANA_PASSWORD="admin123+"
+
+# If not already set, disable skip-build flag.
+# If TFS_SKIP_BUILD is "YES", the containers are not rebuilt-retagged-repushed and existing ones are used.
+export TFS_SKIP_BUILD=${TFS_SKIP_BUILD:-"YES"}
diff --git a/src/common/perf_eval_method_wrapper/tests/servicemonitors.yaml b/src/common/perf_eval_method_wrapper/tests/servicemonitors.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4036571016fe099b7e715083f3c25687b35ebb97
--- /dev/null
+++ b/src/common/perf_eval_method_wrapper/tests/servicemonitors.yaml
@@ -0,0 +1,115 @@
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-contextservice-metric
+  labels:
+    app: contextservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing 
+    #   the servicemonitor of Prometheus itself: Without the correct name, 
+    #   Prometheus cannot identify the metrics of the Flask app as the target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: contextservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 15s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-deviceservice-metric
+  labels:
+    app: deviceservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing 
+    #   the servicemonitor of Prometheus itself: Without the correct name, 
+    #   Prometheus cannot identify the metrics of the Flask app as the target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: deviceservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 15s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-serviceservice-metric
+  labels:
+    app: serviceservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing 
+    #   the servicemonitor of Prometheus itself: Without the correct name, 
+    #   Prometheus cannot identify the metrics of the Flask app as the target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: serviceservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 15s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running
+---
+apiVersion: monitoring.coreos.com/v1
+kind: ServiceMonitor
+metadata:
+  namespace: monitoring # namespace where prometheus is running
+  name: tfs-sliceservice-metric
+  labels:
+    app: sliceservice
+    #release: prometheus
+    #release: prom  # name of the release
+    # ( VERY IMPORTANT: You need to know the correct release name by viewing 
+    #   the servicemonitor of Prometheus itself: Without the correct name, 
+    #   Prometheus cannot identify the metrics of the Flask app as the target.)
+spec:
+  selector:
+    matchLabels:
+      # Target app service
+      #namespace: tfs
+      app: sliceservice # same as above
+      #release: prometheus # same as above
+  endpoints:
+  - port: metrics # named port in target app
+    scheme: http
+    path: /metrics # path to scrape
+    interval: 15s # scrape interval
+  namespaceSelector:
+    any: false
+    matchNames:
+    - tfs # namespace where the app is running