From d18cfa8553b20194b70ec3004db35d6bb6a0f63d Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 30 Jan 2026 15:14:55 +0000 Subject: [PATCH 01/78] feat: Updates in AI Analyzer Engine Implementation - Update AI model processing - Update SLA policy configuration for enhanced forecasting and data handling - AI model runs twice (for each metric) - In DB fetcher, df is added to process two data sets. - updated requirements --- .../ai_model/ai_processor.py | 161 ++++++++++------- .../ai_model/sla_policy.py | 24 ++- .../AI_analytics_engine/api/api_blueprint.py | 15 +- .../clients/influxdb_fetcher.py | 168 ++++++++++-------- .../AI_analytics_engine/requirements.in | 3 + .../AI_analytics_engine/tests/test_api.py | 14 +- .../simap_server/simap_client/__main__.py | 2 +- 7 files changed, 229 insertions(+), 158 deletions(-) diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py index 5395e38ac..384b1070f 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py @@ -53,73 +53,105 @@ class AIModelProcessor: def ai_model_processor( self, - metric_values: list[float] - ) -> Optional[list[float]]: + metric_values: list[dict[str, Any]], + ) -> Optional[list[dict[str, Any]]]: """ Process device and performance data through AI models. Args: - metric_values: List of performance metric values. + metric_values: List of dictionaries containing performance metric values. + Each dict has keys like 'bandwidth_utilization', 'latency', etc. Returns: - List of 3 forecasted values, or None if insufficient data. + List of dicts containing forecasted values for each metric, + or None if insufficient data. """ LOGGER.debug("Processing data through AI models") - # LOGGER.debug(f"Number of performance data points: {len(metric_values)}") + LOGGER.debug(f"Number of performance data points: {len(metric_values)}") - if not metric_values or len(metric_values) < 4: - LOGGER.warning("Insufficient data for forecasting") + if not metric_values or len(metric_values) < 3: + LOGGER.warning("Insufficient data for forecasting (need at least 3 samples)") return None + results = [] + try: - # Convert metric_values to pandas Series - data = pd.Series(metric_values) - - # Create and fit Exponential Smoothing model - model = ExponentialSmoothing( - data, - trend="add", - seasonal=None # No seasonal component for this data - ) - - fit = model.fit() - - # Forecast next 3 values - forecast = fit.forecast(steps=3) - - forecasted_values = forecast.tolist() + # Convert list of dicts to DataFrame for easier processing + df = pd.DataFrame(metric_values) - # Calculate confidence score based on model fit quality - # Using residual standard error as inverse confidence metric - residuals = fit.resid - mse = (residuals ** 2).mean() - rmse = mse ** 0.5 + # Process each metric column separately + for column in df.columns: + data = df[column] + + # Skip non-numeric columns + if not pd.api.types.is_numeric_dtype(data): + LOGGER.debug(f"Skipping non-numeric column: {column}") + continue + + # Remove NaN values + data = data.dropna() + + if len(data) < 3: + LOGGER.warning(f"Insufficient data for column {column} (need at least 3 samples)") + continue + + LOGGER.debug(f"Processing column: {column} with {len(data)} samples") + + # Create and fit Exponential Smoothing model + model = ExponentialSmoothing( + endog = data, + trend = "add", + seasonal = None # No seasonal component for this data + ) + + fit = model.fit() + + # Forecast next 3 values + forecast = fit.forecast(steps=3) + forecasted_values = forecast.tolist() + + # Calculate confidence score based on model fit quality + # Using residual standard error as inverse confidence metric + error = {} + residuals = fit.resid + mse = (residuals ** 2).mean() + rmse = mse ** 0.5 + + error['mse'] = float(mse) + error['rmse'] = float(rmse) + + # Normalize confidence: lower RMSE = higher confidence + # Use data scale (std dev) to normalize RMSE + data_std = data.std() + + if data_std > 0: + normalized_error = rmse / data_std + # Convert to confidence score (0-1 range, higher is better) + confidence = max(0, min(1, 1 - normalized_error)) + else: + confidence = 0.5 # Default if std dev is 0 + + LOGGER.info(f"Metric: {column}, RMSE: {rmse:.4f}, Data Std: {data_std:.4f}, Confidence: {confidence:.4f}") + LOGGER.info(f"Forecasted next 3 values for {column}: {forecasted_values}") + + results.append({ + "metric_name": column, + "forecasted_values": forecasted_values, + "confidence": float(confidence), + "sample_interval": 5, + "error_metrics": error, + }) - # Normalize confidence: lower RMSE = higher confidence - # Use data scale (std dev) to normalize RMSE - data_std = data.std() - if data_std > 0: - normalized_error = rmse / data_std - # Convert to confidence score (0-1 range, higher is better) - confidence = max(0, min(1, 1 - normalized_error)) - else: - confidence = 0.5 # Default if std dev is 0 - - # LOGGER.info(f"Model RMSE: {rmse:.4f}, Confidence: {confidence:.4f}") - LOGGER.info(f"Forecasted next 3 values: {forecasted_values}") - - # return forecasted_values - return [confidence] + return results if results else None except Exception as e: - LOGGER.error(f"Error during forecasting: {e}") + LOGGER.error(f"Error during forecasting: {e}", exc_info=True) return None def process_data( self, performance_data: Dict[str, Any], - sla_policy: SLAPolicyConfig ) -> Dict[str, Any]: """ Process device and performance data through AI models. @@ -139,28 +171,23 @@ class AIModelProcessor: LOGGER.debug(f"Number of performance data points: {len(metric_values)}") # LOGGER.debug(f"Performance data values: {metric_values}") - # forecasted_values = self.ai_model_processor(metric_values) - # if forecasted_values is None: - # LOGGER.warning("AI model processing failed or insufficient data") - - # if forecasted_values: - # # Exponential weights: more weight on earlier (starting) values - # # Example: for 3 values -> weights = [0.5, 0.33, 0.17] (exponential decay) - # weights = [2**(-i) for i in range(len(forecasted_values))] - # # Normalize weights to sum to 1 - # total_weight = sum(weights) - # weights = [w / total_weight for w in weights] - # score = average(forecasted_values, weights=weights) - # # LOGGER.debug(f"Weighted average with exponential weights: {score}, weights: {weights}") - # else: - # score = None - - score = self.ai_model_processor(metric_values) + if not metric_values: + LOGGER.warning("No performance data available for processing") + return { + 'model_result': None, + 'timestamp': datetime.now(UTC).isoformat() + } + result = self.ai_model_processor(metric_values) + + if result is None: + # fallback score structure + LOGGER.warning("AI model processing failed or insufficient data. See logs for details.") + return { + 'model_result': None, + 'timestamp': datetime.now(UTC).isoformat() + } return { - 'confidence_scores': score, - 'summary': { - 'sla_policy': sla_policy.to_dict(), - 'timestamp': datetime.now(UTC).isoformat() - } + 'model_result': result, + 'timestamp': datetime.now(UTC).isoformat() } diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/sla_policy.py b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/sla_policy.py index ff957ad0a..2a13229ff 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/sla_policy.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/sla_policy.py @@ -33,11 +33,15 @@ class SLAPolicyConfig: bandwidth_utilization_threshold_pct: Maximum acceptable bandwidth utilization as a percentage (0-100). time_window_seconds: Time window in seconds for data analysis. + sample_interval_sec: Sampling interval in seconds for data collection. + sample_count: Minimum number of samples to fetch from database. """ simap_id: str latency_threshold_ms: float|None - bandwidth_utilization_threshold_pct: float|None + bandwidth_utilization: float|None time_window_seconds: int + sample_interval_sec: int + sample_count: int @classmethod def from_dict(cls, data: Dict[str, Any]) -> SLAPolicyConfig: @@ -47,7 +51,7 @@ class SLAPolicyConfig: Args: data: Dictionary containing the SLA policy configuration fields. Required keys: 'simap_id', 'latency_threshold_ms', - 'bandwidth_utilization_threshold_pct', 'time_window_seconds'. + 'bandwidth_utilization', 'time_window_seconds'. Supports nested 'sla_metrics' structure. Returns: @@ -62,14 +66,18 @@ class SLAPolicyConfig: simap_id = str(data['simap_id']) metrics = data['sla_metrics'] latency_threshold_ms = float(metrics['latency_threshold_ms']) - bandwidth_threshold = float(metrics.get('bandwidth_utilization_threshold_pct', 0.0)) - time_window = int(data.get('window_size_sec', 300)) + bandwidth_threshold = float(metrics.get('bandwidth_utilization', 0.0)) + time_window = int(data['history_window_size_sec']) + sample_interval = int(data['sample_interval_sec']) + sample_count = int(data['sample_count']) return cls( - simap_id = simap_id, - latency_threshold_ms = latency_threshold_ms, - bandwidth_utilization_threshold_pct = bandwidth_threshold, - time_window_seconds = time_window + simap_id = simap_id, + latency_threshold_ms = latency_threshold_ms, + bandwidth_utilization = bandwidth_threshold, + time_window_seconds = time_window, + sample_interval_sec = sample_interval, + sample_count = sample_count ) except KeyError as e: raise KeyError(f"Missing required field: {e.args[0]}") from e diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py b/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py index ea28ccfb1..564da8167 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py @@ -124,10 +124,11 @@ def create_ai_analytics_blueprint( # >>> Step 3: Process data through AI models LOGGER.debug(">>> Step 3: Processing data through AI models") results = ai_processor.process_data( - performance_data, sla_policy + performance_data ) # >>> Step 4: Send results to Decision Engine + results['simap_id'] = sla_policy.simap_id # Include SIMAP ID in results LOGGER.debug(">>> Step 4: Sending results to Decision Engine") if not decision_client.send_results(results): LOGGER.error("Failed to send results to Decision Engine") @@ -181,20 +182,28 @@ def create_ai_analytics_blueprint( Returns: JSON response with SIMAP and InfluxDB connection details. + Sensitive values (passwords, tokens) are masked. """ LOGGER.debug("Configuration requested") + + def mask_secret(value: str) -> str: + """Mask sensitive values for display.""" + if not value: + return '(not set)' + return f"{value[:2]}{'*' * (len(value) - 2)}" if len(value) > 2 else '***' + return jsonify({ 'simap': { 'scheme': Config.SIMAP_SERVER_SCHEME, 'address': Config.SIMAP_SERVER_ADDRESS, 'port': Config.SIMAP_SERVER_PORT, 'username': Config.SIMAP_SERVER_USERNAME, - 'password': Config.SIMAP_SERVER_PASSWORD + 'password': mask_secret(Config.SIMAP_SERVER_PASSWORD) }, 'influxdb': { 'host': Config.INFLUXDB_HOST, 'port': Config.INFLUXDB_PORT, - 'token': Config.INFLUXDB_TOKEN, + 'token': mask_secret(Config.INFLUXDB_TOKEN), 'database': Config.INFLUXDB_DATABASE }, 'api': { diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py index d989011dc..4aff397b8 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py @@ -100,7 +100,6 @@ class InfluxDBFetcher: def process_response_table( self, table: Any, - metric_to_process: str ) -> Dict[str, Any]: """ Process InfluxDB response table into structured data. @@ -111,45 +110,36 @@ class InfluxDBFetcher: Returns: Dictionary containing processed performance metrics and values. """ - metrics = [] - if table is not None and isinstance(table, pd.DataFrame): - metrics = table.to_dict('records') - else: - LOGGER.warning("No data returned from InfluxDB query ") - - keys_to_check = [ 'time', 'link_id'] - sla_metric = 'bandwidth_utilization' - keys_to_check.append(sla_metric) + if table is None or not isinstance(table, pd.DataFrame): + LOGGER.warning("No data returned from InfluxDB query") + return { + 'metrics': [], + 'metric_values': [] + } - # if metric_to_process == 'latency_threshold_ms': - # sla_metric = 'latency' - # keys_to_check.append(sla_metric) - # elif metric_to_process == 'bandwidth_utilization_threshold_pct': - # sla_metric = 'bandwidth_utilization' - # keys_to_check.append(sla_metric) - # else: - # sla_metric = None - # LOGGER.warning(f"Unknown metric to process: {metric_to_process}") - + LOGGER.debug(f"Processing {len(table)} rows from InfluxDB response") + + # Define columns for each output dataframe + full_columns = ['bandwidth_utilization', 'latency', 'time', 'link_id'] + metric_columns = ['bandwidth_utilization', 'latency'] - LOGGER.debug(f"Processed {len(metrics)} metric records from InfluxDB response") - data = [] - metric_value = [] - if sla_metric is not None: - for row in metrics: - # LOGGER.debug(f"Metric record: {row}") - new_row = {} - for key, value in row.items(): - if key in keys_to_check: - new_row[key] = value - if key == sla_metric: - metric_value.append(value) - data.append(new_row) - LOGGER.debug(f">>> Processed metric values: {metric_value}") + # Create DataFrame 1: Full metrics with time and link_id + # Filter only columns that exist in the table + available_full_cols = [col for col in full_columns if col in table.columns] + df_full = table[available_full_cols] + metrics = df_full.to_dict('records') + + # Create DataFrame 2: Only metric values (bandwidth_utilization, latency) + available_metric_cols = [col for col in metric_columns if col in table.columns] + df_metrics = table[available_metric_cols] + metric_values = df_metrics.to_dict('records') + + LOGGER.debug(f"Processed {len(metrics)} metric records with {len(available_full_cols)} columns") + LOGGER.debug(f"Extracted {len(metric_values)} metric value records with {len(available_metric_cols)} columns") return { - 'metrics': data, - 'metric_values': metric_value + 'metrics': metrics, + 'metric_values': metric_values } @@ -164,14 +154,20 @@ class InfluxDBFetcher: Queries InfluxDB for time-series performance data based on the SLA policy parameters and device information. The retry decorator ensures resilience against transient failures. + + If the initial query returns fewer samples than required by + sla_policy.sample_count, the method will automatically fetch + older data with an extended time window until the required + sample count is met or max attempts are reached. Args: - sla_policy: The SLA policy configuration containing time window - and threshold parameters. + sla_policy: The SLA policy configuration containing time window, + threshold parameters, and required sample count. Returns: Dictionary containing: - 'metrics': List of performance metric records. + - 'metric_values': List of metric values only. - 'timestamp_range': Dictionary with 'start' and 'end' timestamps for the queried data. @@ -185,50 +181,80 @@ class InfluxDBFetcher: if sla_policy.latency_threshold_ms is None: raise ValueError("SLA policy missing latency threshold for data fetch") - - metric_to_process = sla_policy.latency_threshold_ms LOGGER.debug( f"Fetching performance data for simap_id={sla_policy.simap_id}, " - f"time_window={sla_policy.time_window_seconds}s " - f"for metric={metric_to_process} " + f"time_window={sla_policy.time_window_seconds}s, " + f"required_samples={sla_policy.sample_count}" ) try: - query = ( - f"SELECT * FROM link_telemetry " - f"WHERE link_id = '{sla_policy.simap_id}' " - f"AND time >= now() - INTERVAL '{sla_policy.time_window_seconds} seconds' " - f"ORDER BY time DESC" - ) - - LOGGER.debug(f"Executing query: {query}") + # Initial time window + current_time_window = sla_policy.time_window_seconds + max_attempts = 3 + attempt = 1 + final_table = None - table = self._client.query(query=query, language="sql", mode="pandas") + while attempt <= max_attempts: + query = ( + f"SELECT * FROM link_telemetry " + f"WHERE link_id = '{sla_policy.simap_id}' " + f"AND time >= now() - INTERVAL '{current_time_window} seconds' " + f"ORDER BY time DESC" + ) + + LOGGER.debug(f"Attempt {attempt}/{max_attempts}: Executing query with time_window={current_time_window}s") + LOGGER.debug(f"Query: {query}") + + final_table = self._client.query(query=query, language="sql", mode="pandas") + + # Count samples from raw table + samples_fetched = 0 if final_table is None or not isinstance(final_table, pd.DataFrame) else len(final_table) + + LOGGER.info( + f"Attempt {attempt}: Fetched {samples_fetched} samples " + f"(required: {sla_policy.sample_count})" + ) + + # Check if we have enough samples + if samples_fetched >= sla_policy.sample_count: + LOGGER.info(f"Required samples met") + break + + # If not enough samples and not last attempt, calculate new time window + if attempt < max_attempts: + if samples_fetched > 0: + # Calculate required time window based on sample density + # Formula: new_window = current_window * (required_samples / fetched_samples) * 1.2 + # The 1.2 factor adds 20% buffer to account for non-uniform data distribution + ratio = sla_policy.sample_count / samples_fetched + current_time_window = int(current_time_window * ratio * 1.2) + LOGGER.debug(f"Extending time window to {current_time_window}s(ratio: {ratio:.2f})") + else: + # If no samples, double the time window + current_time_window *= 2 + LOGGER.warning(f"No samples found, doubling time window to {current_time_window}s") + + attempt += 1 + else: + LOGGER.warning( + f"Max attempts reached. Returning {samples_fetched} samples " + f"(required: {sla_policy.sample_count})" + ) + break - result = self.process_response_table(table, metric_to_process) - # metrics = result.get('metrics', []) - - # start_time = datetime.now(timezone.utc) - # end_time = datetime.now(timezone.utc) - # if metrics: - # times = [m.get('time') for m in metrics if m.get('time')] - # if times is not None: - # start_time = min(times) - # end_time = max(times) - - LOGGER.info(f"Fetched {len(result.get('metrics', []))} metric records for simap_id={sla_policy.simap_id}") + # Process the response table after fetch is completed + result = self.process_response_table(final_table) return { - 'metrics': result.get('metrics', []), - 'metric_values': result.get('metric_values', []), - 'timestamp_range': { - # 'start': start_time.isoformat() if isinstance(start_time, datetime) else str(start_time), - # 'end': end_time.isoformat() if isinstance(end_time, datetime) else str(end_time) - } + 'metrics': result.get('metrics', []), + 'metric_values': result.get('metric_values', []), + 'fetch_window_size_sec': current_time_window, + 'timestamp_range': {}, } - finally: - self._client.close() + except Exception as e: + LOGGER.error(f"Error fetching performance data from InfluxDB: {e}", exc_info=True) + raise e @RETRY_DECORATOR def notify_telemetry_update( diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/requirements.in b/src/tests/mwc26-f5ga/AI_analytics_engine/requirements.in index f0704cd0a..572d25a37 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/requirements.in +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/requirements.in @@ -2,3 +2,6 @@ flask>=2.3.0 requests>=2.31.0 influxdb3-python>=0.8.0 +pandas>=2.0.0 +statsmodels>=0.14.0 +numpy>=1.24.0 diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.py b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.py index 1866cd97a..703ca3d20 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.py @@ -19,7 +19,6 @@ This module tests the /api/v1/analyze endpoint by starting the server and sending HTTP requests. """ -from csv import Error import logging import os import sys @@ -113,21 +112,20 @@ def test_analyze_endpoint(ai_engine_server): - Returns JSON response with status and message fields """ - if ai_engine_server is Error: - pytest.fail("AI Analytics Engine server failed to start") - LOGGER.info(">>>>>> Starting test_case test_analyze_endpoint: POST /api/v1/analyze endpoint") # Prepare test payload with SLA policy configuration payload = { "simap_id": "E2E-L1", "sla_metrics": { - "latency_threshold_ms": 10, - "bandwidth_utilization_threshold_pct": 0.0 + "latency_threshold_ms": 0, + "bandwidth_utilization": 0.0 }, - "window_size_sec": 600 + "history_window_size_sec": 600, + "sample_interval_sec": 5, + "sample_count": 120, } - + LOGGER.info(f"Sending analyze request with payload: {payload}") # Send POST request to analyze endpoint diff --git a/src/tests/tools/simap_server/simap_client/__main__.py b/src/tests/tools/simap_server/simap_client/__main__.py index 7ff6fa3b2..d583ea961 100644 --- a/src/tests/tools/simap_server/simap_client/__main__.py +++ b/src/tests/tools/simap_server/simap_client/__main__.py @@ -96,7 +96,7 @@ def main() -> None: for link_id, (bw, lat) in abstract_metrics.items(): print(f'{link_id:10s}: BW={bw:5.2f}%, Lat={lat:.3f}ms SvcIDs: {domain_service_map[link_id]}') - time.sleep(5) + time.sleep(2) if __name__ == '__main__': -- GitLab From 3e7bde97c546020aa1eb198eaae964bcc8dadc2d Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 30 Jan 2026 15:31:15 +0000 Subject: [PATCH 02/78] feat: Update SLA policy and InfluxDB fetcher to use "forecast_sample_interval_sec" and "forecast_sample_count". --- .../AI_analytics_engine/ai_model/sla_policy.py | 16 ++++++++-------- .../clients/influxdb_fetcher.py | 12 ++++++------ .../AI_analytics_engine/tests/test_api.py | 6 +++--- 3 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/sla_policy.py b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/sla_policy.py index 2a13229ff..116d6541c 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/sla_policy.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/sla_policy.py @@ -33,15 +33,15 @@ class SLAPolicyConfig: bandwidth_utilization_threshold_pct: Maximum acceptable bandwidth utilization as a percentage (0-100). time_window_seconds: Time window in seconds for data analysis. - sample_interval_sec: Sampling interval in seconds for data collection. - sample_count: Minimum number of samples to fetch from database. + forecast_sample_interval_sec: Sampling interval in seconds for data collection. + forecast_sample_count: Minimum number of samples to fetch from database. """ simap_id: str latency_threshold_ms: float|None bandwidth_utilization: float|None time_window_seconds: int - sample_interval_sec: int - sample_count: int + forecast_sample_interval_sec: int + forecast_sample_count: int @classmethod def from_dict(cls, data: Dict[str, Any]) -> SLAPolicyConfig: @@ -68,16 +68,16 @@ class SLAPolicyConfig: latency_threshold_ms = float(metrics['latency_threshold_ms']) bandwidth_threshold = float(metrics.get('bandwidth_utilization', 0.0)) time_window = int(data['history_window_size_sec']) - sample_interval = int(data['sample_interval_sec']) - sample_count = int(data['sample_count']) + sample_interval = int(data['forecast_sample_interval_sec']) + forecast_sample_count = int(data['forecast_sample_count']) return cls( simap_id = simap_id, latency_threshold_ms = latency_threshold_ms, bandwidth_utilization = bandwidth_threshold, time_window_seconds = time_window, - sample_interval_sec = sample_interval, - sample_count = sample_count + forecast_sample_interval_sec = sample_interval, + forecast_sample_count = forecast_sample_count ) except KeyError as e: raise KeyError(f"Missing required field: {e.args[0]}") from e diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py index 4aff397b8..d7228647a 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py @@ -156,7 +156,7 @@ class InfluxDBFetcher: ensures resilience against transient failures. If the initial query returns fewer samples than required by - sla_policy.sample_count, the method will automatically fetch + sla_policy.forecast_sample_count, the method will automatically fetch older data with an extended time window until the required sample count is met or max attempts are reached. @@ -185,7 +185,7 @@ class InfluxDBFetcher: LOGGER.debug( f"Fetching performance data for simap_id={sla_policy.simap_id}, " f"time_window={sla_policy.time_window_seconds}s, " - f"required_samples={sla_policy.sample_count}" + f"required_samples={sla_policy.forecast_sample_count}" ) try: @@ -213,11 +213,11 @@ class InfluxDBFetcher: LOGGER.info( f"Attempt {attempt}: Fetched {samples_fetched} samples " - f"(required: {sla_policy.sample_count})" + f"(required: {sla_policy.forecast_sample_count})" ) # Check if we have enough samples - if samples_fetched >= sla_policy.sample_count: + if samples_fetched >= sla_policy.forecast_sample_count: LOGGER.info(f"Required samples met") break @@ -227,7 +227,7 @@ class InfluxDBFetcher: # Calculate required time window based on sample density # Formula: new_window = current_window * (required_samples / fetched_samples) * 1.2 # The 1.2 factor adds 20% buffer to account for non-uniform data distribution - ratio = sla_policy.sample_count / samples_fetched + ratio = sla_policy.forecast_sample_count / samples_fetched current_time_window = int(current_time_window * ratio * 1.2) LOGGER.debug(f"Extending time window to {current_time_window}s(ratio: {ratio:.2f})") else: @@ -239,7 +239,7 @@ class InfluxDBFetcher: else: LOGGER.warning( f"Max attempts reached. Returning {samples_fetched} samples " - f"(required: {sla_policy.sample_count})" + f"(required: {sla_policy.forecast_sample_count})" ) break diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.py b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.py index 703ca3d20..d92265325 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.py @@ -121,9 +121,9 @@ def test_analyze_endpoint(ai_engine_server): "latency_threshold_ms": 0, "bandwidth_utilization": 0.0 }, - "history_window_size_sec": 600, - "sample_interval_sec": 5, - "sample_count": 120, + "history_window_size_sec": 600, + "forecast_sample_interval_sec": 5, + "forecast_sample_count": 120, } LOGGER.info(f"Sending analyze request with payload: {payload}") -- GitLab From 19b287d68a3e2d83722b53ed6c28f9f92a761457 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Tue, 10 Feb 2026 11:53:49 +0000 Subject: [PATCH 03/78] feat: Update SIMAP Connector Service and related components - Added support for connection management in the SIMAP Connector Service. - Updated connection environment variables in simap_connectorservice.yaml. - Enhanced Connection.py with a new function to retrieve connections. - Modified SimapConnectorServiceServicerImpl.py to assert worker types. - Refactored __main__.py for improved import organization. - Updated ObjectCache.py to handle connection objects and caching logic. - Enhanced SimapUpdater.py with connection event handling and telemetry updates. - Added utility functions in Tools.py for connection endpoint and link retrieval. - Improved worker class in _Worker.py for better thread management. - Updated SyntheticSamplers.py to use current timestamp for sampling. - Added new JSON test data for L3VPN requests. - Created deployment and dummy scripts for L3VPN testing. - Added log dumping script for easier log collection during testing. --- .gitignore | 1 + manifests/simap_connectorservice.yaml | 10 +- .../tools/context_queries/Connection.py | 13 +- .../SimapConnectorServiceServicerImpl.py | 8 +- src/simap_connector/service/__main__.py | 10 +- .../service/simap_updater/ObjectCache.py | 31 ++- .../service/simap_updater/SimapUpdater.py | 242 ++++++++++++++---- .../service/simap_updater/Tools.py | 68 ++++- .../service/telemetry/worker/_Worker.py | 8 +- .../worker/data/SyntheticSamplers.py | 2 +- .../data/slices/l3vpn_request_from_agg.json | 185 +++++++++++++ src/tests/ecoc25-f5ga-telemetry/deploy.sh | 6 +- .../dummy_L3VPN_delete.sh | 29 +++ .../dummy_L3VPN_request.sh | 30 +++ src/tests/ecoc25-f5ga-telemetry/dump-logs.sh | 75 ++++++ 15 files changed, 634 insertions(+), 84 deletions(-) create mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slices/l3vpn_request_from_agg.json create mode 100755 src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_delete.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh create mode 100755 src/tests/ecoc25-f5ga-telemetry/dump-logs.sh diff --git a/.gitignore b/.gitignore index d5af4f7f6..7e0e576a2 100644 --- a/.gitignore +++ b/.gitignore @@ -146,6 +146,7 @@ venv.bak/ # VSCode project settings .vscode/ +.github/ # Visual Studio project settings /.vs diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index 09796f6f8..0b4ea503c 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -40,15 +40,9 @@ spec: - name: SIMAP_SERVER_SCHEME value: "http" - name: SIMAP_SERVER_ADDRESS - # Assuming SIMAP Server is deployed in a local Docker container, as per: - # - ./src/tests/tools/simap_server/build.sh - # - ./src/tests/tools/simap_server/deploy.sh - value: "172.17.0.1" + value: "10.254.0.9" # running at SIMAP Server VM - name: SIMAP_SERVER_PORT - # Assuming SIMAP Server is deployed in a local Docker container, as per: - # - ./src/tests/tools/simap_server/build.sh - # - ./src/tests/tools/simap_server/deploy.sh - value: "8080" + value: "8080" # running at SIMAP Server VM - name: SIMAP_SERVER_USERNAME value: "admin" - name: SIMAP_SERVER_PASSWORD diff --git a/src/common/tools/context_queries/Connection.py b/src/common/tools/context_queries/Connection.py index 88ccb1bf2..118dfe136 100644 --- a/src/common/tools/context_queries/Connection.py +++ b/src/common/tools/context_queries/Connection.py @@ -13,8 +13,9 @@ # limitations under the License. import grpc, logging -from typing import Optional -from common.proto.context_pb2 import Connection, ConnectionId +from typing import List, Optional +from common.Constants import DEFAULT_CONTEXT_NAME +from common.proto.context_pb2 import Connection, ConnectionId, ContextId from context.client.ContextClient import ContextClient LOGGER = logging.getLogger(__name__) @@ -41,3 +42,11 @@ def get_connection_by_uuid( connection_id = ConnectionId() connection_id.connection_uuid.uuid = connection_uuid return get_connection_by_id(context_client, connection_id, rw_copy=rw_copy) + +def get_connections( + context_client : ContextClient, context_uuid : str = DEFAULT_CONTEXT_NAME +) -> List[Connection]: + context_id = ContextId() + context_id.context_uuid.uuid = context_uuid + connections = context_client.ListConnections(context_id) + return [c for c in connections.connections] diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index 8aafffc1a..b77df510a 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -22,7 +22,7 @@ from common.tools.rest_conf.client.RestConfClient import RestConfClient from common.method_wrappers.Decorator import MetricsPool, safe_and_metered_rpc_method from device.client.DeviceClient import DeviceClient from simap_connector.service.telemetry.worker.SynthesizerWorker import SynthesizerWorker -from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum +from simap_connector.service.telemetry.worker._Worker import _Worker, WorkerTypeEnum from .database.Subscription import subscription_get, subscription_set, subscription_delete from .database.SubSubscription import ( sub_subscription_list, sub_subscription_set, sub_subscription_delete @@ -167,11 +167,13 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): latency_factor = request.latency_factor synthesizer_name = '{:s}:{:s}'.format(network_id, link_id) - synthesizer : Optional[SynthesizerWorker] = self._telemetry_pool.get_worker( - WorkerTypeEnum.SYNTHESIZER, synthesizer_name + synthesizer : Optional[_Worker] = self._telemetry_pool.get_worker( + WorkerTypeEnum.SYNTHESIZER, synthesizer_name ) if synthesizer is None: MSG = 'Synthesizer({:s}) not found' raise Exception(MSG.format(synthesizer_name)) + assert isinstance(synthesizer, SynthesizerWorker), \ + 'Expected SynthesizerWorker, got {:s}'.format(type(synthesizer).__name__) synthesizer.change_resources(bandwidth_factor, latency_factor) return Empty() diff --git a/src/simap_connector/service/__main__.py b/src/simap_connector/service/__main__.py index 01c36f717..a0e732028 100644 --- a/src/simap_connector/service/__main__.py +++ b/src/simap_connector/service/__main__.py @@ -24,12 +24,12 @@ from simap_connector.Config import ( SIMAP_SERVER_SCHEME, SIMAP_SERVER_ADDRESS, SIMAP_SERVER_PORT, SIMAP_SERVER_USERNAME, SIMAP_SERVER_PASSWORD, ) -from .database.Engine import Engine -from .database.models._Base import rebuild_database -from .simap_updater.SimapClient import SimapClient +from .database.Engine import Engine +from .database.models._Base import rebuild_database +from .simap_updater.SimapClient import SimapClient from .simap_updater.SimapUpdater import SimapUpdater -from .telemetry.TelemetryPool import TelemetryPool -from .SimapConnectorService import SimapConnectorService +from .telemetry.TelemetryPool import TelemetryPool +from .SimapConnectorService import SimapConnectorService TERMINATE = threading.Event() diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py index d8b04f8d4..94ef4fa95 100644 --- a/src/simap_connector/service/simap_updater/ObjectCache.py +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -14,12 +14,13 @@ import logging -from enum import Enum +from enum import Enum from typing import Any, Dict, List, Optional, Tuple -from common.tools.context_queries.Device import get_device, get_devices -from common.tools.context_queries.Link import get_link, get_links -from common.tools.context_queries.Topology import get_topology, get_topologies -from common.tools.context_queries.Service import get_service_by_uuid, get_services +from common.tools.context_queries.Device import get_device, get_devices +from common.tools.context_queries.Link import get_link, get_links +from common.tools.context_queries.Topology import get_topology, get_topologies +from common.tools.context_queries.Service import get_service_by_uuid, get_services +from common.tools.context_queries.Connection import get_connection_by_uuid, get_connections from context.client.ContextClient import ContextClient @@ -41,7 +42,7 @@ KEY_LENGTHS = { CachedEntities.ENDPOINT : 2, CachedEntities.LINK : 1, CachedEntities.SERVICE : 1, - CachedEntities.CONNECTION : 3, + CachedEntities.CONNECTION : 1, } @@ -113,6 +114,10 @@ class ObjectCache: object_inst = get_service_by_uuid( self._context_client, object_uuids[0], rw_copy=False ) + elif entity == CachedEntities.CONNECTION: + object_inst = get_connection_by_uuid( + self._context_client, object_uuids[0], rw_copy=False + ) else: MSG = 'Not Supported ({:s}, {:s})' LOGGER.warning(MSG.format(str(entity.value).title(), str(object_uuids))) @@ -124,7 +129,9 @@ class ObjectCache: return None self.set(entity, object_inst, object_uuids[0]) - self.set(entity, object_inst, object_inst.name) + # Connections don't have a name field, so skip setting by name + if entity != CachedEntities.CONNECTION: + self.set(entity, object_inst, object_inst.name) if entity == CachedEntities.DEVICE: device_uuid = object_inst.device_id.device_uuid.uuid @@ -173,6 +180,12 @@ class ObjectCache: (s.service_id.service_uuid.uuid, s.name) : s for s in objects } + elif entity == CachedEntities.CONNECTION: + objects = get_connections(self._context_client) + objects = { + (c.connection_id.connection_uuid.uuid, c.connection_id.connection_uuid.uuid) : c + for c in objects + } else: MSG = 'Not Supported ({:s})' LOGGER.warning(MSG.format(str(entity.value).title())) @@ -180,7 +193,9 @@ class ObjectCache: for (object_uuid, object_name), object_inst in objects.items(): self.set(entity, object_inst, object_uuid) - self.set(entity, object_inst, object_name) + # Connections don't have a name field (object_name is same as UUID), so skip redundant set + if entity != CachedEntities.CONNECTION: + self.set(entity, object_inst, object_name) if entity == CachedEntities.DEVICE: for endpoint in object_inst.device_endpoints: diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 573085ac9..3ac2613d3 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -18,7 +18,7 @@ from typing import Any, Optional, Set from common.Constants import DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( - ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, + ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, ServiceStatusEnum, SliceEvent, TopologyEvent, ConnectionEvent ) from common.tools.grpc.BaseEventCollector import BaseEventCollector @@ -29,13 +29,12 @@ from simap_connector.service.telemetry.worker.data.Resources import ( ResourceLink, Resources, SyntheticSampler ) from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum -from simap_connector.service.telemetry.TelemetryPool import TelemetryPool +from simap_connector.service.telemetry.TelemetryPool import SynthesizerWorker, TelemetryPool from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER from .MockSimaps import delete_mock_simap, set_mock_simap from .ObjectCache import CachedEntities, ObjectCache from .SimapClient import SimapClient -from .Tools import get_device_endpoint, get_link_endpoint #, get_service_endpoint - +from .Tools import get_device_endpoint, get_link_endpoint, get_connection_endpoints_and_links #, get_service_endpoint LOGGER = logging.getLogger(__name__) @@ -49,18 +48,24 @@ SKIPPED_DEVICE_TYPES = { class EventDispatcher(BaseEventDispatcher): + # Telemetry scaling configuration + BASE_BANDWIDTH_OFFSET = 5.0 # Minimum bandwidth utilization % (no services) + MAX_BANDWIDTH_OFFSET = 90.0 # Maximum bandwidth utilization % (at capacity) + MAX_EXPECTED_SERVICES = 10 # Expected maximum concurrent services + DEFAULT_LINK_OFFSET = 25.0 # Default offset used in _dispatch_link_set + def __init__( self, events_queue : queue.PriorityQueue, - simap_client : SimapClient, - context_client : ContextClient, - telemetry_pool : TelemetryPool, - terminate : Optional[threading.Event] = None + simap_client : SimapClient, + context_client : ContextClient, + telemetry_pool : TelemetryPool, + terminate : Optional[threading.Event] = None ) -> None: super().__init__(events_queue, terminate) - self._simap_client = simap_client - self._context_client = context_client - self._telemetry_pool = telemetry_pool - self._object_cache = ObjectCache(self._context_client) + self._simap_client = simap_client + self._context_client = context_client + self._telemetry_pool = telemetry_pool + self._object_cache = ObjectCache(self._context_client) self._skipped_devices : Set[str] = set() @@ -663,42 +668,155 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'Processing Connection Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(connection_event))) - # Here a connection object from context is received in connection_event. - # Here is gRPC message definition: message Connection { ConnectionId connection_id = 1; ServiceId service_id = 2; repeated EndPointId path_hops_endpoint_ids = 3; repeated ServiceId sub_service_ids = 4; ConnectionSettings settings = 5;} - # discard sub_service_ids and settings for now, as not used in SIMAP population. - # Extract service_id, endpoint_ids from connection_event to identify the connection. - # Get all links using gRPC ListLinkIds() from context, and find which link(s) correspond to the connection's endpoint_ids. - # Then update SIMAP accordingly. - # Then, do this only for connections that correspond to links that this controller is allowed to manage, as per ALLOWED_LINKS_PER_CONTROLLER. - # Then, do something like this (pseudocode): - # worker_name = '{:s}:{:s}'.format(topology_name, link_name) - # resources = Resources() - # resources.links.append(ResourceLink( - # domain_name=topology_name, link_name=link_name, - # bandwidth_utilization_sampler=SyntheticSampler.create_random( - # amplitude_scale = 25.0, - # phase_scale = 1e-7, - # period_scale = 86_400, - # offset_scale = 25, - # noise_ratio = 0.05, - # min_value = 0.0, - # max_value = 100.0, - # ), - # latency_sampler=SyntheticSampler.create_random( - # amplitude_scale = 0.5, - # phase_scale = 1e-7, - # period_scale = 60.0, - # offset_scale = 10.0, - # noise_ratio = 0.05, - # min_value = 0.0, - # ), - # related_service_ids=[], - # )) - # sampling_interval = 1.0 - # self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) + # Extract connection UUID from event + connection_uuid = connection_event.connection_id.connection_uuid.uuid + + try: + # Use common helper to prepare connection data + result = self._prepare_connection_processing(connection_uuid) + if result is None: + return False + (topology_name, processed_links) = result + + # Update telemetry for each link involved in this connection + bandwidth_factor = self._calculate_bandwidth_factor() + + for _, link_name in processed_links: + LOGGER.info('Connection {:s} uses allowed link: {:s}'.format(connection_uuid, link_name)) + worker_name = '{:s}:{:s}'.format(topology_name, link_name) + + # Worker should already exist from _dispatch_link_set (link creation event) + if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): + LOGGER.warning('Worker not found for link {:s}, creating and starting new worker'.format(link_name)) + + # Create worker with same parameters as in _dispatch_link_set + resources = Resources() + resources.links.append(ResourceLink( + domain_name=topology_name, link_name=link_name, + bandwidth_utilization_sampler=SyntheticSampler.create_random( + amplitude_scale = 25.0, + phase_scale = 1e-7, + period_scale = 86_400, + offset_scale = 25, + noise_ratio = 0.05, + min_value = 0.0, + max_value = 100.0, + ), + latency_sampler=SyntheticSampler.create_random( + amplitude_scale = 0.5, + phase_scale = 1e-7, + period_scale = 60.0, + offset_scale = 10.0, + noise_ratio = 0.05, + min_value = 0.0, + ), + related_service_ids=[], + )) + sampling_interval = 1.0 + self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) + LOGGER.info('Started new synthesizer worker: {:s}'.format(worker_name)) + else: + # Worker exists, update bandwidth scaling factor + worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + assert isinstance(worker, SynthesizerWorker), \ + 'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__) + + worker.change_resources(bandwidth_factor, latency_factor=1.0) + LOGGER.info('Updated telemetry of already running worker: link {:s}, and bandwidth_factor={:.2f}'.format( + link_name, bandwidth_factor)) + + except Exception as e: + LOGGER.exception('Failed to process connection event {:s}: {:s}'.format(connection_uuid, str(e))) + return False return True + def _prepare_connection_processing(self, connection_uuid: str): + """ + Extract common logic for processing connection events. + + Returns: + Tuple of ( domain_name, processed_links) or None if failed + """ + # Get the connection object + connection = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid) + if connection is None: + LOGGER.warning('Connection {:s} not found in cache'.format(connection_uuid)) + return None + + # NOTE: Actual Connection event object does not include service_id. + + _, link_uuids = get_connection_endpoints_and_links(connection_uuid) + + # Determine the controller's domain name + topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) + topology_names = {t.name for t in topologies} + topology_names.discard(DEFAULT_TOPOLOGY_NAME) + if len(topology_names) != 1: + LOGGER.warning('Unable to identify self-controller for connection {:s} and {!r}'.format(connection_uuid, topology_names)) + return None + domain_name = topology_names.pop() + + # Filter links based on ALLOWED_LINKS_PER_CONTROLLER + allowed_link_names = ALLOWED_LINKS_PER_CONTROLLER.get(domain_name, set()) + processed_links = [] + for link_uuid in link_uuids: + link = self._object_cache.get(CachedEntities.LINK, link_uuid) + if link.name in allowed_link_names: + processed_links.append((link_uuid, link.name)) + + if not processed_links: + LOGGER.debug('Connection {:s} has no allowed links for domain {:s}'.format( + connection_uuid, domain_name)) + return None + + return domain_name, processed_links # NOTE: Domain name = topology name + + def _calculate_bandwidth_factor(self) -> float: + """ + Calculate bandwidth scaling factor based on active service count. + + Returns: + float: Bandwidth factor to multiply with existing worker offset + """ + try: + # Query all services from Context + all_services = self._object_cache.get_all(CachedEntities.SERVICE, fresh=False) + + # Count active services (SERVICESTATUS_ACTIVE or SERVICESTATUS_UPDATING) + active_service_count = 0 + for service in all_services: + service_status = service.service_status.service_status + if service_status in (ServiceStatusEnum.SERVICESTATUS_ACTIVE, + ServiceStatusEnum.SERVICESTATUS_UPDATING): + # Skip sub-services (UUID-based names) + try: + uuid.UUID(hex=service.name) + continue # Skip sub-services + except: # pylint: disable=bare-except + active_service_count += 1 + + active_service_count = int(active_service_count / 2) # Each service appears as two connections (uuid and name) + LOGGER.info('Active service count: {:d}'.format(int(active_service_count))) + + # Calculate bandwidth offset using linear scaling + service_ratio = min(active_service_count / self.MAX_EXPECTED_SERVICES, 1.0) + target_bandwidth_offset = (self.BASE_BANDWIDTH_OFFSET + (service_ratio * + (self.MAX_BANDWIDTH_OFFSET - self.BASE_BANDWIDTH_OFFSET))) + + # Calculate adjustment factor relative to default offset + bandwidth_factor = target_bandwidth_offset / self.DEFAULT_LINK_OFFSET + + LOGGER.info('Calculated bandwidth_factor={:.2f} (service_count={:d}, target_offset={:.2f})'.format( + bandwidth_factor, active_service_count, target_bandwidth_offset)) + + return bandwidth_factor + + except Exception as e: + LOGGER.exception('Failed to calculate bandwidth factor: {:s}'.format(str(e))) + # Return default factor (1.0 = no change) + return 1.0 + def dispatch_connection_create(self, connection_event : ConnectionEvent) -> None: if not self.dispatch_connection_set(connection_event): return @@ -712,8 +830,38 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) def dispatch_connection_remove(self, connection_event : ConnectionEvent) -> None: - MSG = 'Skipping Connection Remove Event: {:s}' - LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) + MSG = 'Processing Connection Remove Event: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(connection_event))) + + connection_uuid = connection_event.connection_id.connection_uuid.uuid + + try: + result = self._prepare_connection_processing(connection_uuid) + if result is None: + return + (topology_name, processed_links) = result + + # Update telemetry factor for remaining services + bandwidth_factor = self._calculate_bandwidth_factor() + + for _, link_name in processed_links: + worker_name = '{:s}:{:s}'.format(topology_name, link_name) + + if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): + LOGGER.warning('Worker not found for link {:s}, skipping telemetry update for connection removal'.format(link_name)) + continue + + worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + assert isinstance(worker, SynthesizerWorker), \ + 'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__) + + # Update bandwidth scaling + worker.change_resources(bandwidth_factor, latency_factor=1.0) + LOGGER.info('Updated telemetry for link {:s} after connection removal'.format(link_name)) + + except Exception as e: + LOGGER.exception('Failed to process connection removal {:s}: {:s}'.format( + connection_uuid, str(e))) class SimapUpdater: diff --git a/src/simap_connector/service/simap_updater/Tools.py b/src/simap_connector/service/simap_updater/Tools.py index d420f24e9..d08228640 100644 --- a/src/simap_connector/service/simap_updater/Tools.py +++ b/src/simap_connector/service/simap_updater/Tools.py @@ -16,11 +16,12 @@ import enum from typing import List, Optional, Set, Tuple, Union from common.proto.context_pb2 import ( - EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, - DeviceEvent, Link, LinkEvent, Service, ServiceEvent, SliceEvent, TopologyEvent + EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, ConnectionId, + DeviceEvent, Link, LinkEvent, Service, ServiceEvent, SliceEvent, TopologyEvent, Empty ) from common.tools.grpc.Tools import grpc_message_to_json_string - +from context.client.ContextClient import ContextClient +from common.tools.context_queries.Connection import get_connection_by_uuid class EventTypeEnum(enum.IntEnum): CREATE = EVENTTYPE_CREATE @@ -160,3 +161,64 @@ def get_service_endpoint(service : Service) -> Tuple[Optional[str], List[Tuple[s raise Exception(MSG.format(str(e), grpc_message_to_json_string(service))) from e return topology_uuid, endpoint_uuids + + +def get_connection_endpoints_and_links(connection_id: str) -> Tuple[List[Tuple[str, str]], List[str]]: + """ + Retrieve connection details and identify associated links. + Args: + connection_id: UUID string of the connection + Returns: + Tuple of: + - List of endpoint tuples (device_uuid, endpoint_uuid) in path order + - List of link UUIDs corresponding to consecutive endpoint pairs + """ + + context_client = ContextClient() + connection = get_connection_by_uuid(context_client, connection_id, rw_copy=False) + + if connection is None: + raise Exception(f"Failed to retrieve Connection({connection_id}): Connection not found") + + # Extract path_hops_endpoint_ids + endpoint_ids = [] + for endpoint_id in connection.path_hops_endpoint_ids: + device_uuid = endpoint_id.device_id.device_uuid.uuid + endpoint_uuid = endpoint_id.endpoint_uuid.uuid + endpoint_ids.append((device_uuid, endpoint_uuid)) + + if len(endpoint_ids) < 2: + # No path or single endpoint - no links + return endpoint_ids, [] + + # Find links connecting consecutive endpoint pairs + # Get all links from context + + link_list = context_client.ListLinks(Empty()) + link_uuids = [] + + # For each consecutive pair of endpoints in the path + for i in range(len(endpoint_ids) - 1): + src_device_uuid, src_endpoint_uuid = endpoint_ids[i] + dst_device_uuid, dst_endpoint_uuid = endpoint_ids[i + 1] + + # Find link connecting these endpoints + for link in link_list.links: + if len(link.link_endpoint_ids) != 2: + continue + + # Extract link endpoints + link_ep0_device = link.link_endpoint_ids[0].device_id.device_uuid.uuid + link_ep0_endpoint = link.link_endpoint_ids[0].endpoint_uuid.uuid + link_ep1_device = link.link_endpoint_ids[1].device_id.device_uuid.uuid + link_ep1_endpoint = link.link_endpoint_ids[1].endpoint_uuid.uuid + + # Check if link matches (bidirectional check) + if ((link_ep0_device == src_device_uuid and link_ep0_endpoint == src_endpoint_uuid and + link_ep1_device == dst_device_uuid and link_ep1_endpoint == dst_endpoint_uuid) or + (link_ep1_device == src_device_uuid and link_ep1_endpoint == src_endpoint_uuid and + link_ep0_device == dst_device_uuid and link_ep0_endpoint == dst_endpoint_uuid)): + link_uuids.append(link.link_id.link_uuid.uuid) + break + + return endpoint_ids, link_uuids \ No newline at end of file diff --git a/src/simap_connector/service/telemetry/worker/_Worker.py b/src/simap_connector/service/telemetry/worker/_Worker.py index ae0da4fc7..e6fe4f1fb 100644 --- a/src/simap_connector/service/telemetry/worker/_Worker.py +++ b/src/simap_connector/service/telemetry/worker/_Worker.py @@ -35,12 +35,12 @@ class _Worker(threading.Thread): ) -> None: self._worker_type = worker_type self._worker_name = worker_name - self._worker_key = get_worker_key(worker_type, worker_name) + self._worker_key = get_worker_key(worker_type, worker_name) name = 'TelemetryWorker({:s})'.format(self._worker_key) super().__init__(name=name, daemon=True) - self._logger = logging.getLogger(name) - self._stop_event = threading.Event() - self._terminate = threading.Event() if terminate is None else terminate + self._logger = logging.getLogger(name) + self._stop_event = threading.Event() + self._terminate = threading.Event() if terminate is None else terminate @property def worker_type(self) -> WorkerTypeEnum: return self._worker_type diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index b94297128..9aa0e483f 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -45,7 +45,7 @@ class SyntheticSampler: return cls(amplitude, phase, period, offset, noise_ratio, min_value, max_value) def get_sample(self) -> Sample: - timestamp = datetime.timestamp(datetime.utcnow()) + timestamp = datetime.now().timestamp() waveform = math.sin(2 * math.pi * timestamp / self.period + self.phase) waveform *= self.amplitude diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/l3vpn_request_from_agg.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/l3vpn_request_from_agg.json new file mode 100644 index 000000000..ba9c9d853 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/l3vpn_request_from_agg.json @@ -0,0 +1,185 @@ +{ + "ietf-l3vpn-svc:l3vpn-svc": { + "sites": { + "site": [ + { + "devices": { + "device": [ + { + "device-id": "P-PE1", + "location": "access" + } + ] + }, + "locations": { + "location": [ + { + "location-id": "access" + } + ] + }, + "management": { + "type": "ietf-l3vpn-svc:provider-managed" + }, + "routing-protocols": { + "routing-protocol": [ + { + "static": { + "cascaded-lan-prefixes": { + "ipv4-lan-prefixes": [ + { + "lan": "172.1.101.22/24", + "lan-tag": "21", + "next-hop": "128.32.44.254" + } + ] + } + }, + "type": "ietf-l3vpn-svc:static" + } + ] + }, + "site-id": "site_access", + "site-network-accesses": { + "site-network-access": [ + { + "device-reference": "P-PE1", + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "customer-address": "128.32.44.254", + "prefix-length": "24", + "provider-address": "128.32.44.254" + } + } + }, + "service": { + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "bandwidth": { + "guaranteed-bw-percent": 100 + }, + "class-id": "qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": { + "latency-boundary": 20 + } + } + ] + } + } + }, + "svc-input-bandwidth": 1000000000, + "svc-mtu": 1500, + "svc-output-bandwidth": 5000000000 + }, + "site-network-access-id": "200", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "vpn-attachment": { + "site-role": "ietf-l3vpn-svc:hub-role", + "vpn-id": "slice25" + } + } + ] + } + }, + { + "devices": { + "device": [ + { + "device-id": "P-PE2", + "location": "cloud" + } + ] + }, + "locations": { + "location": [ + { + "location-id": "cloud" + } + ] + }, + "management": { + "type": "ietf-l3vpn-svc:provider-managed" + }, + "routing-protocols": { + "routing-protocol": [ + { + "static": { + "cascaded-lan-prefixes": { + "ipv4-lan-prefixes": [ + { + "lan": "172.16.104.221/24", + "lan-tag": "201", + "next-hop": "172.10.44.254" + } + ] + } + }, + "type": "ietf-l3vpn-svc:static" + } + ] + }, + "site-id": "site_cloud", + "site-network-accesses": { + "site-network-access": [ + { + "device-reference": "P-PE2", + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "customer-address": "172.10.44.254", + "prefix-length": "24", + "provider-address": "172.10.44.254" + } + } + }, + "service": { + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "bandwidth": { + "guaranteed-bw-percent": 100 + }, + "class-id": "qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": { + "latency-boundary": 10 + } + } + ] + } + } + }, + "svc-input-bandwidth": 5000000000, + "svc-mtu": 1500, + "svc-output-bandwidth": 1000000000 + }, + "site-network-access-id": "200", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "vpn-attachment": { + "site-role": "ietf-l3vpn-svc:spoke-role", + "vpn-id": "slice25" + } + } + ] + } + } + ] + }, + "vpn-services": { + "vpn-service": [ + { + "vpn-id": "slice25" + } + ] + } + } +} diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy.sh b/src/tests/ecoc25-f5ga-telemetry/deploy.sh index 4bdf8715d..73873fb88 100755 --- a/src/tests/ecoc25-f5ga-telemetry/deploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy.sh @@ -80,9 +80,9 @@ case "$HOSTNAME" in source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh ./deploy/all.sh - echo "Waiting for NATS connection..." - while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done - kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + # echo "Waiting for NATS connection..." + # while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + # kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server ;; *) echo "Unknown host: $HOSTNAME" diff --git a/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_delete.sh b/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_delete.sh new file mode 100755 index 000000000..d5e199e0c --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_delete.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ------------- +# For direct testing of L3VPN delete from IP-Controller, without the need to trigger it from AGG-Controller. +# This is a dummy script that replicates the behavior of AGG-Controller when it sends a delete request to IP-Controller. +# -------------- + +cd $(dirname $0) + +echo "[IP-Controller] sending L3VPN delete (dummy replicating AGG-Controller )..." +curl --request DELETE --user admin:admin --location \ + http://10.254.0.12:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service=slice25 + +echo + +echo "Done! Delete!" diff --git a/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh b/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh new file mode 100755 index 000000000..f246c3b3b --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ------------- +# For direct testing of L3VPN request from IP-Controller, without the need to trigger it from AGG-Controller. +# This is a dummy script that replicates the behavior of AGG-Controller when it sends a request to IP-Controller. +# -------------- + +cd $(dirname $0) + +echo "[IP-Controller] sending L3VPN request (dummy replicating AGG-Controller request)..." +curl --request POST --location --user admin:admin --header 'Content-Type: application/json' \ + --data @data/slices/l3vpn_request_agg.json \ + http://127.0.0.1:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services +echo + + +echo "Done!" diff --git a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh new file mode 100755 index 000000000..ec68fe5e7 --- /dev/null +++ b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set working directory +cd "$(dirname "$0")" || exit 1 + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Collecting logs for ${HOSTNAME}..." + +rm logs -rf tmp/exec +mkdir -p tmp/exec + +case "$HOSTNAME" in + simap-server) + echo "Collecting Docker container logs..." + docker logs simap-server > tmp/exec/simap-server.log 2>&1 + docker logs nce-fan-ctrl > tmp/exec/nce-fan-ctrl.log 2>&1 + docker logs nce-t-ctrl > tmp/exec/nce-t-ctrl.log 2>&1 + docker logs traffic-changer > tmp/exec/traffic-changer.log 2>&1 + ;; + tfs-e2e-ctrl) + echo "Collecting TFS E2E Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/e2e-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/e2e-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/e2e-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/e2e-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/e2e-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/e2e-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/e2e-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/e2e-simap-connector.log + ;; + tfs-agg-ctrl) + echo "Collecting TFS Aggregation Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/agg-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/agg-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/agg-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/agg-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/agg-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/agg-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/agg-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/agg-simap-connector.log + ;; + tfs-ip-ctrl) + echo "Collecting TFS IP Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/ip-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/ip-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/ip-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/ip-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/ip-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/ip-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/ip-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/ip-simap-connector.log + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "No logs to collect." + ;; +esac + +printf "\n" + +echo "Done!" -- GitLab From 02dabaa6aa1769a23d43d66305753c9084e49c56 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Tue, 10 Feb 2026 12:30:47 +0000 Subject: [PATCH 04/78] feat: Comment out traffic-changer related commands in deploy and destroy scripts --- src/tests/ecoc25-f5ga-telemetry/deploy.sh | 10 +++++----- src/tests/ecoc25-f5ga-telemetry/destroy.sh | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy.sh b/src/tests/ecoc25-f5ga-telemetry/deploy.sh index 73873fb88..bf69a0a34 100755 --- a/src/tests/ecoc25-f5ga-telemetry/deploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy.sh @@ -35,21 +35,21 @@ case "$HOSTNAME" in cd ~/tfs-ctrl/ docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . - echo "Building Traffic Changer..." - cd ~/tfs-ctrl/ - docker buildx build -t traffic-changer:mock -f ./src/tests/tools/traffic_changer/Dockerfile . + # echo "Building Traffic Changer..." + # cd ~/tfs-ctrl/ + # docker buildx build -t traffic-changer:mock -f ./src/tests/tools/traffic_changer/Dockerfile . echo "Cleaning up..." docker rm --force simap-server docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl - docker rm --force traffic-changer + # docker rm --force traffic-changer echo "Deploying support services..." docker run --detach --name simap-server --publish 8080:8080 simap-server:mock docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock - docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock + # docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock sleep 2 docker ps -a diff --git a/src/tests/ecoc25-f5ga-telemetry/destroy.sh b/src/tests/ecoc25-f5ga-telemetry/destroy.sh index 47977562d..37850e5f7 100755 --- a/src/tests/ecoc25-f5ga-telemetry/destroy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/destroy.sh @@ -27,7 +27,7 @@ case "$HOSTNAME" in docker rm --force simap-server docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl - docker rm --force traffic-changer + # docker rm --force traffic-changer sleep 2 docker ps -a -- GitLab From 5e07d785f540f83a9559f7febf8017731d47195c Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 13 Feb 2026 12:38:19 +0000 Subject: [PATCH 05/78] feat: Update SIMAP connection remove and created handling - Now, telemetry generation will be impacted by active connection count on a link. - On connection creation, the values will up and vice versa --- .../tools/context_queries/Connection.py | 10 +- .../service/simap_updater/ObjectCache.py | 25 ++- .../service/simap_updater/SimapUpdater.py | 162 ++++++++++++------ 3 files changed, 130 insertions(+), 67 deletions(-) diff --git a/src/common/tools/context_queries/Connection.py b/src/common/tools/context_queries/Connection.py index 118dfe136..99e982f51 100644 --- a/src/common/tools/context_queries/Connection.py +++ b/src/common/tools/context_queries/Connection.py @@ -15,7 +15,7 @@ import grpc, logging from typing import List, Optional from common.Constants import DEFAULT_CONTEXT_NAME -from common.proto.context_pb2 import Connection, ConnectionId, ContextId +from common.proto.context_pb2 import Connection, ConnectionId from context.client.ContextClient import ContextClient LOGGER = logging.getLogger(__name__) @@ -42,11 +42,3 @@ def get_connection_by_uuid( connection_id = ConnectionId() connection_id.connection_uuid.uuid = connection_uuid return get_connection_by_id(context_client, connection_id, rw_copy=rw_copy) - -def get_connections( - context_client : ContextClient, context_uuid : str = DEFAULT_CONTEXT_NAME -) -> List[Connection]: - context_id = ContextId() - context_id.context_uuid.uuid = context_uuid - connections = context_client.ListConnections(context_id) - return [c for c in connections.connections] diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py index 94ef4fa95..1e326131d 100644 --- a/src/simap_connector/service/simap_updater/ObjectCache.py +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -20,7 +20,7 @@ from common.tools.context_queries.Device import get_device, get_devices from common.tools.context_queries.Link import get_link, get_links from common.tools.context_queries.Topology import get_topology, get_topologies from common.tools.context_queries.Service import get_service_by_uuid, get_services -from common.tools.context_queries.Connection import get_connection_by_uuid, get_connections +from common.tools.context_queries.Connection import get_connection_by_uuid from context.client.ContextClient import ContextClient @@ -64,6 +64,7 @@ class ObjectCache: def __init__(self, context_client : ContextClient): self._context_client = context_client self._object_cache : Dict[Tuple[str, str], Any] = dict() + # self.populate_all_cache() # Added for testing purposes; can be removed. def get( self, entity : CachedEntities, *object_uuids : str, @@ -180,12 +181,6 @@ class ObjectCache: (s.service_id.service_uuid.uuid, s.name) : s for s in objects } - elif entity == CachedEntities.CONNECTION: - objects = get_connections(self._context_client) - objects = { - (c.connection_id.connection_uuid.uuid, c.connection_id.connection_uuid.uuid) : c - for c in objects - } else: MSG = 'Not Supported ({:s})' LOGGER.warning(MSG.format(str(entity.value).title())) @@ -214,3 +209,19 @@ class ObjectCache: def delete(self, entity : CachedEntities, *object_uuids : str) -> None: object_key = compose_object_key(entity, *object_uuids) self._object_cache.pop(object_key, None) + + def populate_all_cache(self) -> None: + """Populate cache with all entities for testing purposes.""" + LOGGER.info('Populating cache with all entities for testing...') + for entity in CachedEntities: + if entity in (CachedEntities.ENDPOINT, CachedEntities.CONNECTION): + # Endpoints are populated when devices are updated + # Connections are service-scoped; cached on-demand during events + continue + try: + self._update_all(entity) + # LOGGER.info('Populated cache for entity: {:s}'.format(entity.value)) + except Exception as e: + LOGGER.warning('Failed to populate cache for entity {:s}: {:s}'.format( + entity.value, str(e))) + LOGGER.info('Cache population completed') diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 3ac2613d3..6ba5cb780 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -49,10 +49,10 @@ SKIPPED_DEVICE_TYPES = { class EventDispatcher(BaseEventDispatcher): # Telemetry scaling configuration - BASE_BANDWIDTH_OFFSET = 5.0 # Minimum bandwidth utilization % (no services) - MAX_BANDWIDTH_OFFSET = 90.0 # Maximum bandwidth utilization % (at capacity) - MAX_EXPECTED_SERVICES = 10 # Expected maximum concurrent services - DEFAULT_LINK_OFFSET = 25.0 # Default offset used in _dispatch_link_set + BASE_BANDWIDTH_OFFSET = 5.0 # Minimum bandwidth utilization % (no services) + MAX_BANDWIDTH_OFFSET = 90.0 # Maximum bandwidth utilization % (at capacity) + MAX_EXPECTED_CONNECTIONS = 10 # Expected maximum concurrent connections per link for scaling purposes + DEFAULT_LINK_OFFSET = 25.0 # Default offset used in _dispatch_link_set def __init__( self, events_queue : queue.PriorityQueue, @@ -671,6 +671,12 @@ class EventDispatcher(BaseEventDispatcher): # Extract connection UUID from event connection_uuid = connection_event.connection_id.connection_uuid.uuid + # Clean up any stale mapping for this connection (e.g., if connection is being re-created) + old_mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) + if old_mapping is not None and isinstance(old_mapping, dict) and 'domain' in old_mapping: + self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) + LOGGER.debug('Removed stale mapping for connection {:s} before processing'.format(connection_uuid)) + try: # Use common helper to prepare connection data result = self._prepare_connection_processing(connection_uuid) @@ -679,11 +685,11 @@ class EventDispatcher(BaseEventDispatcher): (topology_name, processed_links) = result # Update telemetry for each link involved in this connection - bandwidth_factor = self._calculate_bandwidth_factor() - - for _, link_name in processed_links: - LOGGER.info('Connection {:s} uses allowed link: {:s}'.format(connection_uuid, link_name)) - worker_name = '{:s}:{:s}'.format(topology_name, link_name) + for link_uuid, link_name in processed_links: + # Calculate bandwidth factor specific to this link + bandwidth_factor = self._calculate_bandwidth_factor(link_uuid, topology_name) + LOGGER.info('Connection {:s} uses allowed link: {:s} (uuid: {:s})'.format(connection_uuid, link_name, link_uuid)) + worker_name = '{:s}:{:s}'.format(topology_name, link_name) # Worker should already exist from _dispatch_link_set (link creation event) if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): @@ -731,6 +737,7 @@ class EventDispatcher(BaseEventDispatcher): return True + def _prepare_connection_processing(self, connection_uuid: str): """ Extract common logic for processing connection events. @@ -770,65 +777,81 @@ class EventDispatcher(BaseEventDispatcher): connection_uuid, domain_name)) return None + # Cache the connection-to-links mapping for later retrieval (e.g., during REMOVE events) + mapping = { + 'domain': domain_name, + 'links': {link_uuid: {'name': link_name} for link_uuid, link_name in processed_links} + } + self._object_cache.set(CachedEntities.CONNECTION, mapping, connection_uuid) + LOGGER.debug('Cached connection {:s} mapping with {:d} links for domain {:s}'.format( + connection_uuid, len(processed_links), domain_name)) + return domain_name, processed_links # NOTE: Domain name = topology name - def _calculate_bandwidth_factor(self) -> float: + + def _calculate_bandwidth_factor(self, link_uuid: str, domain_name: str, + active_connection_count: Optional[int] = None) -> float: """ - Calculate bandwidth scaling factor based on active service count. + Calculate bandwidth scaling factor based on active connection count for a specific link. + + Args: + link_uuid: UUID of the link to calculate factor for + domain_name: Domain name to filter connections + active_connection_count: Pre-calculated connection count (if None, will be calculated) Returns: float: Bandwidth factor to multiply with existing worker offset """ try: - # Query all services from Context - all_services = self._object_cache.get_all(CachedEntities.SERVICE, fresh=False) - - # Count active services (SERVICESTATUS_ACTIVE or SERVICESTATUS_UPDATING) - active_service_count = 0 - for service in all_services: - service_status = service.service_status.service_status - if service_status in (ServiceStatusEnum.SERVICESTATUS_ACTIVE, - ServiceStatusEnum.SERVICESTATUS_UPDATING): - # Skip sub-services (UUID-based names) - try: - uuid.UUID(hex=service.name) - continue # Skip sub-services - except: # pylint: disable=bare-except - active_service_count += 1 - - active_service_count = int(active_service_count / 2) # Each service appears as two connections (uuid and name) - LOGGER.info('Active service count: {:d}'.format(int(active_service_count))) + # If connection count not provided, calculate it from cache + if active_connection_count is None: + all_cached_connections = self._object_cache.get_all(CachedEntities.CONNECTION, fresh=False) + active_connection_count = 0 + for cached_obj in all_cached_connections: + if not isinstance(cached_obj, dict) or 'domain' not in cached_obj or 'links' not in cached_obj: + continue + + if cached_obj['domain'] != domain_name: + continue + + if link_uuid in cached_obj['links']: + active_connection_count += 1 + + LOGGER.info('Active connection count on link {:s} in domain {:s}: {:d}'.format( + link_uuid, domain_name, active_connection_count)) # Calculate bandwidth offset using linear scaling - service_ratio = min(active_service_count / self.MAX_EXPECTED_SERVICES, 1.0) - target_bandwidth_offset = (self.BASE_BANDWIDTH_OFFSET + (service_ratio * + connection_ratio = min(active_connection_count / self.MAX_EXPECTED_CONNECTIONS, 1.0) + target_bandwidth_offset = (self.BASE_BANDWIDTH_OFFSET + (connection_ratio * (self.MAX_BANDWIDTH_OFFSET - self.BASE_BANDWIDTH_OFFSET))) # Calculate adjustment factor relative to default offset bandwidth_factor = target_bandwidth_offset / self.DEFAULT_LINK_OFFSET - LOGGER.info('Calculated bandwidth_factor={:.2f} (service_count={:d}, target_offset={:.2f})'.format( - bandwidth_factor, active_service_count, target_bandwidth_offset)) + LOGGER.info('Calculated bandwidth_factor={:.2f} (connection_count={:d}, target_offset={:.2f})'.format( + bandwidth_factor, active_connection_count, target_bandwidth_offset)) return bandwidth_factor except Exception as e: LOGGER.exception('Failed to calculate bandwidth factor: {:s}'.format(str(e))) - # Return default factor (1.0 = no change) return 1.0 + def dispatch_connection_create(self, connection_event : ConnectionEvent) -> None: if not self.dispatch_connection_set(connection_event): return MSG = 'Skipping Connection Create Event: {:s}' LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) - + + def dispatch_connection_update(self, connection_event : ConnectionEvent) -> None: if not self.dispatch_connection_set(connection_event): return MSG = 'Skipping Connection Update Event: {:s}' LOGGER.debug(MSG.format(grpc_message_to_json_string(connection_event))) - + + def dispatch_connection_remove(self, connection_event : ConnectionEvent) -> None: MSG = 'Processing Connection Remove Event: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(connection_event))) @@ -836,28 +859,65 @@ class EventDispatcher(BaseEventDispatcher): connection_uuid = connection_event.connection_id.connection_uuid.uuid try: - result = self._prepare_connection_processing(connection_uuid) - if result is None: - return - (topology_name, processed_links) = result - - # Update telemetry factor for remaining services - bandwidth_factor = self._calculate_bandwidth_factor() - - for _, link_name in processed_links: + mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) + + if mapping is None: + MSG = 'Connection {:s} not found in cache, cannot process removal (service may have restarted)' + raise Exception(MSG.format(connection_uuid)) + + # Defensive: distinguish mapping dicts from potential protobuf Connection objects + if not isinstance(mapping, dict) or 'domain' not in mapping or 'links' not in mapping: + MSG = 'Invalid mapping structure for connection {:s}: expected dict with domain/links keys' + raise Exception(MSG.format(connection_uuid)) + + # Extract domain and links from cached mapping + topology_name = mapping['domain'] + link_uuids_dict = mapping['links'] + processed_links = [(link_uuid, link_data['name']) for link_uuid, link_data in link_uuids_dict.items()] + + LOGGER.info('Retrieved cached mapping for connection {:s}: domain={:s}, links={:d}'.format( + connection_uuid, topology_name, len(processed_links))) + + # Process each link: count remaining connections and stop/update worker accordingly + for link_uuid, link_name in processed_links: worker_name = '{:s}:{:s}'.format(topology_name, link_name) if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): LOGGER.warning('Worker not found for link {:s}, skipping telemetry update for connection removal'.format(link_name)) continue - worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) - assert isinstance(worker, SynthesizerWorker), \ - 'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__) + # Count how many OTHER connections (excluding current one being removed) use this link + remaining_connections_count = 0 + for cache_key, cached_obj in self._object_cache._object_cache.items(): + # Filter for CONNECTION entity type + if cache_key[0] != 'connection': + continue + cached_conn_uuid = cache_key[1] + if cached_conn_uuid == connection_uuid: + continue + + if isinstance(cached_obj, dict) and 'links' in cached_obj: + if link_uuid in cached_obj['links']: + remaining_connections_count += 1 - # Update bandwidth scaling - worker.change_resources(bandwidth_factor, latency_factor=1.0) - LOGGER.info('Updated telemetry for link {:s} after connection removal'.format(link_name)) + if remaining_connections_count == 0: + # No other connections use this link, stop the worker + self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + LOGGER.info('Stopped telemetry worker for link {:s}, no connections remain'.format(link_name)) + else: + # Other connections still use this link, recalculate bandwidth factor + bandwidth_factor = self._calculate_bandwidth_factor(link_uuid, topology_name, remaining_connections_count) + worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + assert isinstance(worker, SynthesizerWorker), \ + 'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__) + + worker.change_resources(bandwidth_factor, latency_factor=1.0) + LOGGER.info('Updated telemetry for link {:s} after connection removal, {:d} connections remain'.format( + link_name, remaining_connections_count)) + + # Clean up the mapping for this connection + self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) + LOGGER.debug('Deleted cached mapping for connection {:s}'.format(connection_uuid)) except Exception as e: LOGGER.exception('Failed to process connection removal {:s}: {:s}'.format( -- GitLab From 4f3e75b115e26475d746d845f919820338755683 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 13 Feb 2026 12:47:54 +0000 Subject: [PATCH 06/78] feat: Update logging message formatting in callback methods --- .../tools/rest_conf/server/restconf_server/Callbacks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/common/tools/rest_conf/server/restconf_server/Callbacks.py b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py index 04a8b8bd9..bd66be2f7 100644 --- a/src/common/tools/rest_conf/server/restconf_server/Callbacks.py +++ b/src/common/tools/rest_conf/server/restconf_server/Callbacks.py @@ -49,7 +49,7 @@ class _Callback: @param old_data: Resource representation before retrieval, if applicable, otherwise `None` @returns boolean indicating whether additional callbacks should be executed, defaults to False ''' - MSG = 'match={:s}, path={:s}, old_data={:s}' + MSG = 'match={}, path={}, old_data={}' msg = MSG.format(match.groupdict(), path, old_data) raise NotImplementedError(msg) @@ -66,7 +66,7 @@ class _Callback: @param new_data: Resource representation after change, if applicable, otherwise `None` @returns boolean indicating whether additional callbacks should be executed, defaults to False ''' - MSG = 'match={:s}, path={:s}, old_data={:s}, new_data={:s}' + MSG = 'match={}, path={}, old_data={}, new_data={}' msg = MSG.format(match.groupdict(), path, old_data, new_data) raise NotImplementedError(msg) @@ -81,7 +81,7 @@ class _Callback: @param input_data: Input data, if applicable, otherwise `None` @returns Optional[Dict] containing output data, defaults to None ''' - MSG = 'match={:s}, path={:s}, input_data={:s}' + MSG = 'match={}, path={}, input_data={:s}' msg = MSG.format(match.groupdict(), path, input_data) raise NotImplementedError(msg) -- GitLab From 2b3e9f2137caffa8217f411b53d5000f48f3bd07 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 13 Feb 2026 14:14:25 +0000 Subject: [PATCH 07/78] feat: Update SIMAP endpoint definitions and improve link telemetry handling --- .../service/simap_updater/ObjectCache.py | 2 +- .../service/simap_updater/SimapClient.py | 17 ++++++++++------- 2 files changed, 11 insertions(+), 8 deletions(-) diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py index 1e326131d..996c1ed87 100644 --- a/src/simap_connector/service/simap_updater/ObjectCache.py +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -64,7 +64,7 @@ class ObjectCache: def __init__(self, context_client : ContextClient): self._context_client = context_client self._object_cache : Dict[Tuple[str, str], Any] = dict() - # self.populate_all_cache() # Added for testing purposes; can be removed. + # self.populate_all_cache() # NOTE: Added for testing purposes; can be removed. def get( self, entity : CachedEntities, *object_uuids : str, diff --git a/src/simap_connector/service/simap_updater/SimapClient.py b/src/simap_connector/service/simap_updater/SimapClient.py index 725b08bd4..8cdf4708e 100644 --- a/src/simap_connector/service/simap_updater/SimapClient.py +++ b/src/simap_connector/service/simap_updater/SimapClient.py @@ -64,7 +64,7 @@ class TerminationPoint: class NodeTelemetry: - ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' + ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): self._restconf_client = restconf_client @@ -173,7 +173,7 @@ class Node: class LinkTelemetry: - ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' + ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}' def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): self._restconf_client = restconf_client @@ -197,8 +197,8 @@ class LinkTelemetry: def get(self) -> Dict: endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) - telemetry : Dict = self._restconf_client.get(endpoint) - return telemetry + link : Dict = self._restconf_client.get(endpoint) + return link.get('ietf-network-topology:link', [{}])[0].get('simap-telemetry:simap-telemetry', {}) def update( self, bandwidth_utilization : float, latency : float, @@ -210,14 +210,17 @@ class LinkTelemetry: 'latency' : '{:.3f}'.format(latency), } if len(related_service_ids) > 0: telemetry['related-service-ids'] = related_service_ids - link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} - network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': telemetry} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} payload = {'ietf-network:networks': {'network': [network]}} self._restconf_client.patch(endpoint, payload) def delete(self) -> None: endpoint = LinkTelemetry.ENDPOINT.format(self._network_id, self._link_id) - self._restconf_client.delete(endpoint) + link = {'link-id': self._link_id, 'simap-telemetry:simap-telemetry': {}} + network = {'network-id': self._network_id, 'ietf-network-topology:link': [link]} + payload = {'ietf-network:networks': {'network': [network]}} + self._restconf_client.patch(endpoint, payload) class Link: -- GitLab From 980da96acfe070db8107fec1641f96ac98669b00 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 13 Feb 2026 14:19:52 +0000 Subject: [PATCH 08/78] feat: Update L3VPN request data file path in dummy script --- src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh b/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh index f246c3b3b..c195fe34f 100755 --- a/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh +++ b/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh @@ -22,7 +22,7 @@ cd $(dirname $0) echo "[IP-Controller] sending L3VPN request (dummy replicating AGG-Controller request)..." curl --request POST --location --user admin:admin --header 'Content-Type: application/json' \ - --data @data/slices/l3vpn_request_agg.json \ + --data @data/slices/l3vpn_request_from_agg.json \ http://127.0.0.1:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services echo -- GitLab From 86721d10c1e8dfd22f62a918550957857a017f6b Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 13 Feb 2026 15:08:48 +0000 Subject: [PATCH 09/78] feat: Improve connection removal handling by skipping connections not managed by the controller --- src/simap_connector/service/simap_updater/SimapUpdater.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 6ba5cb780..9a2b236c6 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -862,8 +862,9 @@ class EventDispatcher(BaseEventDispatcher): mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) if mapping is None: - MSG = 'Connection {:s} not found in cache, cannot process removal (service may have restarted)' - raise Exception(MSG.format(connection_uuid)) + MSG = 'Connection {:s} not managed by this controller (not in allowed links), skipping removal' + LOGGER.debug(MSG.format(connection_uuid)) + return # Defensive: distinguish mapping dicts from potential protobuf Connection objects if not isinstance(mapping, dict) or 'domain' not in mapping or 'links' not in mapping: -- GitLab From 5c58e56c0077db85e74d6e9a2805cb3d81cc6b38 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 13 Feb 2026 16:59:41 +0000 Subject: [PATCH 10/78] fix: Correct conditional logic for mapping validation in EventDispatcher --- src/simap_connector/service/simap_updater/SimapUpdater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 9a2b236c6..b09fd6460 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -867,7 +867,7 @@ class EventDispatcher(BaseEventDispatcher): return # Defensive: distinguish mapping dicts from potential protobuf Connection objects - if not isinstance(mapping, dict) or 'domain' not in mapping or 'links' not in mapping: + elif not isinstance(mapping, dict) or 'domain' not in mapping or 'links' not in mapping: MSG = 'Invalid mapping structure for connection {:s}: expected dict with domain/links keys' raise Exception(MSG.format(connection_uuid)) -- GitLab From 76132fd1505555b9179e4594e434ba418182fa20 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 14 Feb 2026 16:44:39 +0000 Subject: [PATCH 11/78] feat: Refactor InfluxDB client initialization and update telemetry callback patterns - Moved DB initilization from app.py to DB client py - update path telemetry SIMAP path in test client implementation --- .../rest_conf/server/restconf_server/app.py | 10 +--- .../callbacks/TelemetryCallbacks.py | 10 ++-- .../server/restconf_server/influxdb_client.py | 59 +++++++++++++------ .../AI_analytics_engine/config/Config.py | 4 +- src/tests/mwc26-f5ga/deploy.sh | 45 ++++++++------ .../simap_server/simap_client/SimapClient.py | 6 +- .../simap_server/simap_client/__main__.py | 31 ++++++---- 7 files changed, 101 insertions(+), 64 deletions(-) diff --git a/src/common/tools/rest_conf/server/restconf_server/app.py b/src/common/tools/rest_conf/server/restconf_server/app.py index f8b0321b4..27bc97fc4 100644 --- a/src/common/tools/rest_conf/server/restconf_server/app.py +++ b/src/common/tools/rest_conf/server/restconf_server/app.py @@ -14,7 +14,6 @@ from .callbacks import CallbackOnLinkTelemetry, CallbackOnNodeTelemetry -from .Config import INFLUXDB_HOST, INFLUXDB_PORT, INFLUXDB_TOKEN, INFLUXDB_DATABASE from .influxdb_client import SimapInfluxDBClient from .RestConfServerApplication import RestConfServerApplication import logging @@ -34,13 +33,8 @@ LOGGER.info('All connectors registered') # Initialize InfluxDB client and register telemetry callbacks try: - LOGGER.info('Initializing InfluxDB client (host=%s, port=%d, db=%s)...', INFLUXDB_HOST, INFLUXDB_PORT, INFLUXDB_DATABASE) - influx_client = SimapInfluxDBClient( - host = INFLUXDB_HOST, - port = INFLUXDB_PORT, - token = INFLUXDB_TOKEN, - database = INFLUXDB_DATABASE - ) + LOGGER.info('Initializing InfluxDB client with default configuration...') + influx_client = SimapInfluxDBClient() except Exception as e: LOGGER.error('Failed to initialize InfluxDB client: %s', e) influx_client = None diff --git a/src/common/tools/rest_conf/server/restconf_server/callbacks/TelemetryCallbacks.py b/src/common/tools/rest_conf/server/restconf_server/callbacks/TelemetryCallbacks.py index b54c4e583..a0c374349 100644 --- a/src/common/tools/rest_conf/server/restconf_server/callbacks/TelemetryCallbacks.py +++ b/src/common/tools/rest_conf/server/restconf_server/callbacks/TelemetryCallbacks.py @@ -101,12 +101,11 @@ class CallbackOnLinkTelemetry(_Callback): """ # Pattern matches: - # /restconf/data/ietf-network:networks/network=/ietf-network-topology:link=/simap-telemetry:simap-telemetry + # /restconf/data/ietf-network:networks/network=/ietf-network-topology:link= PATTERN = ( r'/restconf/data/ietf-network:networks' r'/network=(?P[^/]+)' r'/ietf-network-topology:link=(?P[^/]+)' - r'/simap-telemetry:simap-telemetry' ) def __init__(self, influx_client: SimapInfluxDBClient) -> None: @@ -119,7 +118,7 @@ class CallbackOnLinkTelemetry(_Callback): super().__init__(self.PATTERN) self._influx_client = influx_client - def execute( + def execute_data_update( self, match: re.Match, path: str, @@ -190,12 +189,11 @@ class CallbackOnNodeTelemetry(_Callback): """ # Pattern matches: - # /restconf/data/ietf-network:networks/network=/node=/simap-telemetry:simap-telemetry + # /restconf/data/ietf-network:networks/network=/node= PATTERN = ( r'/restconf/data/ietf-network:networks' r'/network=(?P[^/]+)' r'/node=(?P[^/]+)' - r'/simap-telemetry:simap-telemetry' ) def __init__(self, influx_client: SimapInfluxDBClient) -> None: @@ -208,7 +206,7 @@ class CallbackOnNodeTelemetry(_Callback): super().__init__(self.PATTERN) self._influx_client = influx_client - def execute( + def execute_data_update( self, match: re.Match, path: str, diff --git a/src/common/tools/rest_conf/server/restconf_server/influxdb_client.py b/src/common/tools/rest_conf/server/restconf_server/influxdb_client.py index 70dc56686..9e5b47619 100644 --- a/src/common/tools/rest_conf/server/restconf_server/influxdb_client.py +++ b/src/common/tools/rest_conf/server/restconf_server/influxdb_client.py @@ -22,6 +22,8 @@ from typing import List, Optional from influxdb_client_3 import InfluxDBClient3, Point, WritePrecision +from .Config import INFLUXDB_HOST, INFLUXDB_PORT, INFLUXDB_DATABASE, INFLUXDB_TOKEN + LOGGER = logging.getLogger(__name__) @@ -31,39 +33,62 @@ class SimapInfluxDBClient: Client wrapper for writing SIMAP telemetry data to InfluxDB 3.x. """ - def __init__( - self, - host: str, - port: int, - token: str, - database: str + def __init__( self, + host: Optional[str] = None, port: Optional[int] = None, + token: Optional[str] = None, database: Optional[str] = None ) -> None: """ Initialize the InfluxDB client. - Args: - host: InfluxDB server hostname - port: InfluxDB server port - token: Authentication token - database: Database/bucket name + host: InfluxDB server hostname (default: from INFLUXDB_HOST env or 'localhost') + port: InfluxDB server port (default: from INFLUXDB_PORT env or 8181) + token: Authentication token (default: from INFLUXDB_TOKEN env) + database: Database/bucket name (default: from INFLUXDB_DATABASE env or 'simap_telemetry') """ - self._host = host - self._port = port - self._database = database + self._host = host if host is not None else INFLUXDB_HOST + self._port = port if port is not None else INFLUXDB_PORT + self._database = database if database is not None else INFLUXDB_DATABASE + self._token = token if token is not None else INFLUXDB_TOKEN self._client: Optional[InfluxDBClient3] = None try: self._client = InfluxDBClient3( - token = token, - host = f"http://{host}:{port}", - database = database + token = self._token, + host = f"http://{self._host}:{self._port}", + database = self._database ) LOGGER.info("InfluxDB client initialized: host=%s:%d, database=%s", self._host, self._port, self._database) + + # Test the connection + if not self._test_connection(): + LOGGER.error("InfluxDB client initialized but connection test failed") + self._client = None + else: + LOGGER.info("InfluxDB connection test successful") + except Exception as e: # pylint: disable=broad-except LOGGER.error("Failed to initialize InfluxDB client: %s", str(e)) self._client = None + def _test_connection(self) -> bool: + """ + Test the InfluxDB connection by attempting a simple system query. + Returns: + True if connection is accessible, False otherwise + """ + if self._client is None: + LOGGER.warning("InfluxDB client not initialized, cannot test connection") + return False + + try: + query = "SHOW TABLES" + self._client.query(query=query, language="sql") + return True + except Exception as e: # pylint: disable=broad-except + LOGGER.error("InfluxDB connection test failed: %s", str(e)) + return False + def is_connected(self) -> bool: """Check if client is initialized.""" return self._client is not None diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/config/Config.py b/src/tests/mwc26-f5ga/AI_analytics_engine/config/Config.py index f2633797e..0d8e28994 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/config/Config.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/config/Config.py @@ -29,8 +29,8 @@ SIMAP_SERVER_USERNAME = get_setting('SIMAP_SERVER_USERNAME', default='admin') SIMAP_SERVER_PASSWORD = get_setting('SIMAP_SERVER_PASSWORD', default='admin') # InfluxDB Configuration -# INFLUXDB_HOST = get_setting('INFLUXDB_HOST', default='localhost') -INFLUXDB_HOST = 'localhost' +INFLUXDB_HOST = get_setting('INFLUXDB_HOST', default='localhost') +# INFLUXDB_HOST = 'localhost' INFLUXDB_PORT = int(get_setting('INFLUXDB_PORT', default='8181')) INFLUXDB_TOKEN = get_setting('INFLUXDB_TOKEN', default='') INFLUXDB_DATABASE = get_setting('INFLUXDB_DATABASE', default='simap_telemetry') diff --git a/src/tests/mwc26-f5ga/deploy.sh b/src/tests/mwc26-f5ga/deploy.sh index a558102e1..797b3e407 100755 --- a/src/tests/mwc26-f5ga/deploy.sh +++ b/src/tests/mwc26-f5ga/deploy.sh @@ -1,39 +1,48 @@ +echo "Starting deployment of MWC26-F5GA test environment..." + echo "Building SIMAP Server..." cd ~/tfs-ctrl/ docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . -echo "Building NCE-FAN Controller..." -cd ~/tfs-ctrl/ -docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . +# echo "Building NCE-FAN Controller..." +# cd ~/tfs-ctrl/ +# docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . -echo "Building NCE-T Controller..." -cd ~/tfs-ctrl/ -docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . +# echo "Building NCE-T Controller..." +# cd ~/tfs-ctrl/ +# docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . echo "Building AI Analytics Engine..." cd ~/tfs-ctrl/ docker buildx build -t ai-engine:latest -f ./src/tests/mwc26-f5ga/AI_analytics_engine/Dockerfile . -# echo "Building Traffic Changer..." -# cd ~/tfs-ctrl/ -# docker buildx build -t traffic-changer:mock -f ./src/tests/tools/traffic_changer/Dockerfile . # echo "Cleaning up..." -# docker rm --force simap-server +docker rm --force simap-server # docker rm --force nce-fan-ctrl # docker rm --force nce-t-ctrl -# docker rm --force ai-engine -# docker rm --force traffic-changer +docker rm --force ai-engine -echo "Deploying support services..." -docker run --detach --name simap-server --publish 8080:8080 simap-server:mock -docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock -docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock +# echo "Deploying support services..." +docker run --detach --name simap-server --publish 8080:8080 \ + -e INFLUXDB_HOST=10.254.0.9 \ + -e INFLUXDB_PORT=8181 \ + simap-server:mock +# docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock +# docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock echo "Deploying AI Analytics Engine..." -docker run --detach --name ai-engine --publish 8084:8080 --env SIMAP_SERVER_ADDRESS=172.17.0.1 --env SIMAP_SERVER_PORT=8080 ai-engine:latest -# docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock +docker run --detach --name ai-engine --publish 8084:8080 \ + -e INFLUXDB_HOST=10.254.0.9 -e INFLUXDB_PORT=8181 \ + --env SIMAP_SERVER_ADDRESS=172.17.0.1 --env SIMAP_SERVER_PORT=8080 \ + --env SIMAP_SERVER_USERNAME=admin --env SIMAP_SERVER_PASSWORD=admin \ + ai-engine:latest + + +# NOTE: If testing, run client (src/tests/tools/simap_server/run_client.sh) to manually populate SIMAP Server with telemetry data. + + sleep 2 docker ps -a diff --git a/src/tests/tools/simap_server/simap_client/SimapClient.py b/src/tests/tools/simap_server/simap_client/SimapClient.py index 96237ba12..3163f9ae7 100644 --- a/src/tests/tools/simap_server/simap_client/SimapClient.py +++ b/src/tests/tools/simap_server/simap_client/SimapClient.py @@ -64,7 +64,8 @@ class TerminationPoint: class NodeTelemetry: - ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' + ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}' + # ENDPOINT = '/ietf-network:networks/network={:s}/node={:s}/simap-telemetry:simap-telemetry' def __init__(self, restconf_client : RestConfClient, network_id : str, node_id : str): self._restconf_client = restconf_client @@ -173,7 +174,8 @@ class Node: class LinkTelemetry: - ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' + ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}' + # ENDPOINT = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' def __init__(self, restconf_client : RestConfClient, network_id : str, link_id : str): self._restconf_client = restconf_client diff --git a/src/tests/tools/simap_server/simap_client/__main__.py b/src/tests/tools/simap_server/simap_client/__main__.py index d583ea961..65c2bacda 100644 --- a/src/tests/tools/simap_server/simap_client/__main__.py +++ b/src/tests/tools/simap_server/simap_client/__main__.py @@ -34,10 +34,18 @@ def main() -> None: simap_client = SimapClient(restconf_client) generator = SimapMetricsGenerator(service_count=5) - # create_simap_te(simap_client) - # create_simap_trans(simap_client) - # create_simap_aggnet(simap_client) - # create_simap_e2enet(simap_client) + try: + create_simap_te(simap_client) + create_simap_trans(simap_client) + create_simap_aggnet(simap_client) + create_simap_e2enet(simap_client) + except Exception as e: + error_msg = str(e) + if 'status_code=409' in error_msg or 'already exists' in error_msg.lower(): + LOGGER.warning('SIMAP topology already exists, skipping further creation requests.') + else: + LOGGER.error('Error creating SIMAP topology: %s', e) + return print('networks=', json.dumps(simap_client.networks())) @@ -90,13 +98,14 @@ def main() -> None: abstract_links[link_id].telemetry.update(bw, lat, related_service_ids=domain_service_map[link_id]) # Print telemetry summary - print(f'--- Iteration {i} | Services: {generator.service_count} ---') - for link_id, (bw, lat) in te_metrics.items(): - print(f'TE {link_id:4s}: BW={bw:5.2f}%, Lat={lat:.3f}ms SvcIDs: {te_service_ids}') - for link_id, (bw, lat) in abstract_metrics.items(): - print(f'{link_id:10s}: BW={bw:5.2f}%, Lat={lat:.3f}ms SvcIDs: {domain_service_map[link_id]}') - - time.sleep(2) + if i != 0 and i % 5 == 0: + print(f'--- Iteration {i} | Services: {generator.service_count} ---') + for link_id, (bw, lat) in te_metrics.items(): + print(f'TE {link_id:4s}: BW={bw:5.2f}%, Lat={lat:.3f}ms SvcIDs: {te_service_ids}') + for link_id, (bw, lat) in abstract_metrics.items(): + print(f'{link_id:10s}: BW={bw:5.2f}%, Lat={lat:.3f}ms SvcIDs: {domain_service_map[link_id]}') + + time.sleep(10) if __name__ == '__main__': -- GitLab From e24cab3af785d4bb9763a2c9674e63c6f77f3910 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Mon, 16 Feb 2026 11:50:50 +0000 Subject: [PATCH 12/78] Service component - IETF L3VPN Service Handler: - Fix operation type when creating config rules for device component --- .../l3nm_ietfl3vpn/ConfigRules.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py b/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py index 3b537a467..68cbc448a 100644 --- a/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py +++ b/src/service/service/service_handlers/l3nm_ietfl3vpn/ConfigRules.py @@ -259,10 +259,10 @@ def setup_config_rules( "/service[{:s}]/IETFL3VPN".format(service_uuid), l3_vpn_data_model, ), - #json_config_rule_set( - # "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), - # {"type": operation_type}, - #), + json_config_rule_set( + "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), + {"type": operation_type}, + ), ] return json_config_rules @@ -274,10 +274,10 @@ def teardown_config_rules(service_uuid: str) -> List[Dict]: "/service[{:s}]/IETFL3VPN".format(service_uuid), {"id": service_uuid}, ), - #json_config_rule_delete( - # "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), - # {}, - #), + json_config_rule_delete( + "/service[{:s}]/IETFL3VPN/operation".format(service_uuid), + {"type": "delete"}, + ), ] return json_config_rules -- GitLab From 1784774c843d843a5156ef3a107856b12bd7d309 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Tue, 17 Feb 2026 11:50:53 +0000 Subject: [PATCH 13/78] feat: Updated Version of SIMAP Connector - Add link capacities - Updated SyntheticSampler class to return both BW and LAT is a single call. - change_resource method shoud work with conn_count only. --- .../SimapConnectorServiceServicerImpl.py | 7 +- .../service/simap_updater/AllowedLinks.py | 18 ++ .../service/simap_updater/ObjectCache.py | 2 +- .../service/simap_updater/SimapUpdater.py | 163 ++++++------------ .../telemetry/worker/SynthesizerWorker.py | 5 +- .../telemetry/worker/data/Resources.py | 16 +- .../worker/data/SyntheticSamplers.py | 147 ++++++++++++---- 7 files changed, 200 insertions(+), 158 deletions(-) diff --git a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py index b77df510a..3917772d1 100644 --- a/src/simap_connector/service/SimapConnectorServiceServicerImpl.py +++ b/src/simap_connector/service/SimapConnectorServiceServicerImpl.py @@ -165,7 +165,12 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): link_id = request.link_id bandwidth_factor = request.bandwidth_factor latency_factor = request.latency_factor + # connection_count = request.connection_count + # TODO: Remove bandwidth_factor and latency_factor from the request, as they are not used in the current implementation. + # Add connection_count to the request. + + connection_count = 0 synthesizer_name = '{:s}:{:s}'.format(network_id, link_id) synthesizer : Optional[_Worker] = self._telemetry_pool.get_worker( WorkerTypeEnum.SYNTHESIZER, synthesizer_name @@ -175,5 +180,5 @@ class SimapConnectorServiceServicerImpl(SimapConnectorServiceServicer): raise Exception(MSG.format(synthesizer_name)) assert isinstance(synthesizer, SynthesizerWorker), \ 'Expected SynthesizerWorker, got {:s}'.format(type(synthesizer).__name__) - synthesizer.change_resources(bandwidth_factor, latency_factor) + synthesizer.change_resources(connection_count) return Empty() diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index e01d78451..685c88a20 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -18,3 +18,21 @@ ALLOWED_LINKS_PER_CONTROLLER = { 'L11ba', 'L12ab', 'L12ba', 'L13', 'L14' }, 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, } +# NOTE: Ranges should be less than 100 because the schema does not allow +# bandwidth-utilization to exceed 100% +# As per schema below: (percentage of link capacity) +# /* --- Local typedefs --- */ + # typedef percent { + # type decimal64 { + # fraction-digits 2; + # range "0 .. 100"; + # } + # units "percent"; + # description "0–100 percent value."; + # } +LINKS_CAPACITY = { + 'L1' : 30, 'L2' : 30, 'L3' : 70, 'L4' : 70, + 'L5' : 90, 'L6' : 90, 'L9' : 90, 'L10' : 90, + 'L7ab' : 50, 'L7ba' : 50, 'L8ab' : 50, 'L8ba' : 50, 'L11ab' : 50, + 'L11ba' : 50, 'L12ab': 50, 'L12ba': 50, 'L13' : 30, 'L14' : 30, +} diff --git a/src/simap_connector/service/simap_updater/ObjectCache.py b/src/simap_connector/service/simap_updater/ObjectCache.py index 996c1ed87..98dc9a923 100644 --- a/src/simap_connector/service/simap_updater/ObjectCache.py +++ b/src/simap_connector/service/simap_updater/ObjectCache.py @@ -64,7 +64,7 @@ class ObjectCache: def __init__(self, context_client : ContextClient): self._context_client = context_client self._object_cache : Dict[Tuple[str, str], Any] = dict() - # self.populate_all_cache() # NOTE: Added for testing purposes; can be removed. + # self.populate_all_cache() # NOTE: Added for testing purposes; should be removed/commented. def get( self, entity : CachedEntities, *object_uuids : str, diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index b09fd6460..20f1a2d5f 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -30,7 +30,7 @@ from simap_connector.service.telemetry.worker.data.Resources import ( ) from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum from simap_connector.service.telemetry.TelemetryPool import SynthesizerWorker, TelemetryPool -from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER +from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER, LINKS_CAPACITY from .MockSimaps import delete_mock_simap, set_mock_simap from .ObjectCache import CachedEntities, ObjectCache from .SimapClient import SimapClient @@ -48,12 +48,6 @@ SKIPPED_DEVICE_TYPES = { class EventDispatcher(BaseEventDispatcher): - # Telemetry scaling configuration - BASE_BANDWIDTH_OFFSET = 5.0 # Minimum bandwidth utilization % (no services) - MAX_BANDWIDTH_OFFSET = 90.0 # Maximum bandwidth utilization % (at capacity) - MAX_EXPECTED_CONNECTIONS = 10 # Expected maximum concurrent connections per link for scaling purposes - DEFAULT_LINK_OFFSET = 25.0 # Default offset used in _dispatch_link_set - def __init__( self, events_queue : queue.PriorityQueue, simap_client : SimapClient, @@ -363,25 +357,17 @@ class EventDispatcher(BaseEventDispatcher): te_link.update(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) worker_name = '{:s}:{:s}'.format(topology_name, link_name) - resources = Resources() + resources = Resources() resources.links.append(ResourceLink( - domain_name=topology_name, link_name=link_name, - bandwidth_utilization_sampler=SyntheticSampler.create_random( - amplitude_scale = 25.0, - phase_scale = 1e-7, - period_scale = 86_400, - offset_scale = 25, - noise_ratio = 0.05, - min_value = 0.0, - max_value = 100.0, - ), - latency_sampler=SyntheticSampler.create_random( - amplitude_scale = 0.5, - phase_scale = 1e-7, - period_scale = 60.0, - offset_scale = 10.0, - noise_ratio = 0.05, - min_value = 0.0, + domain_name = topology_name, + link_name = link_name, + metrics_sampler = SyntheticSampler.create_random( + base_bw_range = (5.0, 25.0), + base_latency_range = (0.3, 2.0), + sensitivity_range = (0.3, 1.0), + curve_type = None, # Default is LINEAR + connection_count = 0, + link_capacity = LINKS_CAPACITY.get(link_name, 100.0) ), related_service_ids=[], )) @@ -686,8 +672,8 @@ class EventDispatcher(BaseEventDispatcher): # Update telemetry for each link involved in this connection for link_uuid, link_name in processed_links: - # Calculate bandwidth factor specific to this link - bandwidth_factor = self._calculate_bandwidth_factor(link_uuid, topology_name) + # Count active connections on this link + active_conn_count = self._count_active_connections(link_uuid, topology_name) LOGGER.info('Connection {:s} uses allowed link: {:s} (uuid: {:s})'.format(connection_uuid, link_name, link_uuid)) worker_name = '{:s}:{:s}'.format(topology_name, link_name) @@ -698,23 +684,15 @@ class EventDispatcher(BaseEventDispatcher): # Create worker with same parameters as in _dispatch_link_set resources = Resources() resources.links.append(ResourceLink( - domain_name=topology_name, link_name=link_name, - bandwidth_utilization_sampler=SyntheticSampler.create_random( - amplitude_scale = 25.0, - phase_scale = 1e-7, - period_scale = 86_400, - offset_scale = 25, - noise_ratio = 0.05, - min_value = 0.0, - max_value = 100.0, - ), - latency_sampler=SyntheticSampler.create_random( - amplitude_scale = 0.5, - phase_scale = 1e-7, - period_scale = 60.0, - offset_scale = 10.0, - noise_ratio = 0.05, - min_value = 0.0, + domain_name = topology_name, + link_name = link_name, + metrics_sampler = SyntheticSampler.create_random( + base_bw_range = (5.0, 25.0), + base_latency_range = (0.3, 2.0), + sensitivity_range = (0.3, 1.0), + curve_type = None, # Random curve type + connection_count = active_conn_count, + link_capacity = LINKS_CAPACITY.get(link_name, 100.0) ), related_service_ids=[], )) @@ -722,14 +700,14 @@ class EventDispatcher(BaseEventDispatcher): self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) LOGGER.info('Started new synthesizer worker: {:s}'.format(worker_name)) else: - # Worker exists, update bandwidth scaling factor + # Worker exists, update connection count for congestion simulation worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) assert isinstance(worker, SynthesizerWorker), \ 'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__) - worker.change_resources(bandwidth_factor, latency_factor=1.0) - LOGGER.info('Updated telemetry of already running worker: link {:s}, and bandwidth_factor={:.2f}'.format( - link_name, bandwidth_factor)) + worker.change_resources(active_conn_count) + LOGGER.info('Updated telemetry of already running worker: link {:s}, connection_count={:d}'.format( + link_name, active_conn_count)) except Exception as e: LOGGER.exception('Failed to process connection event {:s}: {:s}'.format(connection_uuid, str(e))) @@ -789,53 +767,32 @@ class EventDispatcher(BaseEventDispatcher): return domain_name, processed_links # NOTE: Domain name = topology name - def _calculate_bandwidth_factor(self, link_uuid: str, domain_name: str, - active_connection_count: Optional[int] = None) -> float: + def _count_active_connections(self, link_uuid: str, domain_name: str, ) -> int: """ - Calculate bandwidth scaling factor based on active connection count for a specific link. + Count active connections using a specific link. Args: - link_uuid: UUID of the link to calculate factor for + link_uuid: UUID of the link to count connections for domain_name: Domain name to filter connections - active_connection_count: Pre-calculated connection count (if None, will be calculated) - Returns: - float: Bandwidth factor to multiply with existing worker offset + int: Number of active connections using this link """ - try: - # If connection count not provided, calculate it from cache - if active_connection_count is None: - all_cached_connections = self._object_cache.get_all(CachedEntities.CONNECTION, fresh=False) - active_connection_count = 0 - for cached_obj in all_cached_connections: - if not isinstance(cached_obj, dict) or 'domain' not in cached_obj or 'links' not in cached_obj: - continue - - if cached_obj['domain'] != domain_name: - continue - - if link_uuid in cached_obj['links']: - active_connection_count += 1 - - LOGGER.info('Active connection count on link {:s} in domain {:s}: {:d}'.format( - link_uuid, domain_name, active_connection_count)) - - # Calculate bandwidth offset using linear scaling - connection_ratio = min(active_connection_count / self.MAX_EXPECTED_CONNECTIONS, 1.0) - target_bandwidth_offset = (self.BASE_BANDWIDTH_OFFSET + (connection_ratio * - (self.MAX_BANDWIDTH_OFFSET - self.BASE_BANDWIDTH_OFFSET))) - - # Calculate adjustment factor relative to default offset - bandwidth_factor = target_bandwidth_offset / self.DEFAULT_LINK_OFFSET - LOGGER.info('Calculated bandwidth_factor={:.2f} (connection_count={:d}, target_offset={:.2f})'.format( - bandwidth_factor, active_connection_count, target_bandwidth_offset)) + all_cached_connections = self._object_cache.get_all(CachedEntities.CONNECTION, fresh=False) + active_count = 0 + for cached_obj in all_cached_connections: + if not isinstance(cached_obj, dict) or 'domain' not in cached_obj or 'links' not in cached_obj: + continue + + if cached_obj['domain'] != domain_name: + continue + + if link_uuid in cached_obj['links']: + active_count += 1 - return bandwidth_factor - - except Exception as e: - LOGGER.exception('Failed to calculate bandwidth factor: {:s}'.format(str(e))) - return 1.0 + LOGGER.info('Active connection count on link {:s} in domain {:s}: {:d}'.format( + link_uuid, domain_name, active_count)) + return active_count def dispatch_connection_create(self, connection_event : ConnectionEvent) -> None: @@ -879,6 +836,10 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.info('Retrieved cached mapping for connection {:s}: domain={:s}, links={:d}'.format( connection_uuid, topology_name, len(processed_links))) + # Delete the connection from cache first (we already have the mapping) + self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) + LOGGER.debug('Deleted cached mapping for connection {:s}'.format(connection_uuid)) + # Process each link: count remaining connections and stop/update worker accordingly for link_uuid, link_name in processed_links: worker_name = '{:s}:{:s}'.format(topology_name, link_name) @@ -887,38 +848,22 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.warning('Worker not found for link {:s}, skipping telemetry update for connection removal'.format(link_name)) continue - # Count how many OTHER connections (excluding current one being removed) use this link - remaining_connections_count = 0 - for cache_key, cached_obj in self._object_cache._object_cache.items(): - # Filter for CONNECTION entity type - if cache_key[0] != 'connection': - continue - cached_conn_uuid = cache_key[1] - if cached_conn_uuid == connection_uuid: - continue - - if isinstance(cached_obj, dict) and 'links' in cached_obj: - if link_uuid in cached_obj['links']: - remaining_connections_count += 1 + # Count remaining connections on this link (now excluding the deleted one) + remaining_conn_count = self._count_active_connections(link_uuid, topology_name) - if remaining_connections_count == 0: + if remaining_conn_count == 0: # No other connections use this link, stop the worker self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) LOGGER.info('Stopped telemetry worker for link {:s}, no connections remain'.format(link_name)) else: - # Other connections still use this link, recalculate bandwidth factor - bandwidth_factor = self._calculate_bandwidth_factor(link_uuid, topology_name, remaining_connections_count) + # Other connections still use this link, update worker with new count worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) assert isinstance(worker, SynthesizerWorker), \ 'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__) - worker.change_resources(bandwidth_factor, latency_factor=1.0) + worker.change_resources(remaining_conn_count) LOGGER.info('Updated telemetry for link {:s} after connection removal, {:d} connections remain'.format( - link_name, remaining_connections_count)) - - # Clean up the mapping for this connection - self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) - LOGGER.debug('Deleted cached mapping for connection {:s}'.format(connection_uuid)) + link_name, remaining_conn_count)) except Exception as e: LOGGER.exception('Failed to process connection removal {:s}: {:s}'.format( diff --git a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py index 884a2cff8..e80070527 100644 --- a/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py +++ b/src/simap_connector/service/telemetry/worker/SynthesizerWorker.py @@ -34,11 +34,10 @@ class SynthesizerWorker(_Worker): self._resources = resources self._sampling_interval = sampling_interval - def change_resources(self, bandwidth_factor : float, latency_factor : float) -> None: + def change_resources(self, connection_count: int) -> None: with self._lock: for link in self._resources.links: - link.bandwidth_utilization_sampler.offset *= bandwidth_factor - link.latency_sampler.offset *= latency_factor + link.metrics_sampler.connection_count = connection_count def run(self) -> None: self._logger.info('[run] Starting...') diff --git a/src/simap_connector/service/telemetry/worker/data/Resources.py b/src/simap_connector/service/telemetry/worker/data/Resources.py index 49c16c340..2f3de0635 100644 --- a/src/simap_connector/service/telemetry/worker/data/Resources.py +++ b/src/simap_connector/service/telemetry/worker/data/Resources.py @@ -27,8 +27,8 @@ class ResourceNode: related_service_ids : List[str] = field(default_factory=list) def generate_samples(self, simap_client : SimapClient) -> None: - cpu_utilization = self.cpu_utilization_sampler.get_sample() - simap_node = simap_client.network(self.domain_name).node(self.node_name) + cpu_utilization, _ = self.cpu_utilization_sampler.get_sample() + simap_node = simap_client.network(self.domain_name).node(self.node_name) simap_node.telemetry.update( cpu_utilization.value, related_service_ids=self.related_service_ids ) @@ -36,15 +36,13 @@ class ResourceNode: @dataclass class ResourceLink: - domain_name : str - link_name : str - bandwidth_utilization_sampler : SyntheticSampler - latency_sampler : SyntheticSampler - related_service_ids : List[str] = field(default_factory=list) + domain_name : str + link_name : str + metrics_sampler : SyntheticSampler # Single sampler for both BW and latency + related_service_ids : List[str] = field(default_factory=list) def generate_samples(self, simap_client : SimapClient) -> None: - bandwidth_utilization = self.bandwidth_utilization_sampler.get_sample() - latency = self.latency_sampler.get_sample() + bandwidth_utilization, latency = self.metrics_sampler.get_sample() simap_link = simap_client.network(self.domain_name).link(self.link_name) simap_link.telemetry.update( bandwidth_utilization.value, latency.value, diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 9aa0e483f..862ae3150 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -16,48 +16,117 @@ import math, random, sys, threading from dataclasses import dataclass, field from datetime import datetime -from typing import Dict, Optional +from typing import Dict, Optional, Tuple from .Sample import Sample +# Congestion curve types +CURVE_LINEAR = 'linear' # x - steady increase +CURVE_EXPONENTIAL = 'exponential' # exp(x)-1 - slow start, rapid end +CURVE_LOGARITHMIC = 'logarithmic' # log(1+x) - fast start, plateau + +MAX_CONNECTIONS = 5 + +# LINEAR curve target ranges: 0 conns (5-15%), 5+ conns (90-99%) +LINEAR_MIN_BASE = 5.0 # Minimum BW at 0 connections +LINEAR_MIN_FULL = 90.0 # Minimum BW at 5+ connections +LINEAR_MAX_BASE = 15.0 # Maximum BW at 0 connections +LINEAR_MAX_FULL = 99.0 # Maximum BW at 5+ connections + @dataclass class SyntheticSampler: - amplitude : float = field(default=0.0) - phase : float = field(default=0.0) - period : float = field(default=1.0) - offset : float = field(default=0.0) - noise_ratio : float = field(default=0.0) - min_value : float = field(default=-sys.float_info.max) - max_value : float = field(default=sys.float_info.max) + base_bw : float = field(default = 10.0) # Base bandwidth utilization % + base_latency : float = field(default = 1.0) # Base latency in ms + sensitivity : float = field(default = 0.5) # Congestion sensitivity (0.0-1.0) + curve_type : str = field(default = CURVE_LINEAR) + connection_count : int = field(default = 0) # Current connection count for load simulation + link_capacity : float = field(default = 100.0) # Link capacity in Gbps + bw_min_value : float = field(default = 0.0) + bw_max_value : float = field(default = 100.0) + lat_min_value : float = field(default = 0.1) + lat_max_value : float = field(default = 20.0) @classmethod def create_random( - cls, amplitude_scale : float, phase_scale : float, period_scale : float, - offset_scale : float, noise_ratio : float, - min_value : Optional[float] = None, max_value : Optional[float] = None + cls, base_bw_range : Tuple[float, float] = (0.0, 1.0), + base_latency_range : Tuple[float, float] = (1.0, 3.0), + sensitivity_range : Tuple[float, float] = (0.0, 1.0), + curve_type : Optional[str] = CURVE_LINEAR, + connection_count : int = 0, + link_capacity : float = 100.0 ) -> 'SyntheticSampler': - amplitude = amplitude_scale * random.random() - phase = phase_scale * random.random() - period = period_scale * random.random() - offset = offset_scale * random.random() + amplitude - if min_value is None: min_value = -sys.float_info.max - if max_value is None: max_value = sys.float_info.max - return cls(amplitude, phase, period, offset, noise_ratio, min_value, max_value) - - def get_sample(self) -> Sample: - timestamp = datetime.now().timestamp() - - waveform = math.sin(2 * math.pi * timestamp / self.period + self.phase) - waveform *= self.amplitude - waveform += self.offset + """Create a random sampler with congestion curve parameters. + + For LINEAR: base_bw and sensitivity interpolate within target ranges per connection count. + For EXPONENTIAL/LOGARITHMIC: uses traditional curve formulas. + """ + base_bw = random.uniform(base_bw_range[0], base_bw_range[1]) + base_latency = random.uniform(base_latency_range[0], base_latency_range[1]) + sensitivity = random.uniform(sensitivity_range[0], sensitivity_range[1]) + + return cls(base_bw, base_latency, sensitivity, curve_type, connection_count, link_capacity) - noise = self.amplitude * random.random() - value = abs((1.0 - self.noise_ratio) * waveform + self.noise_ratio * noise) + def _compute_congestion_factor(self, load_ratio: float) -> float: + """Compute congestion factor based on curve type and load ratio (0-1).""" + if self.curve_type == CURVE_LINEAR: + return load_ratio + elif self.curve_type == CURVE_EXPONENTIAL: + # Exponential: slow start, rapid increase at high load + return (math.exp(load_ratio * 2) - 1) / (math.e ** 2 - 1) + elif self.curve_type == CURVE_LOGARITHMIC: + # Logarithmic: fast initial increase, then plateau + return math.log1p(load_ratio * 2.7) / math.log1p(2.7) + else: + return load_ratio # Default to linear - value = max(value, self.min_value) - value = min(value, self.max_value) - - return Sample(timestamp, 0, value) + def get_sample(self) -> Tuple[Sample, Sample]: + """Generate both bandwidth and latency samples using congestion curves. + + LINEAR curve uses simple linear interpolation between min/max lines. + EXPONENTIAL/LOGARITHMIC use formula-based congestion factors. + + Returns: + Tuple of (bandwidth_sample, latency_sample) + """ + timestamp = datetime.now().timestamp() + + if self.curve_type == CURVE_LINEAR: + # Simple linear interpolation + # Connection ratio: 0 (no load) to 1 (max load) + conn_ratio = min(self.connection_count / MAX_CONNECTIONS, 1.0) if MAX_CONNECTIONS > 0 else 0.0 + + # Minimum line: 5% at 0 conns → 90% at 5 conns + min_bw = LINEAR_MIN_BASE + conn_ratio * (LINEAR_MIN_FULL - LINEAR_MIN_BASE) + + # Maximum line: 15% at 0 conns → 99% at 5 conns + max_bw = LINEAR_MAX_BASE + conn_ratio * (LINEAR_MAX_FULL - LINEAR_MAX_BASE) + + # Interpolate between min and max using base_bw and sensitivity + interpolation_factor = (self.base_bw + self.sensitivity) / 2.0 + bw_utilization = min_bw + (max_bw - min_bw) * interpolation_factor + + # Latency scales proportionally (10x increase from base at full load) + bw_normalized = (bw_utilization - LINEAR_MIN_BASE) / (LINEAR_MAX_FULL - LINEAR_MIN_BASE) + latency = self.base_latency * (1.0 + bw_normalized * 9.0) + else: + # Use formula-based approach for EXPONENTIAL/LOGARITHMIC + load_ratio = self.connection_count / MAX_CONNECTIONS if MAX_CONNECTIONS > 0 else 0.0 + congestion_factor = self._compute_congestion_factor(load_ratio) + + bw_utilization = self.base_bw + (congestion_factor * self.sensitivity * 70.0) + latency = self.base_latency * (1.0 + congestion_factor * self.sensitivity * 9.0) + + # Add uniform noise (5%) + bw_noise = random.uniform(-0.05, 0.05) * bw_utilization + lat_noise = random.uniform(-0.05, 0.05) * latency + + bw_utilization = max(self.bw_min_value, min(self.bw_max_value, bw_utilization + bw_noise)) + latency = max(self.lat_min_value, min(self.lat_max_value, latency + lat_noise)) + + # Convert percentage to actual utilization (Gbps) + actual_bw_utilization = (bw_utilization / 100.0) * self.link_capacity + + return (Sample(timestamp, 0, actual_bw_utilization), Sample(timestamp, 0, latency)) class SyntheticSamplers: @@ -66,22 +135,30 @@ class SyntheticSamplers: self._samplers : Dict[str, SyntheticSampler] = dict() def add_sampler( - self, sampler_name : str, amplitude_scale : float, phase_scale : float, - period_scale : float, offset_scale : float, noise_ratio : float + self, sampler_name : str, + base_bw_range : Tuple[float, float] = (5.0, 20.0), + base_latency_range : Tuple[float, float] = (0.3, 2.0), + sensitivity_range : Tuple[float, float] = (0.3, 1.0), + curve_type : Optional[str] = None, + connection_count : int = 0, + link_capacity : float = 100.0 ) -> None: with self._lock: if sampler_name in self._samplers: MSG = 'SyntheticSampler({:s}) already exists' raise Exception(MSG.format(sampler_name)) self._samplers[sampler_name] = SyntheticSampler.create_random( - amplitude_scale, phase_scale, period_scale, offset_scale, noise_ratio + base_bw_range, base_latency_range, sensitivity_range, curve_type, connection_count, link_capacity ) def remove_sampler(self, sampler_name : str) -> None: with self._lock: self._samplers.pop(sampler_name, None) - def get_sample(self, sampler_name : str) -> Sample: + def get_sample(self, sampler_name : str) -> Tuple[Sample, Sample]: + """Get both bandwidth and latency samples. + Returns: Tuple of (bandwidth_sample, latency_sample) + """ with self._lock: sampler = self._samplers.get(sampler_name) if sampler_name not in self._samplers: -- GitLab From 5ae15a6c2fc70de2556a7ded7c8cfc5922f7e246 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Tue, 17 Feb 2026 12:01:15 +0000 Subject: [PATCH 14/78] feat: Refactor AI Analytics Engine test setup and deployment scripts --- .../AI_analytics_engine/__init__.py | 40 ----- .../AI_analytics_engine/tests/run_test.sh | 21 +-- .../tests/test_api_docker.py | 142 ++++++++++++++++++ src/tests/mwc26-f5ga/deploy.sh | 24 +-- src/tests/mwc26-f5ga/deploy_ai_engine.sh | 20 --- src/tests/mwc26-f5ga/destroy.sh | 1 - 6 files changed, 166 insertions(+), 82 deletions(-) create mode 100644 src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py delete mode 100755 src/tests/mwc26-f5ga/deploy_ai_engine.sh diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/__init__.py b/src/tests/mwc26-f5ga/AI_analytics_engine/__init__.py index 5790ca36d..3ccc21c7d 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/__init__.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/__init__.py @@ -12,43 +12,3 @@ # See the License for the specific language governing permissions and # limitations under the License. -""" -AI Analytics Engine module. - -Provides REST API for AI-driven SLA policy analysis and violation detection -using data from SIMAP (network topology/devices) and InfluxDB (telemetry metrics). - -Module Structure: - - ai_model: AI/ML processing logic and SLA policy definitions - - api: Flask REST API endpoints - - clients: External service clients (SIMAP, InfluxDB, Decision Engine) - - config: Configuration management - - tests: Test suite - -Public API: - - AIAnalyticsEngineAPI: Main orchestrator and Flask application - - AIModelProcessor: AI/ML analysis engine - - SLAPolicyConfig: SLA policy configuration data model - - SimapDataFetcher: SIMAP client for device/topology data - - InfluxDBFetcher: InfluxDB client for telemetry metrics - - DecisionEngineClient: Decision engine notification client - - create_ai_analytics_blueprint: Flask blueprint factory -""" - -from .ai_model.ai_processor import AIModelProcessor -from .api.api_blueprint import create_ai_analytics_blueprint -from .engine import AIAnalyticsEngineAPI -from .clients.decision_client import DecisionEngineClient -from .clients.influxdb_fetcher import InfluxDBFetcher -from .clients.simap_fetcher import SimapDataFetcher -from .ai_model.sla_policy import SLAPolicyConfig - -__all__ = [ - 'AIAnalyticsEngineAPI', - 'AIModelProcessor', - 'DecisionEngineClient', - 'InfluxDBFetcher', - 'SimapDataFetcher', - 'SLAPolicyConfig', - 'create_ai_analytics_blueprint', -] diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh index 1a1ee209e..c1372948a 100755 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh @@ -18,23 +18,26 @@ # Usage: ./run_test.sh # Navigate to TFS root directory -cd "$(dirname "$0")/../../../../.." +cd "$(dirname "$0")" # Set Python path to include TFS src and AI Analytics Engine -export PYTHONPATH="${PWD}/src:${PWD}/src/tests/mwc26-f5ga" +# export PYTHONPATH="${PWD}/src:${PWD}/src/tests/mwc26-f5ga" # Activate virtual environment if not already activated -if [ -z "$VIRTUAL_ENV" ]; then - if [ -d "$HOME/.env-simap" ]; then - source "$HOME/.env-simap/bin/activate" - fi -fi +# if [ -z "$VIRTUAL_ENV" ]; then +# if [ -d "$HOME/.env-simap" ]; then +# source "$HOME/.env-simap/bin/activate" +# fi +# fi +echo "$PWD" +echo "Running AI Analytics Engine API tests..." # Define log file path -LOG_FILE="${PWD}/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.log" +LOG_FILE="${PWD}/test_api_docker.log" +TEST_FILE="${PWD}/test_api_docker.py" # Run the test with logging enabled and capture output -pytest src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api.py::test_analyze_endpoint \ +pytest $TEST_FILE \ -v -s \ --log-cli-level=DEBUG \ --log-file="${LOG_FILE}" \ diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py new file mode 100644 index 000000000..c782f56c3 --- /dev/null +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py @@ -0,0 +1,142 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +Test suite for AI Analytics Engine REST API running in Docker. + +This module tests the /api/v1/analyze endpoint by connecting to the +AI-Engine Docker container exposed on port 8084. +""" + +import logging +import time + +import pytest +import requests + +# Configure logging for tests +logging.basicConfig( + level=logging.DEBUG, + format="[%(asctime)s] %(levelname)s:%(name)s:%(message)s" +) +LOGGER = logging.getLogger(__name__) + +# Test server configuration - Docker container exposed port +TEST_HOST = '127.0.0.1' +TEST_PORT = 8084 # Docker container port mapping: 8084->8080 + +BASE_URL = f'http://{TEST_HOST}:{TEST_PORT}' + + + +@pytest.fixture(scope='module') +def ai_engine_server(): + """ + Fixture to verify the AI Analytics Engine Docker container is running. + + Checks connectivity to the Docker container and yields control to tests. + Assumes the container is already running (docker run -p 8084:8080 ai-engine:latest). + """ + LOGGER.info("Checking AI Analytics Engine Docker container availability") + + # Wait for server to be ready + max_retries = 15 + for i in range(max_retries): + try: + LOGGER.debug(f"Checking Docker container connectivity... ({i+1}/{max_retries})") + response = requests.get(f'{BASE_URL}/api/v1/config', timeout=2) + if response.status_code == 200: + LOGGER.info("AI Analytics Engine Docker container is ready") + break + except requests.exceptions.RequestException as e: + LOGGER.debug(f"Container not ready yet: {e}") + if i < max_retries - 1: + time.sleep(2) + else: + raise RuntimeError( + f"Failed to connect to AI Analytics Engine Docker container at {BASE_URL}. " + f"Ensure container is running: docker run -p 8084:8080 ai-engine:latest" + ) + + yield + + LOGGER.info("AI Analytics Engine Docker test fixture cleanup complete") + + + +def test_analyze_endpoint(ai_engine_server): + """ + Test POST /api/v1/analyze endpoint. + + Validates that the analyze endpoint: + - Accepts valid SLA policy JSON payload + - Returns appropriate status codes (200 for success, 503 for service unavailable) + - Returns JSON response with status and message fields + """ + + LOGGER.info(">>>>>> Starting test_case test_analyze_endpoint: POST /api/v1/analyze endpoint") + + # Prepare test payload with SLA policy configuration + payload = { + "simap_id": "E2E-L1", + "sla_metrics": { + "latency_threshold_ms": 0, + "bandwidth_utilization": 0.0 + }, + "history_window_size_sec": 600, + "forecast_sample_interval_sec": 5, + "forecast_sample_count": 120, + } + + LOGGER.info(f"Sending analyze request with payload: {payload}") + + # Send POST request to analyze endpoint + response = requests.post( + f'{BASE_URL}/api/v1/analyze', + json=payload, + timeout=10 + ) + + # Add condition to validate response status code and content + + LOGGER.info(f"Analyze response status: {response.status_code}") + + # Parse JSON response + data = response.json() + LOGGER.info(f"Analyze response body: {data}") + + # Validate response structure + assert 'status' in data, "Response missing 'status' field" + assert 'message' in data, "Response missing 'message' field" + + # Accept either success (200) or service unavailable (503) + # 503 is expected if SIMAP server or InfluxDB are not running + if response.status_code == 200: + LOGGER.info("Analysis completed successfully") + assert data['status'] == 'success', f"Expected status 'success', got '{data['status']}'" + assert 'data' in data, "Successful response missing 'data' field" + elif response.status_code == 503: + # LOGGER.error("External service unavailable (expected if SIMAP/InfluxDB not running)") + assert data['status'] == 'error', f"Expected status 'error' for 503, got '{data['status']}'" + pytest.fail("External service unavailable (expected if SIMAP/InfluxDB not running)") + elif response.status_code == 400: + LOGGER.error(f"Bad request: {data['message']}") + assert data['status'] == 'error', f"Expected status 'error' for 400, got '{data['status']}'" + pytest.fail(f"Bad request: {data['message']}") + else: + pytest.fail(f"Unexpected status code: {response.status_code}") + + LOGGER.info("Analyze endpoint test passed!") + LOGGER.info("<<<<<< Finished test_case test_analyze_endpoint") + diff --git a/src/tests/mwc26-f5ga/deploy.sh b/src/tests/mwc26-f5ga/deploy.sh index 797b3e407..c9f02e8e2 100755 --- a/src/tests/mwc26-f5ga/deploy.sh +++ b/src/tests/mwc26-f5ga/deploy.sh @@ -5,23 +5,23 @@ echo "Building SIMAP Server..." cd ~/tfs-ctrl/ docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . -# echo "Building NCE-FAN Controller..." -# cd ~/tfs-ctrl/ -# docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . +echo "Building NCE-FAN Controller..." +cd ~/tfs-ctrl/ +docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . -# echo "Building NCE-T Controller..." -# cd ~/tfs-ctrl/ -# docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . +echo "Building NCE-T Controller..." +cd ~/tfs-ctrl/ +docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . echo "Building AI Analytics Engine..." cd ~/tfs-ctrl/ docker buildx build -t ai-engine:latest -f ./src/tests/mwc26-f5ga/AI_analytics_engine/Dockerfile . -# echo "Cleaning up..." +echo "Cleaning up..." docker rm --force simap-server -# docker rm --force nce-fan-ctrl -# docker rm --force nce-t-ctrl +docker rm --force nce-fan-ctrl +docker rm --force nce-t-ctrl docker rm --force ai-engine # echo "Deploying support services..." @@ -29,8 +29,9 @@ docker run --detach --name simap-server --publish 8080:8080 \ -e INFLUXDB_HOST=10.254.0.9 \ -e INFLUXDB_PORT=8181 \ simap-server:mock -# docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock -# docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock + +docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock +docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock echo "Deploying AI Analytics Engine..." docker run --detach --name ai-engine --publish 8084:8080 \ @@ -43,7 +44,6 @@ docker run --detach --name ai-engine --publish 8084:8080 \ # NOTE: If testing, run client (src/tests/tools/simap_server/run_client.sh) to manually populate SIMAP Server with telemetry data. - sleep 2 docker ps -a echo "Deployment complete." diff --git a/src/tests/mwc26-f5ga/deploy_ai_engine.sh b/src/tests/mwc26-f5ga/deploy_ai_engine.sh deleted file mode 100755 index e54f2dbca..000000000 --- a/src/tests/mwc26-f5ga/deploy_ai_engine.sh +++ /dev/null @@ -1,20 +0,0 @@ - -docker rm --force ai-engine 2>/dev/null || true - -echo "Building AI Analytics Engine..." -cd ~/tfs-ctrl/ -docker buildx build -t ai-engine:latest -f ./src/tests/mwc26-f5ga/AI_analytics_engine/Dockerfile . - -echo "Deploying AI Analytics Engine..." -docker run --detach --name ai-engine \ - --publish 8084:8080 \ - --env SIMAP_SERVER_ADDRESS=172.17.0.1 \ - --env SIMAP_SERVER_PORT=8080 \ - --env SIMAP_SERVER_USERNAME=admin \ - --env SIMAP_SERVER_PASSWORD=admin \ - ai-engine:latest -# docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock - -sleep 2 -docker ps -a -echo "Deployment complete." diff --git a/src/tests/mwc26-f5ga/destroy.sh b/src/tests/mwc26-f5ga/destroy.sh index 1e6d6953e..45fc12bd7 100755 --- a/src/tests/mwc26-f5ga/destroy.sh +++ b/src/tests/mwc26-f5ga/destroy.sh @@ -4,6 +4,5 @@ docker rm --force simap-server docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl docker rm --force ai-engine -docker rm --force traffic-changer sleep 2 docker ps -a \ No newline at end of file -- GitLab From d5bc16a3c3bc6dc2a8dac6abc4ef5deb10a94de7 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Tue, 17 Feb 2026 12:30:28 +0000 Subject: [PATCH 15/78] feat: Add logging for retrieved mapping in EventDispatcher --- src/simap_connector/service/simap_updater/SimapUpdater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 20f1a2d5f..347118366 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -817,7 +817,7 @@ class EventDispatcher(BaseEventDispatcher): try: mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) - + LOGGER.info('Retrieved mapping for connection {:s}: {:s}'.format(connection_uuid, str(mapping))) if mapping is None: MSG = 'Connection {:s} not managed by this controller (not in allowed links), skipping removal' LOGGER.debug(MSG.format(connection_uuid)) -- GitLab From 90a697775b07c1af0ca5a4e02bfcb5f434e39298 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Tue, 17 Feb 2026 13:11:02 +0000 Subject: [PATCH 16/78] feat: Update log level to DEBUG in deviceservice and add logging for SetConfig method in IetfL3VpnDriver --- manifests/deviceservice.yaml | 2 +- src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index a366a5041..7c3ded7c0 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -39,7 +39,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" startupProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:2020"] diff --git a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py index 08f34b8ad..1fe3b36f0 100644 --- a/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py +++ b/src/device/service/drivers/ietf_l3vpn/IetfL3VpnDriver.py @@ -186,6 +186,7 @@ class IetfL3VpnDriver(_Driver): def SetConfig( self, resources : List[Tuple[str, Any]] ) -> List[Union[bool, Exception]]: + LOGGER.info('SetConfig called with resources: {:s}'.format(str(resources))) results = [] if len(resources) == 0: return results with self.__lock: -- GitLab From c84e654c9f02ee861ad288347e75fbc2c60d1e72 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Wed, 18 Feb 2026 16:19:35 +0000 Subject: [PATCH 17/78] Link to controller was wrong. - L3 and L4 move to AGG from E2E. - L7,8,11,12 are discarded because they cann't be monitored. --- .../service/simap_updater/AllowedLinks.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index 685c88a20..ba862b30e 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -13,10 +13,12 @@ # limitations under the License. ALLOWED_LINKS_PER_CONTROLLER = { - 'e2e' : { 'L1', 'L2', 'L3', 'L4' }, - 'agg' : { 'L7ab', 'L7ba', 'L8ab', 'L8ba', 'L11ab', - 'L11ba', 'L12ab', 'L12ba', 'L13', 'L14' }, - 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, + 'e2e' : { 'L1', 'L2' }, + 'agg' : { 'L3', 'L4', 'L13', 'L14' }, + 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, + # The remaining can not be monitored therefore they are not included in the allowed links for the controllers + # 'agg' : { 'L3', 'L4', 'L7ab', 'L7ba', 'L8ab', 'L8ba', + # 'L11ab', 'L11ba', 'L12ab', 'L12ba', 'L13', 'L14' }, } # NOTE: Ranges should be less than 100 because the schema does not allow # bandwidth-utilization to exceed 100% -- GitLab From 99379c224302210446b810bcdf2ecac9369483c0 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Wed, 18 Feb 2026 16:23:41 +0000 Subject: [PATCH 18/78] Added full script to deploy all demo services --- src/tests/mwc26-f5ga/deploy.sh | 164 ++++++++++++++++++++++++++------- 1 file changed, 129 insertions(+), 35 deletions(-) diff --git a/src/tests/mwc26-f5ga/deploy.sh b/src/tests/mwc26-f5ga/deploy.sh index c9f02e8e2..1ec4ed4f0 100755 --- a/src/tests/mwc26-f5ga/deploy.sh +++ b/src/tests/mwc26-f5ga/deploy.sh @@ -1,49 +1,143 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. -echo "Starting deployment of MWC26-F5GA test environment..." -echo "Building SIMAP Server..." -cd ~/tfs-ctrl/ -docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . +# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl -echo "Building NCE-FAN Controller..." -cd ~/tfs-ctrl/ -docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . +# Get the current hostname +HOSTNAME=$(hostname) +echo "Deploying in ${HOSTNAME}..." -echo "Building NCE-T Controller..." -cd ~/tfs-ctrl/ -docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . -echo "Building AI Analytics Engine..." -cd ~/tfs-ctrl/ -docker buildx build -t ai-engine:latest -f ./src/tests/mwc26-f5ga/AI_analytics_engine/Dockerfile . +case "$HOSTNAME" in + simap-server) + echo "Building SIMAP Server..." + cd ~/tfs-ctrl/ + docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . + echo "Building NCE-FAN Controller..." + cd ~/tfs-ctrl/ + docker buildx build -t nce-fan-ctrl:mock -f ./src/tests/tools/mock_nce_fan_ctrl/Dockerfile . -echo "Cleaning up..." -docker rm --force simap-server -docker rm --force nce-fan-ctrl -docker rm --force nce-t-ctrl -docker rm --force ai-engine + echo "Building NCE-T Controller..." + cd ~/tfs-ctrl/ + docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . -# echo "Deploying support services..." -docker run --detach --name simap-server --publish 8080:8080 \ - -e INFLUXDB_HOST=10.254.0.9 \ - -e INFLUXDB_PORT=8181 \ - simap-server:mock + echo "Building AI Analytics Engine..." + cd ~/tfs-ctrl/ + docker buildx build -t ai-engine:latest -f ./src/tests/mwc26-f5ga/AI_analytics_engine/Dockerfile . -docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock -docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock -echo "Deploying AI Analytics Engine..." -docker run --detach --name ai-engine --publish 8084:8080 \ - -e INFLUXDB_HOST=10.254.0.9 -e INFLUXDB_PORT=8181 \ - --env SIMAP_SERVER_ADDRESS=172.17.0.1 --env SIMAP_SERVER_PORT=8080 \ - --env SIMAP_SERVER_USERNAME=admin --env SIMAP_SERVER_PASSWORD=admin \ - ai-engine:latest + echo "Cleaning up..." + docker rm --force simap-server + docker rm --force nce-fan-ctrl + docker rm --force nce-t-ctrl + docker rm --force ai-engine + echo "Deploying support services..." + docker run --detach --name simap-server --publish 8080:8080 \ + -e INFLUXDB_HOST=10.254.0.9 \ + -e INFLUXDB_PORT=8181 \ + simap-server:mock -# NOTE: If testing, run client (src/tests/tools/simap_server/run_client.sh) to manually populate SIMAP Server with telemetry data. + docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock + docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock + echo "Deploying AI Analytics Engine..." + docker run --detach --name ai-engine --publish 8084:8080 \ + -e INFLUXDB_HOST=10.254.0.9 -e INFLUXDB_PORT=8181 \ + --env SIMAP_SERVER_ADDRESS=172.17.0.1 --env SIMAP_SERVER_PORT=8080 \ + --env SIMAP_SERVER_USERNAME=admin --env SIMAP_SERVER_PASSWORD=admin \ + ai-engine:latest + # NOTE: If testing, run client (src/tests/tools/simap_server/run_client.sh) to manually populate SIMAP Server with telemetry data. -sleep 2 -docker ps -a -echo "Deployment complete." + sleep 2 + docker ps -a + ;; + tfs-e2e-ctrl) + echo "Deploying TFS E2E Controller..." + sed -i 's|\(

ETSI TeraFlowSDN Controller\)

|\1 (End-to-End)|' src/webui/service/templates/main/home.html + source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh + ./deploy/all.sh + + echo "Waiting for NATS connection..." + while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + ;; + tfs-agg-ctrl) + echo "Deploying TFS Agg Controller..." + sed -i 's|\(

ETSI TeraFlowSDN Controller\)

|\1 (Aggregation)|' src/webui/service/templates/main/home.html + source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh + ./deploy/all.sh + + echo "Waiting for NATS connection..." + while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + ;; + tfs-ip-ctrl) + echo "Deploying TFS IP Controller..." + sed -i 's|\(

ETSI TeraFlowSDN Controller\)

|\1 (IP)|' src/webui/service/templates/main/home.html + source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh + ./deploy/all.sh + + # echo "Waiting for NATS connection..." + # while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + # kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "No commands to run." + ;; +esac + +echo "Ready!" + + + + +# echo "Starting deployment of MWC26-F5GA test environment..." + + + + +# echo "Cleaning up..." +# docker rm --force simap-server +# docker rm --force nce-fan-ctrl +# docker rm --force nce-t-ctrl +# docker rm --force ai-engine + +# # echo "Deploying support services..." +# docker run --detach --name simap-server --publish 8080:8080 \ +# -e INFLUXDB_HOST=10.254.0.9 \ +# -e INFLUXDB_PORT=8181 \ +# simap-server:mock + +# docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock +# docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock + +# echo "Deploying AI Analytics Engine..." +# docker run --detach --name ai-engine --publish 8084:8080 \ +# -e INFLUXDB_HOST=10.254.0.9 -e INFLUXDB_PORT=8181 \ +# --env SIMAP_SERVER_ADDRESS=172.17.0.1 --env SIMAP_SERVER_PORT=8080 \ +# --env SIMAP_SERVER_USERNAME=admin --env SIMAP_SERVER_PASSWORD=admin \ +# ai-engine:latest + + +# # NOTE: If testing, run client (src/tests/tools/simap_server/run_client.sh) to manually populate SIMAP Server with telemetry data. + + +# sleep 2 +# docker ps -a +# echo "Deployment complete." -- GitLab From d88285e40a0205c89324de193368fd56ff1a3a17 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Wed, 18 Feb 2026 21:34:27 +0000 Subject: [PATCH 19/78] feat: Refactor EventDispatcher to use domain names and link topology names for connection processing - Update SyntheticSampler to generate samples in specified range. --- .../service/simap_updater/SimapUpdater.py | 29 ++-- .../worker/data/SyntheticSamplers.py | 145 +++++++----------- 2 files changed, 75 insertions(+), 99 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 347118366..807ee64ef 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -668,14 +668,14 @@ class EventDispatcher(BaseEventDispatcher): result = self._prepare_connection_processing(connection_uuid) if result is None: return False - (topology_name, processed_links) = result + (domain_name, processed_links) = result # Update telemetry for each link involved in this connection - for link_uuid, link_name in processed_links: + for link_uuid, link_name, link_topology_name in processed_links: # Count active connections on this link - active_conn_count = self._count_active_connections(link_uuid, topology_name) + active_conn_count = self._count_active_connections(link_uuid, domain_name) LOGGER.info('Connection {:s} uses allowed link: {:s} (uuid: {:s})'.format(connection_uuid, link_name, link_uuid)) - worker_name = '{:s}:{:s}'.format(topology_name, link_name) + worker_name = '{:s}:{:s}'.format(link_topology_name, link_name) # Worker should already exist from _dispatch_link_set (link creation event) if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): @@ -684,7 +684,7 @@ class EventDispatcher(BaseEventDispatcher): # Create worker with same parameters as in _dispatch_link_set resources = Resources() resources.links.append(ResourceLink( - domain_name = topology_name, + domain_name = link_topology_name, link_name = link_name, metrics_sampler = SyntheticSampler.create_random( base_bw_range = (5.0, 25.0), @@ -748,7 +748,10 @@ class EventDispatcher(BaseEventDispatcher): for link_uuid in link_uuids: link = self._object_cache.get(CachedEntities.LINK, link_uuid) if link.name in allowed_link_names: - processed_links.append((link_uuid, link.name)) + # Get the link's topology for worker naming + link_topology_uuid, _ = get_link_endpoint(link) + link_topology = self._object_cache.get(CachedEntities.TOPOLOGY, link_topology_uuid) + processed_links.append((link_uuid, link.name, link_topology.name)) if not processed_links: LOGGER.debug('Connection {:s} has no allowed links for domain {:s}'.format( @@ -758,7 +761,7 @@ class EventDispatcher(BaseEventDispatcher): # Cache the connection-to-links mapping for later retrieval (e.g., during REMOVE events) mapping = { 'domain': domain_name, - 'links': {link_uuid: {'name': link_name} for link_uuid, link_name in processed_links} + 'links': {link_uuid: {'name': link_name, 'topology': link_topo_name} for link_uuid, link_name, link_topo_name in processed_links} } self._object_cache.set(CachedEntities.CONNECTION, mapping, connection_uuid) LOGGER.debug('Cached connection {:s} mapping with {:d} links for domain {:s}'.format( @@ -829,27 +832,27 @@ class EventDispatcher(BaseEventDispatcher): raise Exception(MSG.format(connection_uuid)) # Extract domain and links from cached mapping - topology_name = mapping['domain'] + domain_name = mapping['domain'] link_uuids_dict = mapping['links'] - processed_links = [(link_uuid, link_data['name']) for link_uuid, link_data in link_uuids_dict.items()] + processed_links = [(link_uuid, link_data['name'], link_data['topology']) for link_uuid, link_data in link_uuids_dict.items()] LOGGER.info('Retrieved cached mapping for connection {:s}: domain={:s}, links={:d}'.format( - connection_uuid, topology_name, len(processed_links))) + connection_uuid, domain_name, len(processed_links))) # Delete the connection from cache first (we already have the mapping) self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) LOGGER.debug('Deleted cached mapping for connection {:s}'.format(connection_uuid)) # Process each link: count remaining connections and stop/update worker accordingly - for link_uuid, link_name in processed_links: - worker_name = '{:s}:{:s}'.format(topology_name, link_name) + for link_uuid, link_name, link_topology_name in processed_links: + worker_name = '{:s}:{:s}'.format(link_topology_name, link_name) if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): LOGGER.warning('Worker not found for link {:s}, skipping telemetry update for connection removal'.format(link_name)) continue # Count remaining connections on this link (now excluding the deleted one) - remaining_conn_count = self._count_active_connections(link_uuid, topology_name) + remaining_conn_count = self._count_active_connections(link_uuid, domain_name) if remaining_conn_count == 0: # No other connections use this link, stop the worker diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 862ae3150..d2eb18c33 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -13,115 +13,88 @@ # limitations under the License. -import math, random, sys, threading +import random, threading from dataclasses import dataclass, field from datetime import datetime from typing import Dict, Optional, Tuple from .Sample import Sample -# Congestion curve types -CURVE_LINEAR = 'linear' # x - steady increase -CURVE_EXPONENTIAL = 'exponential' # exp(x)-1 - slow start, rapid end -CURVE_LOGARITHMIC = 'logarithmic' # log(1+x) - fast start, plateau - -MAX_CONNECTIONS = 5 - -# LINEAR curve target ranges: 0 conns (5-15%), 5+ conns (90-99%) -LINEAR_MIN_BASE = 5.0 # Minimum BW at 0 connections -LINEAR_MIN_FULL = 90.0 # Minimum BW at 5+ connections -LINEAR_MAX_BASE = 15.0 # Maximum BW at 0 connections -LINEAR_MAX_FULL = 99.0 # Maximum BW at 5+ connections - @dataclass class SyntheticSampler: - base_bw : float = field(default = 10.0) # Base bandwidth utilization % - base_latency : float = field(default = 1.0) # Base latency in ms - sensitivity : float = field(default = 0.5) # Congestion sensitivity (0.0-1.0) - curve_type : str = field(default = CURVE_LINEAR) - connection_count : int = field(default = 0) # Current connection count for load simulation - link_capacity : float = field(default = 100.0) # Link capacity in Gbps - bw_min_value : float = field(default = 0.0) - bw_max_value : float = field(default = 100.0) - lat_min_value : float = field(default = 0.1) - lat_max_value : float = field(default = 20.0) + """Simple sampler with temporal continuity - next values stay close to previous values. + + Bandwidth ranges based on connection count: + 0 conns: avg=5%, range 0-10% + 1 conn: avg=25%, range 10-40% + 2 conns: avg=45%, range 30-60% + 3 conns: avg=65%, range 50-80% + 4+ conns: avg=85%, range 70-90% + + Latency scales proportionally with bandwidth (0% BW → 1ms, 100% BW → 20ms). + Values vary by ±5% between consecutive samples for realistic jitter. + """ + connection_count : int = field(default = 0) # Current connection count + link_capacity : float = field(default = 100.0) # Link capacity in Gbps + prev_bw : Optional[float] = field(default = None) # Previous BW percentage + prev_latency : Optional[float] = field(default = None) # Previous latency (ms) + + # Connection count to (avg, min, max) percentage mapping + BW_RANGES = { + 0: (3, 0, 10), + 1: (25, 10, 40), + 2: (45, 30, 60), + 3: (65, 50, 80), + 4: (85, 70, 90), + } @classmethod def create_random( - cls, base_bw_range : Tuple[float, float] = (0.0, 1.0), - base_latency_range : Tuple[float, float] = (1.0, 3.0), - sensitivity_range : Tuple[float, float] = (0.0, 1.0), - curve_type : Optional[str] = CURVE_LINEAR, - connection_count : int = 0, - link_capacity : float = 100.0 + cls, + connection_count : int = 0, + link_capacity : float = 100.0 ) -> 'SyntheticSampler': - """Create a random sampler with congestion curve parameters. - - For LINEAR: base_bw and sensitivity interpolate within target ranges per connection count. - For EXPONENTIAL/LOGARITHMIC: uses traditional curve formulas. - """ - base_bw = random.uniform(base_bw_range[0], base_bw_range[1]) - base_latency = random.uniform(base_latency_range[0], base_latency_range[1]) - sensitivity = random.uniform(sensitivity_range[0], sensitivity_range[1]) - - return cls(base_bw, base_latency, sensitivity, curve_type, connection_count, link_capacity) - - def _compute_congestion_factor(self, load_ratio: float) -> float: - """Compute congestion factor based on curve type and load ratio (0-1).""" - if self.curve_type == CURVE_LINEAR: - return load_ratio - elif self.curve_type == CURVE_EXPONENTIAL: - # Exponential: slow start, rapid increase at high load - return (math.exp(load_ratio * 2) - 1) / (math.e ** 2 - 1) - elif self.curve_type == CURVE_LOGARITHMIC: - # Logarithmic: fast initial increase, then plateau - return math.log1p(load_ratio * 2.7) / math.log1p(2.7) - else: - return load_ratio # Default to linear + """Factory method for compatibility (ignores unused parameters).""" + return cls(connection_count=connection_count, link_capacity=link_capacity) def get_sample(self) -> Tuple[Sample, Sample]: - """Generate both bandwidth and latency samples using congestion curves. - - LINEAR curve uses simple linear interpolation between min/max lines. - EXPONENTIAL/LOGARITHMIC use formula-based congestion factors. + """Generate bandwidth and latency samples with temporal continuity. Returns: Tuple of (bandwidth_sample, latency_sample) """ timestamp = datetime.now().timestamp() - if self.curve_type == CURVE_LINEAR: - # Simple linear interpolation - # Connection ratio: 0 (no load) to 1 (max load) - conn_ratio = min(self.connection_count / MAX_CONNECTIONS, 1.0) if MAX_CONNECTIONS > 0 else 0.0 - - # Minimum line: 5% at 0 conns → 90% at 5 conns - min_bw = LINEAR_MIN_BASE + conn_ratio * (LINEAR_MIN_FULL - LINEAR_MIN_BASE) - - # Maximum line: 15% at 0 conns → 99% at 5 conns - max_bw = LINEAR_MAX_BASE + conn_ratio * (LINEAR_MAX_FULL - LINEAR_MAX_BASE) - - # Interpolate between min and max using base_bw and sensitivity - interpolation_factor = (self.base_bw + self.sensitivity) / 2.0 - bw_utilization = min_bw + (max_bw - min_bw) * interpolation_factor - - # Latency scales proportionally (10x increase from base at full load) - bw_normalized = (bw_utilization - LINEAR_MIN_BASE) / (LINEAR_MAX_FULL - LINEAR_MIN_BASE) - latency = self.base_latency * (1.0 + bw_normalized * 9.0) + # Determine range based on connection count (cap at 4+) + conn_key = min(self.connection_count, 4) + avg, min_bw, max_bw = self.BW_RANGES[conn_key] + + # Generate bandwidth percentage + if self.prev_bw is None: + # First sample: start at average for this connection count + bw_utilization = avg else: - # Use formula-based approach for EXPONENTIAL/LOGARITHMIC - load_ratio = self.connection_count / MAX_CONNECTIONS if MAX_CONNECTIONS > 0 else 0.0 - congestion_factor = self._compute_congestion_factor(load_ratio) - - bw_utilization = self.base_bw + (congestion_factor * self.sensitivity * 70.0) - latency = self.base_latency * (1.0 + congestion_factor * self.sensitivity * 9.0) + # Add ±2% noise to previous value for temporal continuity + noise_factor = random.uniform(-0.02, 0.02) + bw_utilization = self.prev_bw * (1.0 + noise_factor) - # Add uniform noise (5%) - bw_noise = random.uniform(-0.05, 0.05) * bw_utilization - lat_noise = random.uniform(-0.05, 0.05) * latency + # Clamp to current range (handles "jump" when connection count changes) + bw_utilization = max(min_bw, min(max_bw, bw_utilization)) + self.prev_bw = bw_utilization + + # Latency scales proportionally with bandwidth (1ms at 0%, 20ms at 100%) + target_latency = 1.0 + (bw_utilization / 100.0) * 19.0 + + if self.prev_latency is None: + latency = target_latency + else: + # Add ±2% noise to previous latency + noise_factor = random.uniform(-0.02, 0.02) + latency = self.prev_latency * (1.0 + noise_factor) - bw_utilization = max(self.bw_min_value, min(self.bw_max_value, bw_utilization + bw_noise)) - latency = max(self.lat_min_value, min(self.lat_max_value, latency + lat_noise)) + # Clamp latency to reasonable range + latency = max(0.5, min(25.0, latency)) + self.prev_latency = latency # Convert percentage to actual utilization (Gbps) actual_bw_utilization = (bw_utilization / 100.0) * self.link_capacity -- GitLab From 27a925b2744d8e40b686686fd55762a58f2360ae Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Wed, 18 Feb 2026 21:54:52 +0000 Subject: [PATCH 20/78] feat: Update descriptions in network slice JSON files to include transport network details --- .../data/slices/network-slice1.json | 2 +- .../data/slices/network-slice2.json | 54 +++++++++---------- 2 files changed, 28 insertions(+), 28 deletions(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json index 786a6df35..ec6b3e54c 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json @@ -2,7 +2,7 @@ "slice-service": [ { "id": "slice1", - "description": "network slice 1, PC1-VM1", + "description": "network slice 1, PC1-VM1 - using optical transport network", "sdps": { "sdp": [ { diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json index f0875e25e..6c3484161 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -1,52 +1,52 @@ { "slice-service": [ { - "id": "slice2", - "description": "network slice 2, PC1-VM2", + "id": "slice2", + "description": "network slice 2, PC1-VM2 - using IP transport network", "sdps": { "sdp": [ { - "id": "1", - "node-id": "ONT1", + "id": "1", + "node-id": "ONT1", "sdp-ip-address": ["172.16.61.10"], "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ - {"type": "ietf-network-slice-service:vlan", "value": ["31"]}, - {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, - {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:vlan", "value": ["31"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.201.22/24"]}, - {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} ], "target-connection-group-id": "line2" }]}, "attachment-circuits": {"attachment-circuit": [{ - "id": "AC ONT1", - "description": "AC ONT1 connected to PC1", - "ac-node-id": "ONT1", - "ac-tp-id": "200" + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" }]} }, { - "id": "2", - "node-id": "POP1", + "id": "2", + "node-id": "POP1", "sdp-ip-address": ["172.16.204.220"], "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ - {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, - {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.201.22/24"]}, - {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, - {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} ], "target-connection-group-id": "line2" }]}, "attachment-circuits": {"attachment-circuit": [{ - "id": "AC POP1 to VM2", - "description": "AC POP1 connected to VM2", - "ac-node-id": "POP1", - "ac-tp-id": "200" + "id": "AC POP1 to VM2", + "description": "AC POP1 connected to VM2", + "ac-node-id": "POP1", + "ac-tp-id": "200" }]} } ] @@ -93,16 +93,16 @@ { "metric-type": "ietf-network-slice-service:one-way-delay-maximum", "metric-unit": "milliseconds", - "bound": "20" + "bound": "20" }, { "metric-type": "ietf-network-slice-service:one-way-bandwidth", "metric-unit": "Mbps", - "bound": "4000" + "bound": "4000" }, { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", "percentile-value": "0.001" } ] @@ -115,4 +115,4 @@ } } ] -} \ No newline at end of file +} -- GitLab From 76fdf2f695dd4b2bce16321f29394848aa97f9ad Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Wed, 18 Feb 2026 22:17:36 +0000 Subject: [PATCH 21/78] feat: Simplify parameters in SyntheticSampler creation and EventDispatcher metrics_sampler --- .../service/simap_updater/SimapUpdater.py | 16 ++++------------ .../telemetry/worker/data/SyntheticSamplers.py | 3 ++- 2 files changed, 6 insertions(+), 13 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 807ee64ef..a755613dd 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -362,12 +362,8 @@ class EventDispatcher(BaseEventDispatcher): domain_name = topology_name, link_name = link_name, metrics_sampler = SyntheticSampler.create_random( - base_bw_range = (5.0, 25.0), - base_latency_range = (0.3, 2.0), - sensitivity_range = (0.3, 1.0), - curve_type = None, # Default is LINEAR - connection_count = 0, - link_capacity = LINKS_CAPACITY.get(link_name, 100.0) + connection_count = 0, + link_capacity = LINKS_CAPACITY.get(link_name, 100.0) ), related_service_ids=[], )) @@ -687,12 +683,8 @@ class EventDispatcher(BaseEventDispatcher): domain_name = link_topology_name, link_name = link_name, metrics_sampler = SyntheticSampler.create_random( - base_bw_range = (5.0, 25.0), - base_latency_range = (0.3, 2.0), - sensitivity_range = (0.3, 1.0), - curve_type = None, # Random curve type - connection_count = active_conn_count, - link_capacity = LINKS_CAPACITY.get(link_name, 100.0) + connection_count = active_conn_count, + link_capacity = LINKS_CAPACITY.get(link_name, 100.0) ), related_service_ids=[], )) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index d2eb18c33..743743848 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -121,7 +121,8 @@ class SyntheticSamplers: MSG = 'SyntheticSampler({:s}) already exists' raise Exception(MSG.format(sampler_name)) self._samplers[sampler_name] = SyntheticSampler.create_random( - base_bw_range, base_latency_range, sensitivity_range, curve_type, connection_count, link_capacity + connection_count=connection_count, + link_capacity=link_capacity ) def remove_sampler(self, sampler_name : str) -> None: -- GitLab From a324344651f859b415b1719bbaa6864231d42446 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Wed, 18 Feb 2026 22:48:46 +0000 Subject: [PATCH 22/78] feat: Update allowed links for AGG to E2E (L3 and L4) --- src/simap_connector/service/simap_updater/AllowedLinks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index ba862b30e..12e707170 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -13,8 +13,8 @@ # limitations under the License. ALLOWED_LINKS_PER_CONTROLLER = { - 'e2e' : { 'L1', 'L2' }, - 'agg' : { 'L3', 'L4', 'L13', 'L14' }, + 'e2e' : { 'L1', 'L2', 'L3', 'L4', }, + 'agg' : { 'L13', 'L14' }, 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, # The remaining can not be monitored therefore they are not included in the allowed links for the controllers # 'agg' : { 'L3', 'L4', 'L7ab', 'L7ba', 'L8ab', 'L8ba', -- GitLab From 59d850f896f0880dcad689633d760bd40ebd1c51 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Wed, 18 Feb 2026 23:13:53 +0000 Subject: [PATCH 23/78] Moved L3 and L$ back to AGG --- src/simap_connector/service/simap_updater/AllowedLinks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index 12e707170..693e9f9e8 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -13,8 +13,8 @@ # limitations under the License. ALLOWED_LINKS_PER_CONTROLLER = { - 'e2e' : { 'L1', 'L2', 'L3', 'L4', }, - 'agg' : { 'L13', 'L14' }, + 'e2e' : { 'L1', 'L2', }, + 'agg' : { 'L3', 'L4', 'L13', 'L14' }, 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, # The remaining can not be monitored therefore they are not included in the allowed links for the controllers # 'agg' : { 'L3', 'L4', 'L7ab', 'L7ba', 'L8ab', 'L8ba', -- GitLab From 75e33a26272ff954467fb7a6832a1bd879cea332 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Wed, 18 Feb 2026 23:36:06 +0000 Subject: [PATCH 24/78] Test --- src/simap_connector/service/simap_updater/AllowedLinks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index 693e9f9e8..4287dce82 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -13,8 +13,8 @@ # limitations under the License. ALLOWED_LINKS_PER_CONTROLLER = { - 'e2e' : { 'L1', 'L2', }, - 'agg' : { 'L3', 'L4', 'L13', 'L14' }, + 'e2e' : { 'L1', 'L2', 'L3', 'L4' }, + 'agg' : { 'L13', 'L14' }, 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, # The remaining can not be monitored therefore they are not included in the allowed links for the controllers # 'agg' : { 'L3', 'L4', 'L7ab', 'L7ba', 'L8ab', 'L8ba', -- GitLab From 06f176577fc472775f9e49171b58a34ad825caa7 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Wed, 18 Feb 2026 23:37:27 +0000 Subject: [PATCH 25/78] SIMAP set to DEBUG --- manifests/simap_connectorservice.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/manifests/simap_connectorservice.yaml b/manifests/simap_connectorservice.yaml index 0b4ea503c..a955c2666 100644 --- a/manifests/simap_connectorservice.yaml +++ b/manifests/simap_connectorservice.yaml @@ -36,7 +36,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "INFO" + value: "DEBUG" - name: SIMAP_SERVER_SCHEME value: "http" - name: SIMAP_SERVER_ADDRESS -- GitLab From dfe720419e71246dbd1d844cbbf4a09b0c0b13bb Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 00:06:57 +0000 Subject: [PATCH 26/78] fix: Correct transport network descriptions in network slice JSON files --- src/simap_connector/service/simap_updater/SimapUpdater.py | 4 ++-- .../ecoc25-f5ga-telemetry/data/slices/network-slice1.json | 2 +- .../ecoc25-f5ga-telemetry/data/slices/network-slice2.json | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index a755613dd..42d8abf2e 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -374,14 +374,14 @@ class EventDispatcher(BaseEventDispatcher): def dispatch_link_create(self, link_event : LinkEvent) -> None: - if not self._dispatch_link_set(link_event): return + # if not self._dispatch_link_set(link_event): return MSG = 'Link Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) def dispatch_link_update(self, link_event : LinkEvent) -> None: - if not self._dispatch_link_set(link_event): return + # if not self._dispatch_link_set(link_event): return MSG = 'Link Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json index ec6b3e54c..2d5f6604b 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json @@ -2,7 +2,7 @@ "slice-service": [ { "id": "slice1", - "description": "network slice 1, PC1-VM1 - using optical transport network", + "description": "network slice 1, PC1-VM1 - using IP transport network", "sdps": { "sdp": [ { diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json index 6c3484161..e5abe1286 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -2,7 +2,7 @@ "slice-service": [ { "id": "slice2", - "description": "network slice 2, PC1-VM2 - using IP transport network", + "description": "network slice 2, PC1-VM2 - using optical transport network", "sdps": { "sdp": [ { -- GitLab From 4a0cffcf525ed7f8fbe9b1db3e9ed91653b6c918 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 00:26:37 +0000 Subject: [PATCH 27/78] L3 and L4 added to both E2E and AGG --- src/simap_connector/service/simap_updater/AllowedLinks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index 4287dce82..a1fff3916 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -13,8 +13,8 @@ # limitations under the License. ALLOWED_LINKS_PER_CONTROLLER = { - 'e2e' : { 'L1', 'L2', 'L3', 'L4' }, - 'agg' : { 'L13', 'L14' }, + 'e2e' : { 'L1', 'L2', 'L3', 'L4' }, + 'agg' : { 'L3', 'L4', 'L13', 'L14' }, 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, # The remaining can not be monitored therefore they are not included in the allowed links for the controllers # 'agg' : { 'L3', 'L4', 'L7ab', 'L7ba', 'L8ab', 'L8ba', -- GitLab From 9765f112d3ab1baa3969a564072c7f11a06019af Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 00:57:55 +0000 Subject: [PATCH 28/78] fix: Update allowed links for E2E controller and modify connection retrieval in SimapUpdater --- src/simap_connector/service/simap_updater/AllowedLinks.py | 2 +- src/simap_connector/service/simap_updater/SimapUpdater.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index a1fff3916..eb39abacd 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -13,7 +13,7 @@ # limitations under the License. ALLOWED_LINKS_PER_CONTROLLER = { - 'e2e' : { 'L1', 'L2', 'L3', 'L4' }, + 'e2e' : { 'L1', 'L2', }, 'agg' : { 'L3', 'L4', 'L13', 'L14' }, 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, # The remaining can not be monitored therefore they are not included in the allowed links for the controllers diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 42d8abf2e..2b6b87df3 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -716,7 +716,7 @@ class EventDispatcher(BaseEventDispatcher): Tuple of ( domain_name, processed_links) or None if failed """ # Get the connection object - connection = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid) + connection = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) if connection is None: LOGGER.warning('Connection {:s} not found in cache'.format(connection_uuid)) return None -- GitLab From eeedbb1ab36fc439338abf9c347b0d1a2dff974e Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 01:07:03 +0000 Subject: [PATCH 29/78] fix: Remove auto_retrieve parameter from connection retrieval in SimapUpdater --- src/simap_connector/service/simap_updater/SimapUpdater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 2b6b87df3..42d8abf2e 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -716,7 +716,7 @@ class EventDispatcher(BaseEventDispatcher): Tuple of ( domain_name, processed_links) or None if failed """ # Get the connection object - connection = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) + connection = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid) if connection is None: LOGGER.warning('Connection {:s} not found in cache'.format(connection_uuid)) return None -- GitLab From fa85d488da1b9b61493f1b66e7f687ea9a7a0f99 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 01:26:13 +0000 Subject: [PATCH 30/78] fix: Add debug logging for allowed links in EventDispatcher --- src/simap_connector/service/simap_updater/SimapUpdater.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 42d8abf2e..d76f599d7 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -736,6 +736,7 @@ class EventDispatcher(BaseEventDispatcher): # Filter links based on ALLOWED_LINKS_PER_CONTROLLER allowed_link_names = ALLOWED_LINKS_PER_CONTROLLER.get(domain_name, set()) + LOGGER.debug('Allowed links for domain {:s}: {:s}'.format(domain_name, str(allowed_link_names))) processed_links = [] for link_uuid in link_uuids: link = self._object_cache.get(CachedEntities.LINK, link_uuid) -- GitLab From 77f867825753ff204b11612de3f3cd81b7d747ec Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 01:40:21 +0000 Subject: [PATCH 31/78] fix: Comment out unused telemetry resource handling in EventDispatcher. --- .../service/simap_updater/SimapUpdater.py | 34 +++++++++---------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index d76f599d7..28d504d15 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -356,32 +356,32 @@ class EventDispatcher(BaseEventDispatcher): te_link = te_topo.link(link_name) te_link.update(src_device.name, src_endpoint.name, dst_device.name, dst_endpoint.name) - worker_name = '{:s}:{:s}'.format(topology_name, link_name) - resources = Resources() - resources.links.append(ResourceLink( - domain_name = topology_name, - link_name = link_name, - metrics_sampler = SyntheticSampler.create_random( - connection_count = 0, - link_capacity = LINKS_CAPACITY.get(link_name, 100.0) - ), - related_service_ids=[], - )) - sampling_interval = 1.0 - self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) + # worker_name = '{:s}:{:s}'.format(topology_name, link_name) + # resources = Resources() + # resources.links.append(ResourceLink( + # domain_name = topology_name, + # link_name = link_name, + # metrics_sampler = SyntheticSampler.create_random( + # connection_count = 0, + # link_capacity = LINKS_CAPACITY.get(link_name, 100.0) + # ), + # related_service_ids=[], + # )) + # sampling_interval = 1.0 + # self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) return True def dispatch_link_create(self, link_event : LinkEvent) -> None: - # if not self._dispatch_link_set(link_event): return + if not self._dispatch_link_set(link_event): return MSG = 'Link Created: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) def dispatch_link_update(self, link_event : LinkEvent) -> None: - # if not self._dispatch_link_set(link_event): return + if not self._dispatch_link_set(link_event): return MSG = 'Link Updated: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) @@ -435,8 +435,8 @@ class EventDispatcher(BaseEventDispatcher): self._object_cache.delete(CachedEntities.LINK, link_uuid) self._object_cache.delete(CachedEntities.LINK, link_name) - worker_name = '{:s}:{:s}'.format(topology_name, link_name) - self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) + # worker_name = '{:s}:{:s}'.format(topology_name, link_name) + # self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) MSG = 'Link Removed: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(link_event))) -- GitLab From 9aa2739174b6ace63ca64dcc9b0fca9c74b9418a Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 02:00:12 +0000 Subject: [PATCH 32/78] fix: Enhance logging in AggregatorWorker and AggregationCache for better traceability --- .../telemetry/worker/AggregatorWorker.py | 12 ++++++- .../telemetry/worker/data/AggregationCache.py | 36 +++++++++++++++++-- 2 files changed, 45 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py index 075c3b6d6..e4109790c 100644 --- a/src/simap_connector/service/telemetry/worker/AggregatorWorker.py +++ b/src/simap_connector/service/telemetry/worker/AggregatorWorker.py @@ -72,13 +72,17 @@ class AggregatorWorker(_Worker): def run(self) -> None: self._logger.info('[run] Starting...') + MSG = '[run] Aggregating link ({:s}, {:s}) every {:.1f}s' + self._logger.info(MSG.format( + self._network_id, self._link_id, self._sampling_interval + )) kafka_producer = KafkaProducer(bootstrap_servers=KAFKA_BOOT_SERVERS) update_counter = 1 try: while not self._stop_event.is_set() and not self._terminate.is_set(): - #self._logger.debug('[run] Aggregating...') + self._logger.debug('[run] Aggregation cycle #{:d}...'.format(update_counter)) link_sample = self._aggregation_cache.aggregate() @@ -110,6 +114,12 @@ class AggregatorWorker(_Worker): link_sample.bandwidth_utilization, link_sample.latency, related_service_ids=list(link_sample.related_service_ids) ) + + MSG = '[run] Updated SIMAP link ({:s}, {:s}): BW={:.2f}%, Latency={:.3f}ms' + self._logger.debug(MSG.format( + self._network_id, self._link_id, + link_sample.bandwidth_utilization, link_sample.latency + )) update_counter += 1 diff --git a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py index 31a71d096..5fa88fbca 100644 --- a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py +++ b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py @@ -13,12 +13,15 @@ # limitations under the License. -import threading +import logging, threading from dataclasses import dataclass, field from datetime import datetime from typing import Dict, Set, Tuple +LOGGER = logging.getLogger(__name__) + + @dataclass class LinkSample: network_id : str @@ -46,12 +49,33 @@ class AggregationCache: link_key = (link_sample.network_id, link_sample.link_id) with self._lock: self._samples[link_key] = link_sample + + MSG = '[update] Received sample for link ({:s}, {:s}): BW={:.2f}%, Latency={:.3f}ms, Services={:s}' + LOGGER.debug(MSG.format( + link_sample.network_id, link_sample.link_id, + link_sample.bandwidth_utilization, link_sample.latency, + str(link_sample.related_service_ids) + )) def aggregate(self) -> AggregatedLinkSample: with self._lock: + num_samples = len(self._samples) + if num_samples > 0: + MSG = '[aggregate] Aggregating {:d} supporting link(s)' + LOGGER.info(MSG.format(num_samples)) + agg = AggregatedLinkSample(timestamp=datetime.utcnow()) - for sample in self._samples.values(): + for link_key, sample in self._samples.items(): + network_id, link_id = link_key + + MSG = '[aggregate] - Link ({:s}, {:s}): BW={:.2f}%, Latency={:.3f}ms, Services={:s}' + LOGGER.debug(MSG.format( + network_id, link_id, + sample.bandwidth_utilization, sample.latency, + str(sample.related_service_ids) + )) + agg.bandwidth_utilization = max( agg.bandwidth_utilization, sample.bandwidth_utilization ) @@ -59,4 +83,12 @@ class AggregationCache: agg.related_service_ids = agg.related_service_ids.union( sample.related_service_ids ) + + if num_samples > 0: + MSG = '[aggregate] Result: BW={:.2f}% (max), Latency={:.3f}ms (sum), Services={:s}' + LOGGER.info(MSG.format( + agg.bandwidth_utilization, agg.latency, + str(agg.related_service_ids) + )) + return agg -- GitLab From 048ba67f9061fb0ab3f06a382cfc2508caf71ee4 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 02:45:52 +0000 Subject: [PATCH 33/78] fix: Update discover_link_details to handle network-level request --- src/simap_connector/service/Tools.py | 40 +++++++++++++++++----------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/src/simap_connector/service/Tools.py b/src/simap_connector/service/Tools.py index 024f8d708..1e143700e 100644 --- a/src/simap_connector/service/Tools.py +++ b/src/simap_connector/service/Tools.py @@ -62,24 +62,34 @@ def discover_link_details(restconf_client : RestConfClient, xpath_filter : str) network_id, link_id = link_xpath_match.groups() link_details = LinkDetails(Link(network_id, link_id)) - xpath_filter = link_details.link.get_xpath_filter(add_simap_telemetry=False) - xpath_data = restconf_client.get(xpath_filter) + # Workaround: RESTCONF server doesn't support namespace-prefixed child element paths + # Query at network level and filter the link from response + network_xpath = '/ietf-network:networks/network={:s}'.format(network_id) + xpath_data = restconf_client.get(network_xpath) if not xpath_data: - raise Exception('Resource({:s}) not found in SIMAP Server'.format(str(xpath_filter))) - - links = xpath_data.get('ietf-network-topology:link', list()) - if len(links) == 0: - raise Exception('Link({:s}) not found'.format(str(xpath_filter))) - if len(links) > 1: - raise Exception('Multiple occurrences for Link({:s})'.format(str(xpath_filter))) - link = links[0] - if link['link-id'] != link_id: - MSG = 'Retieved Link({:s}) does not match xpath_filter({:s})' - raise Exception(MSG.format(str(link), str(xpath_filter))) + raise Exception('Network({:s}) not found in SIMAP Server'.format(str(network_xpath))) + + # Extract network data from response + networks = xpath_data.get('ietf-network:network', []) + if len(networks) == 0: + raise Exception('Network({:s}) not found in response'.format(network_id)) + network_data = networks[0] + + # Find the target link + links = network_data.get('ietf-network-topology:link', list()) + link = None + for l in links: + if l['link-id'] == link_id: + link = l + break + + if link is None: + raise Exception('Link({:s}) not found in network({:s})'.format(link_id, network_id)) + supporting_links = link.get('supporting-link', list()) if len(supporting_links) == 0: - MSG = 'No supporting links found for Resource({:s}, {:s})' - raise Exception(MSG.format(str(xpath_filter), str(xpath_data))) + MSG = 'No supporting links found for Link({:s}) in Network({:s})' + raise Exception(MSG.format(str(link_id), str(network_id))) for sup_link in supporting_links: link_details.supporting_links.append(Link( -- GitLab From 175fc9fc73325df3112cc63b285746a066a302bc Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 03:29:04 +0000 Subject: [PATCH 34/78] fix: Add authentication to stream request in telemetry subscription --- src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py b/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py index 86ee09dab..559556829 100644 --- a/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py +++ b/src/tests/ecoc25-f5ga-telemetry/telemetry-subscribe-slice1.py @@ -64,7 +64,7 @@ def main() -> None: stream_url = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, subscription_uri) print('Opening stream "{:s}" (press Ctrl+C to stop)...'.format(stream_url)) - with requests.get(stream_url, stream=True) as resp: + with requests.get(stream_url, stream=True, auth=auth) as resp: for line in resp.iter_lines(decode_unicode=True): print(line) -- GitLab From 20279c5fe7f094549422167d2b2f47c31cd8052b Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 09:34:02 +0000 Subject: [PATCH 35/78] Add deployment and teardown scripts for MWC26-F5G-A components - Created deploy-specs-e2e.sh and deploy-specs-ip.sh for building and deploying TeraFlowSDN components. - Added dummy scripts for L3VPN request and delete operations to simulate AGG-Controller behavior. - Implemented log dumping script to collect logs from various services based on hostname. - Added provisioning scripts for multiple network slices with corresponding JSON data files. - Included telemetry subscription and deletion scripts for managing telemetry data. - Introduced teardown scripts for cleaning up network slices after testing. --- .../data/slices/l3vpn_request_from_agg.json | 185 +++++++++++++++ .../slices/network-slice1_background.json | 118 ++++++++++ .../slices/network-slice2_game_creation.json | 118 ++++++++++ .../slices/network-slice3_background.json | 118 ++++++++++ .../data/slices/network-slice4_optical.json | 118 ++++++++++ .../data/telemetry/subscription-slice1.json | 9 + .../data/telemetry/subscription-slice2.json | 9 + .../data/topology/topology-agg.json | 95 ++++++++ .../data/topology/topology-e2e.json | 43 ++++ .../mwc26-f5ga/data/topology/topology-ip.json | 149 ++++++++++++ src/tests/mwc26-f5ga/deploy-specs-agg.sh | 220 ++++++++++++++++++ src/tests/mwc26-f5ga/deploy-specs-e2e.sh | 220 ++++++++++++++++++ src/tests/mwc26-f5ga/deploy-specs-ip.sh | 220 ++++++++++++++++++ src/tests/mwc26-f5ga/dummy_L3VPN_delete.sh | 29 +++ src/tests/mwc26-f5ga/dummy_L3VPN_request.sh | 30 +++ src/tests/mwc26-f5ga/dump-logs.sh | 75 ++++++ .../mwc26-f5ga/provision-slice1_background.sh | 28 +++ src/tests/mwc26-f5ga/provision-slice2_game.sh | 28 +++ .../provision-slice3_another_background.sh | 28 +++ .../mwc26-f5ga/provision-slice4_optical.sh | 28 +++ src/tests/mwc26-f5ga/teardown-slice2_game.sh | 23 ++ .../teardown-slice3_another_background.sh | 23 ++ .../mwc26-f5ga/teardown-slice4_optical.sh | 23 ++ .../mwc26-f5ga/telemetry-delete-slice1.py | 46 ++++ .../mwc26-f5ga/telemetry-subscribe-slice1.py | 72 ++++++ 25 files changed, 2055 insertions(+) create mode 100644 src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json create mode 100644 src/tests/mwc26-f5ga/data/slices/network-slice1_background.json create mode 100644 src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json create mode 100644 src/tests/mwc26-f5ga/data/slices/network-slice3_background.json create mode 100644 src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json create mode 100644 src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json create mode 100644 src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json create mode 100644 src/tests/mwc26-f5ga/data/topology/topology-agg.json create mode 100644 src/tests/mwc26-f5ga/data/topology/topology-e2e.json create mode 100644 src/tests/mwc26-f5ga/data/topology/topology-ip.json create mode 100644 src/tests/mwc26-f5ga/deploy-specs-agg.sh create mode 100644 src/tests/mwc26-f5ga/deploy-specs-e2e.sh create mode 100644 src/tests/mwc26-f5ga/deploy-specs-ip.sh create mode 100755 src/tests/mwc26-f5ga/dummy_L3VPN_delete.sh create mode 100755 src/tests/mwc26-f5ga/dummy_L3VPN_request.sh create mode 100755 src/tests/mwc26-f5ga/dump-logs.sh create mode 100755 src/tests/mwc26-f5ga/provision-slice1_background.sh create mode 100755 src/tests/mwc26-f5ga/provision-slice2_game.sh create mode 100755 src/tests/mwc26-f5ga/provision-slice3_another_background.sh create mode 100755 src/tests/mwc26-f5ga/provision-slice4_optical.sh create mode 100755 src/tests/mwc26-f5ga/teardown-slice2_game.sh create mode 100755 src/tests/mwc26-f5ga/teardown-slice3_another_background.sh create mode 100755 src/tests/mwc26-f5ga/teardown-slice4_optical.sh create mode 100644 src/tests/mwc26-f5ga/telemetry-delete-slice1.py create mode 100644 src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py diff --git a/src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json b/src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json new file mode 100644 index 000000000..ba9c9d853 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/l3vpn_request_from_agg.json @@ -0,0 +1,185 @@ +{ + "ietf-l3vpn-svc:l3vpn-svc": { + "sites": { + "site": [ + { + "devices": { + "device": [ + { + "device-id": "P-PE1", + "location": "access" + } + ] + }, + "locations": { + "location": [ + { + "location-id": "access" + } + ] + }, + "management": { + "type": "ietf-l3vpn-svc:provider-managed" + }, + "routing-protocols": { + "routing-protocol": [ + { + "static": { + "cascaded-lan-prefixes": { + "ipv4-lan-prefixes": [ + { + "lan": "172.1.101.22/24", + "lan-tag": "21", + "next-hop": "128.32.44.254" + } + ] + } + }, + "type": "ietf-l3vpn-svc:static" + } + ] + }, + "site-id": "site_access", + "site-network-accesses": { + "site-network-access": [ + { + "device-reference": "P-PE1", + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "customer-address": "128.32.44.254", + "prefix-length": "24", + "provider-address": "128.32.44.254" + } + } + }, + "service": { + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "bandwidth": { + "guaranteed-bw-percent": 100 + }, + "class-id": "qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": { + "latency-boundary": 20 + } + } + ] + } + } + }, + "svc-input-bandwidth": 1000000000, + "svc-mtu": 1500, + "svc-output-bandwidth": 5000000000 + }, + "site-network-access-id": "200", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "vpn-attachment": { + "site-role": "ietf-l3vpn-svc:hub-role", + "vpn-id": "slice25" + } + } + ] + } + }, + { + "devices": { + "device": [ + { + "device-id": "P-PE2", + "location": "cloud" + } + ] + }, + "locations": { + "location": [ + { + "location-id": "cloud" + } + ] + }, + "management": { + "type": "ietf-l3vpn-svc:provider-managed" + }, + "routing-protocols": { + "routing-protocol": [ + { + "static": { + "cascaded-lan-prefixes": { + "ipv4-lan-prefixes": [ + { + "lan": "172.16.104.221/24", + "lan-tag": "201", + "next-hop": "172.10.44.254" + } + ] + } + }, + "type": "ietf-l3vpn-svc:static" + } + ] + }, + "site-id": "site_cloud", + "site-network-accesses": { + "site-network-access": [ + { + "device-reference": "P-PE2", + "ip-connection": { + "ipv4": { + "address-allocation-type": "ietf-l3vpn-svc:static-address", + "addresses": { + "customer-address": "172.10.44.254", + "prefix-length": "24", + "provider-address": "172.10.44.254" + } + } + }, + "service": { + "qos": { + "qos-profile": { + "classes": { + "class": [ + { + "bandwidth": { + "guaranteed-bw-percent": 100 + }, + "class-id": "qos-realtime", + "direction": "ietf-l3vpn-svc:both", + "latency": { + "latency-boundary": 10 + } + } + ] + } + } + }, + "svc-input-bandwidth": 5000000000, + "svc-mtu": 1500, + "svc-output-bandwidth": 1000000000 + }, + "site-network-access-id": "200", + "site-network-access-type": "ietf-l3vpn-svc:multipoint", + "vpn-attachment": { + "site-role": "ietf-l3vpn-svc:spoke-role", + "vpn-id": "slice25" + } + } + ] + } + } + ] + }, + "vpn-services": { + "vpn-service": [ + { + "vpn-id": "slice25" + } + ] + } + } +} diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json new file mode 100644 index 000000000..6d6e0893e --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "initial_background_slice", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json b/src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json new file mode 100644 index 000000000..de69d29f9 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice2_game_creation.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "game_slice_on_ip_transport", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json new file mode 100644 index 000000000..90c74a47b --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "another_background_slice", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json b/src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json new file mode 100644 index 000000000..54fd5b2a0 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice4_optical.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "game_slice_on_optical_transport", + "description": "network slice 2, PC1-VM2 - using optical transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["31"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line2" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP1", + "sdp-ip-address": ["172.16.204.220"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line2" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP1 to VM2", + "description": "AC POP1 connected to VM2", + "ac-node-id": "POP1", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line2", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "7000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "4000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.001" + } + ] + } + } + } + ] + } + ] + } + } + ] +} diff --git a/src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json new file mode 100644 index 000000000..3a2c4b96c --- /dev/null +++ b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice1.json @@ -0,0 +1,9 @@ +{ + "ietf-subscribed-notifications:input": { + "datastore": "operational", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e/ietf-network-topology:link=E2E-L1/simap-telemetry", + "ietf-yang-push:periodic": { + "ietf-yang-push:period": 10 + } + } +} diff --git a/src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json new file mode 100644 index 000000000..cd0954ac1 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/telemetry/subscription-slice2.json @@ -0,0 +1,9 @@ +{ + "ietf-subscribed-notifications:input": { + "datastore": "operational", + "ietf-yang-push:datastore-xpath-filter": "/ietf-network:networks/network=e2e/ietf-network-topology:link=E2E-L2/simap-telemetry", + "ietf-yang-push:periodic": { + "ietf-yang-push:period": 10 + } + } +} diff --git a/src/tests/mwc26-f5ga/data/topology/topology-agg.json b/src/tests/mwc26-f5ga/data/topology/topology-agg.json new file mode 100644 index 000000000..c761a86dd --- /dev/null +++ b/src/tests/mwc26-f5ga/data/topology/topology-agg.json @@ -0,0 +1,95 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "agg"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-IP"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_L3VPN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.12"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-T"}}, "device_type": "nce", + "device_drivers": ["DEVICEDRIVER_IETF_ACTN"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8082"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "POP1"}}, "device_type": "packet-pop", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.220"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "201", "name": "201", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.1.201.1", "address_prefix": "24", + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { + "uuid": "201", "name": "201", "type": "optical", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "172.10.44.2", "address_prefix": "24", "vlan_tag": 101, + "site_location": "transport", "mtu": "1500" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "POP2"}}, "device_type": "packet-pop", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.204.221"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "201", "name": "201", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.1.101.1", "address_prefix": "24", + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[201]", "resource_value": { + "uuid": "201", "name": "201", "type": "optical", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "172.10.44.2", "address_prefix": "24", "vlan_tag": 201, + "site_location": "transport", "mtu": "1500" + }}} + ]}} + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "L13"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "POP2" }}, "endpoint_uuid": {"uuid": "500"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "L14"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "O-PE2"}}, "endpoint_uuid": {"uuid": "200"}}, + {"device_id": {"device_uuid": {"uuid": "POP1" }}, "endpoint_uuid": {"uuid": "500"}} + ]} + ] +} diff --git a/src/tests/mwc26-f5ga/data/topology/topology-e2e.json b/src/tests/mwc26-f5ga/data/topology/topology-e2e.json new file mode 100644 index 000000000..117e97e61 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/topology/topology-e2e.json @@ -0,0 +1,43 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "e2e"}}} + ], + "devices": [ + {"device_id": {"device_uuid": {"uuid": "TFS-AGG"}}, "device_type": "teraflowsdn", + "device_drivers": ["DEVICEDRIVER_IETF_SLICE"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.11"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "80" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}}, + {"device_id": {"device_uuid": {"uuid": "NCE-FAN"}}, "device_type": "nce", + "device_drivers": ["DEVICEDRIVER_NCE"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "10.254.0.9"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "8081" }}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": { + "scheme": "http", "username": "admin", "password": "admin", "base_url": "/restconf/v2/data", + "timeout": 120, "verify_certs": false, "import_topology": "topology" + }}} + ]}} + ], + "links": [ + {"link_id": {"link_uuid": {"uuid": "L3"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "200"}} + ]}, + {"link_id": {"link_uuid": {"uuid": "L4"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "OLT" }}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "O-PE1"}}, "endpoint_uuid": {"uuid": "200"}} + ]} + ] +} diff --git a/src/tests/mwc26-f5ga/data/topology/topology-ip.json b/src/tests/mwc26-f5ga/data/topology/topology-ip.json new file mode 100644 index 000000000..cd7720160 --- /dev/null +++ b/src/tests/mwc26-f5ga/data/topology/topology-ip.json @@ -0,0 +1,149 @@ +{ + "contexts": [ + {"context_id": {"context_uuid": {"uuid": "admin"}}} + ], + "topologies": [ + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "admin"}}}, + {"topology_id": {"context_id": {"context_uuid": {"uuid": "admin"}}, "topology_uuid": {"uuid": "trans-pkt"}}} + ], + "devices": [ + { + "device_id": {"device_uuid": {"uuid": "P-PE1"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.122.25"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "128.32.44.254", "address_prefix": "24", "vlan_tag": 21, + "site_location": "access", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.1.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.2.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "P-P1"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.31"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.1.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.3.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "P-P2"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.33"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.2.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.4.2", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + }, + { + "device_id": {"device_uuid": {"uuid": "P-PE2"}}, "device_type": "packet-router", + "device_drivers": ["DEVICEDRIVER_UNDEFINED"], + "device_config": {"config_rules": [ + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/address", "resource_value": "172.16.125.32"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/port", "resource_value": "0"}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "_connect/settings", "resource_value": {"endpoints": [ + {"uuid": "lo", "name": "lo", "type": "loopback"}, + {"uuid": "200", "name": "200", "type": "copper"}, + {"uuid": "500", "name": "500", "type": "copper"}, + {"uuid": "501", "name": "501", "type": "copper"} + ]}}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[200]", "resource_value": { + "uuid": "200", "name": "200", "type": "optical", + "address_ip": "172.10.44.254", "address_prefix": "24", "vlan_tag": 201, + "site_location": "cloud", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[500]", "resource_value": { + "uuid": "500", "name": "500", "type": "optical", + "address_ip": "10.44.3.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}}, + {"action": "CONFIGACTION_SET", "custom": {"resource_key": "/endpoints/endpoint[501]", "resource_value": { + "uuid": "501", "name": "501", "type": "optical", + "address_ip": "10.44.4.1", "address_prefix": "24", + "site_location": "transport", "mtu": "1500" + }}} + ]} + } + ], + "links": [ + { + "link_id": {"link_uuid": {"uuid": "L5"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "500"}}, + {"device_id": {"device_uuid": {"uuid": "P-P1" }}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "L6"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-PE1"}}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-P2" }}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "L9"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-P1" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "500"}} + ] + }, + { + "link_id": {"link_uuid": {"uuid": "L10"}}, "link_type" : "LINKTYPE_COPPER", + "attributes": {"is_bidirectional": true, "total_capacity_gbps": 10, "used_capacity_gbps": 0}, + "link_endpoint_ids": [ + {"device_id": {"device_uuid": {"uuid": "P-P2" }}, "endpoint_uuid": {"uuid": "501"}}, + {"device_id": {"device_uuid": {"uuid": "P-PE2"}}, "endpoint_uuid": {"uuid": "501"}} + ] + } + ] +} diff --git a/src/tests/mwc26-f5ga/deploy-specs-agg.sh b/src/tests/mwc26-f5ga/deploy-specs-agg.sh new file mode 100644 index 000000000..c7b5e98b5 --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy-specs-agg.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/mwc26-f5ga/deploy-specs-e2e.sh b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh new file mode 100644 index 000000000..c7b5e98b5 --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service slice nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/mwc26-f5ga/deploy-specs-ip.sh b/src/tests/mwc26-f5ga/deploy-specs-ip.sh new file mode 100644 index 000000000..c02dac122 --- /dev/null +++ b/src/tests/mwc26-f5ga/deploy-specs-ip.sh @@ -0,0 +1,220 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# ----- TeraFlowSDN ------------------------------------------------------------ + +# Set the URL of the internal MicroK8s Docker registry where the images will be uploaded to. +export TFS_REGISTRY_IMAGES="http://localhost:32000/tfs/" + +# Set the list of components, separated by spaces, you want to build images for, and deploy. +export TFS_COMPONENTS="context device pathcomp service nbi webui" + +# Uncomment to activate Monitoring (old) +#export TFS_COMPONENTS="${TFS_COMPONENTS} monitoring" + +# Uncomment to activate Monitoring Framework (new) +#export TFS_COMPONENTS="${TFS_COMPONENTS} kpi_manager kpi_value_writer kpi_value_api telemetry analytics automation" + +# Uncomment to activate QoS Profiles +#export TFS_COMPONENTS="${TFS_COMPONENTS} qos_profile" + +# Uncomment to activate BGP-LS Speaker +#export TFS_COMPONENTS="${TFS_COMPONENTS} bgpls_speaker" + +# Uncomment to activate Optical Controller +# To manage optical connections, "service" requires "opticalcontroller" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "opticalcontroller" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} opticalcontroller service ${AFTER}" +#fi + +# Uncomment to activate ZTP +#export TFS_COMPONENTS="${TFS_COMPONENTS} ztp" + +# Uncomment to activate Policy Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} policy" + +# Uncomment to activate Optical CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} dbscanserving opticalattackmitigator opticalattackdetector opticalattackmanager" + +# Uncomment to activate L3 CyberSecurity +#export TFS_COMPONENTS="${TFS_COMPONENTS} l3_attackmitigator l3_centralizedattackdetector" + +# Uncomment to activate TE +#export TFS_COMPONENTS="${TFS_COMPONENTS} te" + +# Uncomment to activate Forecaster +#export TFS_COMPONENTS="${TFS_COMPONENTS} forecaster" + +# Uncomment to activate E2E Orchestrator +#export TFS_COMPONENTS="${TFS_COMPONENTS} e2e_orchestrator" + +# Uncomment to activate VNT Manager +#export TFS_COMPONENTS="${TFS_COMPONENTS} vnt_manager" + +# Uncomment to activate DLT and Interdomain +#export TFS_COMPONENTS="${TFS_COMPONENTS} interdomain dlt" +#if [[ "$TFS_COMPONENTS" == *"dlt"* ]]; then +# export KEY_DIRECTORY_PATH="src/dlt/gateway/keys/priv_sk" +# export CERT_DIRECTORY_PATH="src/dlt/gateway/keys/cert.pem" +# export TLS_CERT_PATH="src/dlt/gateway/keys/ca.crt" +#fi + +# Uncomment to activate QKD App +# To manage QKD Apps, "service" requires "qkd_app" to be deployed +# before "service", thus we "hack" the TFS_COMPONENTS environment variable prepending the +# "qkd_app" only if "service" is already in TFS_COMPONENTS, and re-export it. +#if [[ "$TFS_COMPONENTS" == *"service"* ]]; then +# BEFORE="${TFS_COMPONENTS% service*}" +# AFTER="${TFS_COMPONENTS#* service}" +# export TFS_COMPONENTS="${BEFORE} qkd_app service ${AFTER}" +#fi + +# Uncomment to activate SIMAP Connector +export TFS_COMPONENTS="${TFS_COMPONENTS} simap_connector" + +# Uncomment to activate Load Generator +#export TFS_COMPONENTS="${TFS_COMPONENTS} load_generator" + + +# Set the tag you want to use for your images. +export TFS_IMAGE_TAG="dev" + +# Set the name of the Kubernetes namespace to deploy TFS to. +export TFS_K8S_NAMESPACE="tfs" + +# Set additional manifest files to be applied after the deployment +export TFS_EXTRA_MANIFESTS="manifests/nginx_ingress_http.yaml" + +# Uncomment to monitor performance of components +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/servicemonitors.yaml" + +# Uncomment when deploying Optical CyberSecurity +#export TFS_EXTRA_MANIFESTS="${TFS_EXTRA_MANIFESTS} manifests/cachingservice.yaml" + +# Set the new Grafana admin password +export TFS_GRAFANA_PASSWORD="admin123+" + +# Disable skip-build flag to rebuild the Docker images. +export TFS_SKIP_BUILD="" + + +# ----- CockroachDB ------------------------------------------------------------ + +# Set the namespace where CockroackDB will be deployed. +export CRDB_NAMESPACE="crdb" + +# Set the external port CockroackDB Postgre SQL interface will be exposed to. +export CRDB_EXT_PORT_SQL="26257" + +# Set the external port CockroackDB HTTP Mgmt GUI interface will be exposed to. +export CRDB_EXT_PORT_HTTP="8081" + +# Set the database username to be used by Context. +export CRDB_USERNAME="tfs" + +# Set the database user's password to be used by Context. +export CRDB_PASSWORD="tfs123" + +# Set CockroachDB installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/crdb.sh for additional details +export CRDB_DEPLOY_MODE="single" + +# Disable flag for dropping database, if it exists. +export CRDB_DROP_DATABASE_IF_EXISTS="YES" + +# Disable flag for re-deploying CockroachDB from scratch. +export CRDB_REDEPLOY="" + + +# ----- NATS ------------------------------------------------------------------- + +# Set the namespace where NATS will be deployed. +export NATS_NAMESPACE="nats" + +# Set the external port NATS Client interface will be exposed to. +export NATS_EXT_PORT_CLIENT="4222" + +# Set the external port NATS HTTP Mgmt GUI interface will be exposed to. +export NATS_EXT_PORT_HTTP="8222" + +# Set NATS installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/nats.sh for additional details +export NATS_DEPLOY_MODE="single" + +# Disable flag for re-deploying NATS from scratch. +export NATS_REDEPLOY="" + + +# ----- Apache Kafka ----------------------------------------------------------- + +# Set the namespace where Apache Kafka will be deployed. +export KFK_NAMESPACE="kafka" + +# Set the port Apache Kafka server will be exposed to. +export KFK_EXT_PORT_CLIENT="9092" + +# Set Kafka installation mode to 'single'. This option is convenient for development and testing. +# See ./deploy/all.sh or ./deploy/kafka.sh for additional details +export KFK_DEPLOY_MODE="single" + +# Disable flag for re-deploying Kafka from scratch. +export KFK_REDEPLOY="" + + +# ----- QuestDB ---------------------------------------------------------------- + +# Set the namespace where QuestDB will be deployed. +export QDB_NAMESPACE="qdb" + +# Set the external port QuestDB Postgre SQL interface will be exposed to. +export QDB_EXT_PORT_SQL="8812" + +# Set the external port QuestDB Influx Line Protocol interface will be exposed to. +export QDB_EXT_PORT_ILP="9009" + +# Set the external port QuestDB HTTP Mgmt GUI interface will be exposed to. +export QDB_EXT_PORT_HTTP="9000" + +# Set the database username to be used for QuestDB. +export QDB_USERNAME="admin" + +# Set the database user's password to be used for QuestDB. +export QDB_PASSWORD="quest" + +# Set the table name to be used by Monitoring for KPIs. +export QDB_TABLE_MONITORING_KPIS="tfs_monitoring_kpis" + +# Set the table name to be used by Slice for plotting groups. +export QDB_TABLE_SLICE_GROUPS="tfs_slice_groups" + +# Disable flag for dropping tables if they exist. +export QDB_DROP_TABLES_IF_EXIST="" + +# Disable flag for re-deploying QuestDB from scratch. +export QDB_REDEPLOY="" + + +# ----- K8s Observability ------------------------------------------------------ + +# Set the external port Prometheus Mgmt HTTP GUI interface will be exposed to. +export PROM_EXT_PORT_HTTP="9090" + +# Set the external port Grafana HTTP Dashboards will be exposed to. +export GRAF_EXT_PORT_HTTP="3000" diff --git a/src/tests/mwc26-f5ga/dummy_L3VPN_delete.sh b/src/tests/mwc26-f5ga/dummy_L3VPN_delete.sh new file mode 100755 index 000000000..d5e199e0c --- /dev/null +++ b/src/tests/mwc26-f5ga/dummy_L3VPN_delete.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ------------- +# For direct testing of L3VPN delete from IP-Controller, without the need to trigger it from AGG-Controller. +# This is a dummy script that replicates the behavior of AGG-Controller when it sends a delete request to IP-Controller. +# -------------- + +cd $(dirname $0) + +echo "[IP-Controller] sending L3VPN delete (dummy replicating AGG-Controller )..." +curl --request DELETE --user admin:admin --location \ + http://10.254.0.12:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service=slice25 + +echo + +echo "Done! Delete!" diff --git a/src/tests/mwc26-f5ga/dummy_L3VPN_request.sh b/src/tests/mwc26-f5ga/dummy_L3VPN_request.sh new file mode 100755 index 000000000..c195fe34f --- /dev/null +++ b/src/tests/mwc26-f5ga/dummy_L3VPN_request.sh @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# ------------- +# For direct testing of L3VPN request from IP-Controller, without the need to trigger it from AGG-Controller. +# This is a dummy script that replicates the behavior of AGG-Controller when it sends a request to IP-Controller. +# -------------- + +cd $(dirname $0) + +echo "[IP-Controller] sending L3VPN request (dummy replicating AGG-Controller request)..." +curl --request POST --location --user admin:admin --header 'Content-Type: application/json' \ + --data @data/slices/l3vpn_request_from_agg.json \ + http://127.0.0.1:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/dump-logs.sh b/src/tests/mwc26-f5ga/dump-logs.sh new file mode 100755 index 000000000..ec68fe5e7 --- /dev/null +++ b/src/tests/mwc26-f5ga/dump-logs.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set working directory +cd "$(dirname "$0")" || exit 1 + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Collecting logs for ${HOSTNAME}..." + +rm logs -rf tmp/exec +mkdir -p tmp/exec + +case "$HOSTNAME" in + simap-server) + echo "Collecting Docker container logs..." + docker logs simap-server > tmp/exec/simap-server.log 2>&1 + docker logs nce-fan-ctrl > tmp/exec/nce-fan-ctrl.log 2>&1 + docker logs nce-t-ctrl > tmp/exec/nce-t-ctrl.log 2>&1 + docker logs traffic-changer > tmp/exec/traffic-changer.log 2>&1 + ;; + tfs-e2e-ctrl) + echo "Collecting TFS E2E Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/e2e-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/e2e-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/e2e-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/e2e-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/e2e-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/e2e-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/e2e-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/e2e-simap-connector.log + ;; + tfs-agg-ctrl) + echo "Collecting TFS Aggregation Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/agg-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/agg-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/agg-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/agg-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/agg-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/agg-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/agg-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/agg-simap-connector.log + ;; + tfs-ip-ctrl) + echo "Collecting TFS IP Controller logs..." + kubectl logs --namespace tfs service/contextservice -c server > tmp/exec/ip-context.log + kubectl logs --namespace tfs service/deviceservice -c server > tmp/exec/ip-device.log + kubectl logs --namespace tfs service/serviceservice -c server > tmp/exec/ip-service.log + kubectl logs --namespace tfs service/pathcompservice -c frontend > tmp/exec/ip-pathcomp-frontend.log + kubectl logs --namespace tfs service/pathcompservice -c backend > tmp/exec/ip-pathcomp-backend.log + kubectl logs --namespace tfs service/webuiservice -c server > tmp/exec/ip-webui.log + kubectl logs --namespace tfs service/nbiservice -c server > tmp/exec/ip-nbi.log + kubectl logs --namespace tfs service/simap-connectorservice -c server > tmp/exec/ip-simap-connector.log + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "No logs to collect." + ;; +esac + +printf "\n" + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/provision-slice1_background.sh b/src/tests/mwc26-f5ga/provision-slice1_background.sh new file mode 100755 index 000000000..1406f7c3c --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice1_background.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice1..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice1_background.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/provision-slice2_game.sh b/src/tests/mwc26-f5ga/provision-slice2_game.sh new file mode 100755 index 000000000..cde5607ba --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice2_game.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice2..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice2_game_creation.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/provision-slice3_another_background.sh b/src/tests/mwc26-f5ga/provision-slice3_another_background.sh new file mode 100755 index 000000000..181629349 --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice3_another_background.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice3..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice3_background.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/provision-slice4_optical.sh b/src/tests/mwc26-f5ga/provision-slice4_optical.sh new file mode 100755 index 000000000..1973f6b2d --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice4_optical.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice4..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice4_optical.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/teardown-slice2_game.sh b/src/tests/mwc26-f5ga/teardown-slice2_game.sh new file mode 100755 index 000000000..d136e79b0 --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice2_game.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice2..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=game_slice_on_ip_transport +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh b/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh new file mode 100755 index 000000000..5c162253b --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice3..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=another_background_slice +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/teardown-slice4_optical.sh b/src/tests/mwc26-f5ga/teardown-slice4_optical.sh new file mode 100755 index 000000000..c45a11be1 --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice4_optical.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice4..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=game_slice_on_optical_transport +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/telemetry-delete-slice1.py b/src/tests/mwc26-f5ga/telemetry-delete-slice1.py new file mode 100644 index 000000000..b2924e1b2 --- /dev/null +++ b/src/tests/mwc26-f5ga/telemetry-delete-slice1.py @@ -0,0 +1,46 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import requests +from requests.auth import HTTPBasicAuth + + +RESTCONF_ADDRESS = '127.0.0.1' +RESTCONF_PORT = 80 +TELEMETRY_ID = 1109405947767160833 + +UNSUBSCRIBE_URI = '/restconf/operations/subscriptions:delete-subscription' +UNSUBSCRIBE_URL = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, UNSUBSCRIBE_URI) +REQUEST = { + 'ietf-subscribed-notifications:input': { + 'id': TELEMETRY_ID, + } +} + + +def main() -> None: + print('[E2E] Delete Telemetry slice1...') + headers = {'accept': 'application/json'} + auth = HTTPBasicAuth('admin', 'admin') + print(UNSUBSCRIBE_URL) + print(REQUEST) + reply = requests.post( + UNSUBSCRIBE_URL, headers=headers, json=REQUEST, auth=auth, + verify=False, allow_redirects=True, timeout=30 + ) + reply.raise_for_status() + +if __name__ == '__main__': + main() diff --git a/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py b/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py new file mode 100644 index 000000000..559556829 --- /dev/null +++ b/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py @@ -0,0 +1,72 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import requests +from requests.auth import HTTPBasicAuth + + +RESTCONF_ADDRESS = '127.0.0.1' +RESTCONF_PORT = 80 +TARGET_SIMAP_NAME = 'e2e' +TARGET_LINK_NAME = 'E2E-L1' +SAMPLING_INTERVAL = 10.0 + + +SUBSCRIBE_URI = '/restconf/operations/subscriptions:establish-subscription' +SUBSCRIBE_URL = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, SUBSCRIBE_URI) +XPATH_FILTER = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' +REQUEST = { + 'ietf-subscribed-notifications:input': { + 'datastore': 'operational', + 'ietf-yang-push:datastore-xpath-filter': XPATH_FILTER.format(TARGET_SIMAP_NAME, TARGET_LINK_NAME), + 'ietf-yang-push:periodic': { + 'ietf-yang-push:period': SAMPLING_INTERVAL + } + } +} + + +def main() -> None: + print('[E2E] Subscribe Telemetry slice1...') + headers = {'accept': 'application/json'} + auth = HTTPBasicAuth('admin', 'admin') + print(SUBSCRIBE_URL) + print(REQUEST) + reply = requests.post( + SUBSCRIBE_URL, headers=headers, json=REQUEST, auth=auth, + verify=False, allow_redirects=True, timeout=30 + ) + content_type = reply.headers.get('Content-Type', '') + if 'application/json' not in content_type: + raise Exception('Not JSON:', reply.content.decode('UTF-8')) + try: + reply_data = reply.json() + except ValueError as e: + str_error = 'Invalid JSON: {:s}'.format(str(reply.content.decode('UTF-8'))) + raise Exception(str_error) from e + + if 'uri' not in reply_data: + raise Exception('Unexpected Reply: {:s}'.format(str(reply_data))) + subscription_uri = reply_data['uri'] + + stream_url = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, subscription_uri) + print('Opening stream "{:s}" (press Ctrl+C to stop)...'.format(stream_url)) + + with requests.get(stream_url, stream=True, auth=auth) as resp: + for line in resp.iter_lines(decode_unicode=True): + print(line) + +if __name__ == '__main__': + main() -- GitLab From 3efec8fe00d4cecabecdbd4491351b0242eb5dfe Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 13:18:20 +0000 Subject: [PATCH 36/78] fix: Update InfluxDBFetcher logging and adjust test parameters for analyze endpoint --- .../clients/influxdb_fetcher.py | 28 +------------------ .../tests/test_api_docker.py | 4 +-- 2 files changed, 3 insertions(+), 29 deletions(-) diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py index d7228647a..b51226e16 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py @@ -136,6 +136,7 @@ class InfluxDBFetcher: LOGGER.debug(f"Processed {len(metrics)} metric records with {len(available_full_cols)} columns") LOGGER.debug(f"Extracted {len(metric_values)} metric value records with {len(available_metric_cols)} columns") + LOGGER.info(f"Response values are: {metric_values} ") return { 'metrics': metrics, @@ -148,33 +149,6 @@ class InfluxDBFetcher: self, sla_policy: SLAPolicyConfig ) -> Dict[str, Any]: - """ - Fetch performance metrics from InfluxDB. - - Queries InfluxDB for time-series performance data based on the - SLA policy parameters and device information. The retry decorator - ensures resilience against transient failures. - - If the initial query returns fewer samples than required by - sla_policy.forecast_sample_count, the method will automatically fetch - older data with an extended time window until the required - sample count is met or max attempts are reached. - - Args: - sla_policy: The SLA policy configuration containing time window, - threshold parameters, and required sample count. - - Returns: - Dictionary containing: - - 'metrics': List of performance metric records. - - 'metric_values': List of metric values only. - - 'timestamp_range': Dictionary with 'start' and 'end' - timestamps for the queried data. - - Raises: - Exception: If InfluxDB is unavailable after all retries, - or if the query fails. - """ if not self.is_connected(): raise ConnectionError("Unable to connect to InfluxDB") diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py index c782f56c3..d6adb654c 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py @@ -94,9 +94,9 @@ def test_analyze_endpoint(ai_engine_server): "latency_threshold_ms": 0, "bandwidth_utilization": 0.0 }, - "history_window_size_sec": 600, + "history_window_size_sec": 60, "forecast_sample_interval_sec": 5, - "forecast_sample_count": 120, + "forecast_sample_count": 50, } LOGGER.info(f"Sending analyze request with payload: {payload}") -- GitLab From 919021194c2a07c4b78557548011422c4026566e Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 15:51:38 +0000 Subject: [PATCH 37/78] feat: Add provisioning and teardown scripts for network slice5 background --- .../slices/network-slice5_background.json | 118 ++++++++++++++++++ .../provision-slice5_another_background.sh | 28 +++++ .../mwc26-f5ga/teardown-slice1_background.sh | 23 ++++ .../teardown-slice5_another_background.sh | 23 ++++ 4 files changed, 192 insertions(+) create mode 100644 src/tests/mwc26-f5ga/data/slices/network-slice5_background.json create mode 100755 src/tests/mwc26-f5ga/provision-slice5_another_background.sh create mode 100755 src/tests/mwc26-f5ga/teardown-slice1_background.sh create mode 100755 src/tests/mwc26-f5ga/teardown-slice5_another_background.sh diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice5_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice5_background.json new file mode 100644 index 000000000..2b4c1999a --- /dev/null +++ b/src/tests/mwc26-f5ga/data/slices/network-slice5_background.json @@ -0,0 +1,118 @@ +{ + "slice-service": [ + { + "id": "another_background_slice_5", + "description": "network slice, PC1-VM1 - using IP transport network", + "sdps": { + "sdp": [ + { + "id": "1", + "node-id": "ONT1", + "sdp-ip-address": ["172.16.61.10"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["21"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" + }]} + }, + { + "id": "2", + "node-id": "POP2", + "sdp-ip-address": ["172.16.204.221"], + "service-match-criteria": {"match-criterion": [{ + "index": 1, + "match-type": [ + {"type": "ietf-network-slice-service:vlan", "value": ["201"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.101.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + ], + "target-connection-group-id": "line1" + }]}, + "attachment-circuits": {"attachment-circuit": [{ + "id": "AC POP2 to VM1", + "description": "AC POP2 connected to VM1", + "ac-node-id": "POP2", + "ac-tp-id": "200" + }]} + } + ] + }, + "connection-groups": { + "connection-group": [ + { + "id": "line1", + "connectivity-type": "point-to-point", + "connectivity-construct": [ + { + "id": 1, + "p2p-sender-sdp": "1", + "p2p-receiver-sdp": "2", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "20" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "1000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + }, + { + "id": 2, + "p2p-sender-sdp": "2", + "p2p-receiver-sdp": "1", + "service-slo-sle-policy": { + "slo-policy": { + "metric-bound": [ + { + "metric-type": "ietf-network-slice-service:one-way-delay-maximum", + "metric-unit": "milliseconds", + "bound": "10" + }, + { + "metric-type": "ietf-network-slice-service:one-way-bandwidth", + "metric-unit": "Mbps", + "bound": "5000" + }, + { + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", + "percentile-value": "0.01" + } + ] + } + } + } + ] + } + ] + } + } + ] +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/provision-slice5_another_background.sh b/src/tests/mwc26-f5ga/provision-slice5_another_background.sh new file mode 100755 index 000000000..a186cf531 --- /dev/null +++ b/src/tests/mwc26-f5ga/provision-slice5_another_background.sh @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +# Make folder containing the script the root folder for its execution +cd $(dirname $0) + + +echo "[E2E] Provisioning slice5..." +curl --request POST --location --header 'Content-Type: application/json' \ + --data @data/slices/network-slice5_background.json \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/teardown-slice1_background.sh b/src/tests/mwc26-f5ga/teardown-slice1_background.sh new file mode 100755 index 000000000..0c99b79ce --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice1_background.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice2..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=initial_background_slice +echo + + +echo "Done!" diff --git a/src/tests/mwc26-f5ga/teardown-slice5_another_background.sh b/src/tests/mwc26-f5ga/teardown-slice5_another_background.sh new file mode 100755 index 000000000..a8879dd10 --- /dev/null +++ b/src/tests/mwc26-f5ga/teardown-slice5_another_background.sh @@ -0,0 +1,23 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +echo "[E2E] Tear Down slice3..." +curl --request DELETE --location \ + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=another_background_slice_5 +echo + + +echo "Done!" -- GitLab From 1f7d79798b897f08800953e3bb4080e85690a61e Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 15:55:14 +0000 Subject: [PATCH 38/78] fix: Update allowed links and capacity values for controllers --- .../service/simap_updater/AllowedLinks.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index eb39abacd..b52b82e86 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -17,8 +17,7 @@ ALLOWED_LINKS_PER_CONTROLLER = { 'agg' : { 'L3', 'L4', 'L13', 'L14' }, 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, # The remaining can not be monitored therefore they are not included in the allowed links for the controllers - # 'agg' : { 'L3', 'L4', 'L7ab', 'L7ba', 'L8ab', 'L8ba', - # 'L11ab', 'L11ba', 'L12ab', 'L12ba', 'L13', 'L14' }, + # 'agg' : { 'L7ab', 'L7ba', 'L8ab', 'L8ba', 'L11ab', 'L11ba', 'L12ab', 'L12ba', }, } # NOTE: Ranges should be less than 100 because the schema does not allow # bandwidth-utilization to exceed 100% @@ -33,8 +32,8 @@ ALLOWED_LINKS_PER_CONTROLLER = { # description "0–100 percent value."; # } LINKS_CAPACITY = { - 'L1' : 30, 'L2' : 30, 'L3' : 70, 'L4' : 70, - 'L5' : 90, 'L6' : 90, 'L9' : 90, 'L10' : 90, - 'L7ab' : 50, 'L7ba' : 50, 'L8ab' : 50, 'L8ba' : 50, 'L11ab' : 50, - 'L11ba' : 50, 'L12ab': 50, 'L12ba': 50, 'L13' : 30, 'L14' : 30, + 'L1' : 100, 'L2' : 100, 'L3' : 100, 'L4' : 100, + 'L5' : 100, 'L6' : 100, 'L9' : 100, 'L10' : 100, + 'L7ab' : 100, 'L7ba' : 100, 'L8ab' : 100, 'L8ba' : 100, 'L11ab' : 100, + 'L11ba' : 100, 'L12ab': 100, 'L12ba': 100, 'L13' : 100, 'L14' : 100, } -- GitLab From fe7b63bf3c01129b16d145b4c38a8abed509c11a Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Thu, 19 Feb 2026 16:24:32 +0000 Subject: [PATCH 39/78] fix: Adjust noise factors for bandwidth and latency calculations in SyntheticSampler --- .../worker/data/SyntheticSamplers.py | 20 ++++++++----------- 1 file changed, 8 insertions(+), 12 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 743743848..b1f7bb837 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -66,7 +66,7 @@ class SyntheticSampler: timestamp = datetime.now().timestamp() # Determine range based on connection count (cap at 4+) - conn_key = min(self.connection_count, 4) + conn_key = min(self.connection_count, 4) avg, min_bw, max_bw = self.BW_RANGES[conn_key] # Generate bandwidth percentage @@ -74,13 +74,13 @@ class SyntheticSampler: # First sample: start at average for this connection count bw_utilization = avg else: - # Add ±2% noise to previous value for temporal continuity - noise_factor = random.uniform(-0.02, 0.02) + # Add ±1% noise to previous value for temporal continuity + noise_factor = random.uniform(-0.01, 0.01) bw_utilization = self.prev_bw * (1.0 + noise_factor) # Clamp to current range (handles "jump" when connection count changes) bw_utilization = max(min_bw, min(max_bw, bw_utilization)) - self.prev_bw = bw_utilization + self.prev_bw = bw_utilization # Latency scales proportionally with bandwidth (1ms at 0%, 20ms at 100%) target_latency = 1.0 + (bw_utilization / 100.0) * 19.0 @@ -88,12 +88,12 @@ class SyntheticSampler: if self.prev_latency is None: latency = target_latency else: - # Add ±2% noise to previous latency - noise_factor = random.uniform(-0.02, 0.02) - latency = self.prev_latency * (1.0 + noise_factor) + # Add ±1% noise to previous latency + noise_factor = random.uniform(-0.01, 0.01) + latency = self.prev_latency * (1.0 + noise_factor) # Clamp latency to reasonable range - latency = max(0.5, min(25.0, latency)) + latency = max(0.5, min(25.0, latency)) self.prev_latency = latency # Convert percentage to actual utilization (Gbps) @@ -109,10 +109,6 @@ class SyntheticSamplers: def add_sampler( self, sampler_name : str, - base_bw_range : Tuple[float, float] = (5.0, 20.0), - base_latency_range : Tuple[float, float] = (0.3, 2.0), - sensitivity_range : Tuple[float, float] = (0.3, 1.0), - curve_type : Optional[str] = None, connection_count : int = 0, link_capacity : float = 100.0 ) -> None: -- GitLab From 4f955ac00241cc032ed1a818eaec8dc6ece2e55b Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 07:22:13 +0000 Subject: [PATCH 40/78] fix: Correct allowed links configuration for controllers in AllowedLinks.py --- src/simap_connector/service/simap_updater/AllowedLinks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index b52b82e86..cc45c7f5c 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -13,9 +13,9 @@ # limitations under the License. ALLOWED_LINKS_PER_CONTROLLER = { - 'e2e' : { 'L1', 'L2', }, - 'agg' : { 'L3', 'L4', 'L13', 'L14' }, - 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, + 'e2e' : { 'L1', 'L2', 'L3', 'L4' }, + 'agg' : { 'L13', 'L14' }, + 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, # The remaining can not be monitored therefore they are not included in the allowed links for the controllers # 'agg' : { 'L7ab', 'L7ba', 'L8ab', 'L8ba', 'L11ab', 'L11ba', 'L12ab', 'L12ba', }, } -- GitLab From 6fd37336b58bf23355868d12713b27aa2f407625 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 07:23:02 +0000 Subject: [PATCH 41/78] fix: Update log collection for ai-engine in dump-logs.sh --- src/tests/mwc26-f5ga/dump-logs.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/mwc26-f5ga/dump-logs.sh b/src/tests/mwc26-f5ga/dump-logs.sh index ec68fe5e7..cf995b27a 100755 --- a/src/tests/mwc26-f5ga/dump-logs.sh +++ b/src/tests/mwc26-f5ga/dump-logs.sh @@ -29,7 +29,7 @@ case "$HOSTNAME" in docker logs simap-server > tmp/exec/simap-server.log 2>&1 docker logs nce-fan-ctrl > tmp/exec/nce-fan-ctrl.log 2>&1 docker logs nce-t-ctrl > tmp/exec/nce-t-ctrl.log 2>&1 - docker logs traffic-changer > tmp/exec/traffic-changer.log 2>&1 + docker logs ai-engine > tmp/exec/ai-engine.log 2>&1 ;; tfs-e2e-ctrl) echo "Collecting TFS E2E Controller logs..." -- GitLab From 2399b315acbda6151745d292c5c2523cbe63d7b9 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 09:31:37 +0000 Subject: [PATCH 42/78] feat: Implement background analysis and control endpoints for AI Analytics Engine --- .../AI_analytics_engine/api/api_blueprint.py | 333 +++++++++++++++--- .../AI_analytics_engine/tests/run_test.sh | 2 +- .../tests/test_api_docker.py | 218 +++++++++++- 3 files changed, 492 insertions(+), 61 deletions(-) diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py b/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py index 564da8167..3ea89d2d9 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py @@ -19,10 +19,13 @@ Defines the REST API endpoints for the AI Analytics Engine. """ import logging +import threading +import time from datetime import datetime, UTC from flask import Blueprint, jsonify, request +import requests from ..config import Config from ..ai_model.ai_processor import AIModelProcessor @@ -33,6 +36,15 @@ from ..ai_model.sla_policy import SLAPolicyConfig LOGGER = logging.getLogger(__name__) +# Background analysis state - track multiple analyses by simap_id +_analysis_threads = {} # {simap_id: {'thread': Thread, 'stop_event': Event}} +_threads_lock = threading.Lock() + +# OSM endpoint configuration +END_HOST = '10.0.58.25' +END_PORT = 8084 +BASE_URL = f'http://{END_HOST}:{END_PORT}/osm/aiAnalyticsEvent/v1' + def create_ai_analytics_blueprint( simap_fetcher: SimapDataFetcher, @@ -54,24 +66,102 @@ def create_ai_analytics_blueprint( Returns: Configured Flask Blueprint with routes: - - POST /api/v1/analyze: Run SLA policy analysis + - POST /api/v1/analyze: Start background SLA policy analysis + - POST /api/v1/analyze/stop: Stop analysis for specific SIMAP ID + - POST /api/v1/analyze/stop-all: Stop all running analyses + - GET /api/v1/status: Get status of all running analyses - GET /api/v1/health: Health check endpoint - GET /api/v1/config: Get current configuration + - POST /api/v1/notify: Handle telemetry update notifications """ blueprint = Blueprint('ai_analytics', __name__, url_prefix='/api/v1') + def _background_analysis_task(sla_policy: SLAPolicyConfig, duration_minutes: int, stop_event: threading.Event): + """ + Background task that periodically analyzes data and posts results. + Args: + sla_policy: SLA policy configuration for analysis. + duration_minutes: How long to run the analysis (in minutes). + stop_event: Threading event to signal task termination. + """ + simap_id = sla_policy.simap_id + + try: + LOGGER.info(f"[{simap_id}] Starting background analysis for {duration_minutes} minutes") + start_time = time.time() + end_time = start_time + (duration_minutes * 60) + iteration = 0 + + while time.time() < end_time and not stop_event.is_set(): + iteration += 1 + iteration_start = time.time() + + try: + LOGGER.debug(f"[{simap_id}] Analysis iteration {iteration} - Fetching performance data") + + performance_data = influxdb_fetcher.fetch_performance_data(sla_policy) + + LOGGER.debug(f"[{simap_id}] Analysis iteration {iteration} - Processing with AI models") + results = ai_processor.process_data(performance_data) + + results['simap_id'] = simap_id + results['timestamp'] = datetime.now(UTC).isoformat() + results['iteration'] = iteration + + LOGGER.debug(f"[{simap_id}] Analysis iteration {iteration} - Posting results to {BASE_URL}") + response = requests.post( + BASE_URL, + json = results, + timeout = 10, + headers = {'Content-Type': 'application/json'} + ) + + if response.status_code in (200, 201, 202): + LOGGER.info(f"[{simap_id}] Iteration {iteration}: Results posted successfully (status {response.status_code})") + else: + LOGGER.warning(f"[{simap_id}] Iteration {iteration}: POST returned status {response.status_code}: {response.text}") + + except Exception as e: + LOGGER.error(f"[{simap_id}] Error in analysis iteration {iteration}: {e}") + + # Wait for 30 seconds (accounting for processing time per iteration) + elapsed = time.time() - iteration_start + sleep_time = max(0, 30 - elapsed) + + if sleep_time > 0 and time.time() + sleep_time < end_time and not stop_event.is_set(): + LOGGER.debug(f"[{simap_id}] Sleeping for {sleep_time:.1f} seconds until next iteration") + stop_event.wait(timeout=sleep_time) # Use wait instead of sleep for immediate response + elif time.time() < end_time: + # Not enough time for another full iteration cycle, exit gracefully + LOGGER.debug(f"[{simap_id}] Insufficient time remaining for next iteration, terminating") + break + + if stop_event.is_set(): + LOGGER.info(f"[{simap_id}] Background analysis stopped after {iteration} iterations") + else: + LOGGER.info(f"[{simap_id}] Background analysis completed after {iteration} iterations. Time limit reached.") + + except Exception as e: + LOGGER.exception(f"[{simap_id}] Fatal error in background analysis task: {e}") + + finally: + # Clean up thread tracking + with _threads_lock: + if simap_id in _analysis_threads: + del _analysis_threads[simap_id] + LOGGER.info(f"[{simap_id}] Background analysis task terminated") + @blueprint.route('/analyze', methods=['POST']) def analyze(): """ - Run SLA policy analysis. + Start SLA policy analysis in background. - Expects JSON payload with SLA policy configuration. - Orchestrates the full analysis workflow: - fetch metrics from InfluxDB, process through AI models, and - send results to Decision Engine. + Expects JSON payload with SLA policy configuration including duration_minutes. + Validates input and immediately returns 202 Accepted. + Analysis runs in background, posting results every 30 seconds for the specified duration. Returns: - JSON response with analysis results or error message. + JSON response with acceptance confirmation or error message. """ LOGGER.info("Received analysis request") @@ -107,58 +197,209 @@ def create_ai_analytics_blueprint( 'status': 'error', 'message': f'Invalid field value: {str(e)}' }), 400 - - # Execute analysis workflow + + # Extract duration from request + try: + duration_minutes = int(data.get('duration_minutes', 0)) + if duration_minutes <= 0: + raise ValueError("duration_minutes must be positive") + except (TypeError, ValueError) as e: + LOGGER.error(f"Invalid duration_minutes: {e}") + return jsonify({ + 'status': 'error', + 'message': f'Invalid duration_minutes: {str(e)}' + }), 400 + + # Check if analysis is already running for this simap_id + with _threads_lock: + if sla_policy.simap_id in _analysis_threads: + thread_info = _analysis_threads[sla_policy.simap_id] + if thread_info['thread'].is_alive(): + LOGGER.warning(f"Analysis request rejected: analysis for SIMAP ID {sla_policy.simap_id} is already running") + return jsonify({ + 'status': 'error', + 'message': f'Analysis for SIMAP ID {sla_policy.simap_id} is already running. Stop it first.' + }), 409 # Conflict + + # Start background analysis task try: - # Step 1: Fetch device data from SIMAP - # (At the moment, leaving it as it is. No more needed, to be removed in future) - # LOGGER.debug("Step 1: Fetching device data from SIMAP") - # device_data = simap_fetcher.fetch_device_data(sla_policy) - - # Step 2: Fetch performance data from InfluxDB - LOGGER.debug(">>> Step 2: Fetching performance data from InfluxDB") - performance_data = influxdb_fetcher.fetch_performance_data( - sla_policy + stop_event = threading.Event() + analysis_thread = threading.Thread( + target=_background_analysis_task, + args=(sla_policy, duration_minutes, stop_event), + daemon=True, + name=f"AI-Analysis-Thread-{sla_policy.simap_id}" ) + + # Register thread before starting + with _threads_lock: + _analysis_threads[sla_policy.simap_id] = { + 'thread': analysis_thread, + 'stop_event': stop_event, + 'start_time': datetime.now(UTC).isoformat(), + 'duration_minutes': duration_minutes + } + + analysis_thread.start() + + LOGGER.info(f"Background analysis started for SIMAP ID {sla_policy.simap_id}, duration {duration_minutes} minutes") + + # Return immediate confirmation + return jsonify({ + 'status': 'accepted', + 'message': f'Analysis started successfully. Results will be posted every 30 seconds for {duration_minutes} minutes.', + 'simap_id': sla_policy.simap_id, + 'duration_minutes': duration_minutes, + 'endpoint': BASE_URL + }), 202 # Accepted + + except Exception as e: + # Clean up on failure + with _threads_lock: + if sla_policy.simap_id in _analysis_threads: + del _analysis_threads[sla_policy.simap_id] + LOGGER.exception(f"Failed to start background analysis: {e}") + return jsonify({ + 'status': 'error', + 'message': f'Failed to start analysis: {str(e)}' + }), 500 - # >>> Step 3: Process data through AI models - LOGGER.debug(">>> Step 3: Processing data through AI models") - results = ai_processor.process_data( - performance_data - ) + @blueprint.route('/analyze/stop', methods=['POST']) + def stop_analyze(): + """ + Stop running analysis for a specific SIMAP ID. + + Expects JSON payload with simap_id. + Signals the background thread to stop gracefully. - # >>> Step 4: Send results to Decision Engine - results['simap_id'] = sla_policy.simap_id # Include SIMAP ID in results - LOGGER.debug(">>> Step 4: Sending results to Decision Engine") - if not decision_client.send_results(results): - LOGGER.error("Failed to send results to Decision Engine") + Returns: + JSON response with stop confirmation or error message. + """ + LOGGER.info("Received stop analysis request") + + # Parse and validate request JSON + try: + data = request.get_json() + if data is None: + LOGGER.error("Request body is empty or not valid JSON") return jsonify({ 'status': 'error', - 'message': 'Failed to send results to Decision Engine' - }), 500 + 'message': 'Request body must be valid JSON' + }), 400 + except Exception as e: + LOGGER.error(f"Failed to parse request JSON: {e}") + return jsonify({ + 'status': 'error', + 'message': f'Invalid JSON: {str(e)}' + }), 400 - LOGGER.info("Analysis completed successfully") + # Extract simap_id + simap_id = data.get('simap_id') + if not simap_id: + LOGGER.error("Missing simap_id in request") return jsonify({ - 'status': 'success', - 'data': results, - 'message': 'Analysis completed successfully' - }), 200 + 'status': 'error', + 'message': 'Missing required field: simap_id' + }), 400 - except Exception as e: - # Check if this is a retry failure (service unavailable) - error_msg = str(e) - if 'Giving up' in error_msg or 'unavailable' in error_msg.lower(): - LOGGER.error(f"External service unavailable: {e}") + # Find and stop the thread + with _threads_lock: + if simap_id not in _analysis_threads: + LOGGER.warning(f"No running analysis found for SIMAP ID {simap_id}") return jsonify({ 'status': 'error', - 'message': f'External service unavailable: {error_msg}' - }), 503 - else: - LOGGER.exception(f"Unexpected error during analysis: {e}") + 'message': f'No running analysis found for SIMAP ID {simap_id}' + }), 404 + + thread_info = _analysis_threads[simap_id] + if not thread_info['thread'].is_alive(): + # Clean up dead thread + del _analysis_threads[simap_id] + LOGGER.warning(f"Analysis thread for SIMAP ID {simap_id} is not alive") return jsonify({ 'status': 'error', - 'message': f'Internal server error: {error_msg}' - }), 500 + 'message': f'Analysis for SIMAP ID {simap_id} is not running' + }), 404 + + # Signal thread to stop + thread_info['stop_event'].set() + LOGGER.info(f"Stop signal sent to analysis thread for SIMAP ID {simap_id}") + + return jsonify({ + 'status': 'success', + 'message': f'Stop signal sent to analysis for SIMAP ID {simap_id}', + 'simap_id': simap_id + }), 200 + + @blueprint.route('/analyze/stop-all', methods=['POST']) + def stop_all_analyses(): + """ + Stop all running analyses. + + Signals all background threads to stop gracefully. + + Returns: + JSON response with summary of stopped analyses. + """ + LOGGER.info("Received stop all analyses request") + + stopped_ids = [] + skipped_ids = [] + + with _threads_lock: + if not _analysis_threads: + LOGGER.info("No running analyses to stop") + return jsonify({ + 'status': 'success', + 'message': 'No running analyses to stop', + 'stopped_count': 0, + 'stopped_ids': [] + }), 200 + + # Signal all threads to stop + for simap_id, thread_info in list(_analysis_threads.items()): + if thread_info['thread'].is_alive(): + thread_info['stop_event'].set() + stopped_ids.append(simap_id) + LOGGER.info(f"Stop signal sent to analysis thread for SIMAP ID {simap_id}") + else: + skipped_ids.append(simap_id) + LOGGER.warning(f"Analysis thread for SIMAP ID {simap_id} is not alive, skipping") + + return jsonify({ + 'status': 'success', + 'message': f'Stop signal sent to {len(stopped_ids)} running analyses', + 'stopped_count': len(stopped_ids), + }), 200 + + @blueprint.route('/status', methods=['GET']) + def status(): + """ + Get status of all running analyses. + + Returns: + JSON response with list of running analyses. + """ + LOGGER.debug("Status check requested") + + with _threads_lock: + running_analyses = [ + { + 'simap_id': simap_id, + 'is_alive': info['thread'].is_alive(), + 'start_time': info['start_time'], + 'duration_minutes': info['duration_minutes'] + } + for simap_id, info in _analysis_threads.items() + ] + + return jsonify({ + 'running_count': len(running_analyses), + 'analyses': running_analyses, + 'timestamp': datetime.now(UTC).isoformat() + }), 200 + + @blueprint.route('/health', methods=['GET']) def health(): diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh index c1372948a..7482b1b00 100755 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh @@ -37,7 +37,7 @@ LOG_FILE="${PWD}/test_api_docker.log" TEST_FILE="${PWD}/test_api_docker.py" # Run the test with logging enabled and capture output -pytest $TEST_FILE \ +pytest $TEST_FILE::test_analyze_endpoint \ -v -s \ --log-cli-level=DEBUG \ --log-file="${LOG_FILE}" \ diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py index d6adb654c..96c7c3616 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py @@ -41,7 +41,7 @@ BASE_URL = f'http://{TEST_HOST}:{TEST_PORT}' @pytest.fixture(scope='module') -def ai_engine_server(): +def ai_engine_server_connection_confirmation(): """ Fixture to verify the AI Analytics Engine Docker container is running. @@ -75,28 +75,29 @@ def ai_engine_server(): -def test_analyze_endpoint(ai_engine_server): +def test_analyze_endpoint(ai_engine_server_connection_confirmation): """ Test POST /api/v1/analyze endpoint. Validates that the analyze endpoint: - Accepts valid SLA policy JSON payload - - Returns appropriate status codes (200 for success, 503 for service unavailable) - - Returns JSON response with status and message fields + - Returns 202 Accepted for background processing + - Returns JSON response with status, message, simap_id, duration, and endpoint fields """ LOGGER.info(">>>>>> Starting test_case test_analyze_endpoint: POST /api/v1/analyze endpoint") # Prepare test payload with SLA policy configuration payload = { - "simap_id": "E2E-L1", + "simap_id": "L1", "sla_metrics": { - "latency_threshold_ms": 0, + "latency_threshold_ms": 0, "bandwidth_utilization": 0.0 }, "history_window_size_sec": 60, "forecast_sample_interval_sec": 5, "forecast_sample_count": 50, + "duration_minutes": 2 # Short duration for testing } LOGGER.info(f"Sending analyze request with payload: {payload}") @@ -107,8 +108,6 @@ def test_analyze_endpoint(ai_engine_server): json=payload, timeout=10 ) - - # Add condition to validate response status code and content LOGGER.info(f"Analyze response status: {response.status_code}") @@ -120,14 +119,15 @@ def test_analyze_endpoint(ai_engine_server): assert 'status' in data, "Response missing 'status' field" assert 'message' in data, "Response missing 'message' field" - # Accept either success (200) or service unavailable (503) + # Accept either accepted (202) or service unavailable (503) # 503 is expected if SIMAP server or InfluxDB are not running - if response.status_code == 200: - LOGGER.info("Analysis completed successfully") - assert data['status'] == 'success', f"Expected status 'success', got '{data['status']}'" - assert 'data' in data, "Successful response missing 'data' field" + if response.status_code == 202: + LOGGER.info("Analysis started successfully") + assert data['status'] == 'accepted', f"Expected status 'accepted', got '{data['status']}'" + assert data['simap_id'] == 'L1', f"Expected simap_id 'L1', got '{data['simap_id']}'" + assert data['duration_minutes'] == 2, f"Expected duration_minutes 2, got '{data['duration_minutes']}'" + assert '/osm/aiAnalyticsEvent/v1' in data['endpoint'], f"Expected '/osm/aiAnalyticsEvent/v1' in endpoint" elif response.status_code == 503: - # LOGGER.error("External service unavailable (expected if SIMAP/InfluxDB not running)") assert data['status'] == 'error', f"Expected status 'error' for 503, got '{data['status']}'" pytest.fail("External service unavailable (expected if SIMAP/InfluxDB not running)") elif response.status_code == 400: @@ -140,3 +140,193 @@ def test_analyze_endpoint(ai_engine_server): LOGGER.info("Analyze endpoint test passed!") LOGGER.info("<<<<<< Finished test_case test_analyze_endpoint") + +def test_status_endpoint(ai_engine_server): + """ + Test GET /api/v1/status endpoint. + + Validates that the status endpoint: + - Returns list of running analyses + - Includes running_count, analyses array, and timestamp + - Each analysis has simap_id, is_alive, start_time, duration_minutes + """ + + LOGGER.info(">>>>>> Starting test_case test_status_endpoint: GET /api/v1/status endpoint") + + # Send GET request to status endpoint + response = requests.get( + f'{BASE_URL}/api/v1/status', + timeout=5 + ) + + LOGGER.info(f"Status response status: {response.status_code}") + assert response.status_code == 200, f"Expected status code 200, got {response.status_code}" + + # Parse JSON response + data = response.json() + LOGGER.info(f"Status response body: {data}") + + # Validate response structure + assert 'running_count' in data, "Response missing 'running_count' field" + assert 'analyses' in data, "Response missing 'analyses' field" + assert 'timestamp' in data, "Response missing 'timestamp' field" + assert isinstance(data['analyses'], list), "Field 'analyses' should be a list" + + # If there are running analyses, validate their structure + if data['running_count'] > 0: + LOGGER.info(f"Found {data['running_count']} running analyses") + for analysis in data['analyses']: + assert 'simap_id' in analysis, "Analysis missing 'simap_id' field" + assert 'is_alive' in analysis, "Analysis missing 'is_alive' field" + assert 'start_time' in analysis, "Analysis missing 'start_time' field" + assert 'duration_minutes' in analysis, "Analysis missing 'duration_minutes' field" + else: + LOGGER.info("No analyses currently running") + + LOGGER.info("Status endpoint test passed!") + LOGGER.info("<<<<<< Finished test_case test_status_endpoint") + + +def test_stop_analyze_endpoint(ai_engine_server): + """ + Test POST /api/v1/analyze/stop endpoint. + + Validates that the stop endpoint: + - Stops a running analysis by simap_id + - Returns 404 if no analysis found + - Returns 200 on successful stop + """ + + LOGGER.info(">>>>>> Starting test_case test_stop_analyze_endpoint: POST /api/v1/analyze/stop endpoint") + + # First, start an analysis to test stopping it + start_payload = { + "simap_id": "L2", + "sla_metrics": { + "latency_threshold_ms": 0, + "bandwidth_utilization": 0.0 + }, + "history_window_size_sec": 60, + "forecast_sample_interval_sec": 5, + "forecast_sample_count": 50, + "duration_minutes": 5 # Longer duration so we can stop it + } + + LOGGER.info(f"Starting analysis with payload: {start_payload}") + start_response = requests.post( + f'{BASE_URL}/api/v1/analyze', + json=start_payload, + timeout=10 + ) + + # Only proceed with stop test if start was successful + if start_response.status_code == 202: + LOGGER.info("Analysis started, now testing stop endpoint") + + # Wait a moment to ensure thread is running + time.sleep(2) + + # Test stopping the analysis + stop_payload = {"simap_id": start_payload["simap_id"]} + + LOGGER.info(f"Sending stop request with payload: {stop_payload}") + response = requests.post( + f'{BASE_URL}/api/v1/analyze/stop', + json=stop_payload, + timeout=10 + ) + + LOGGER.info(f"Stop response status: {response.status_code}") + + # Parse JSON response + data = response.json() + LOGGER.info(f"Stop response body: {data}") + + assert response.status_code == 200, f"Expected status code 200, got {response.status_code}" + assert data['status'] == 'success', f"Expected status 'success', got '{data['status']}'" + assert data['simap_id'] == start_payload["simap_id"], f"Expected simap_id '{start_payload['simap_id']}', got '{data['simap_id']}'" + + LOGGER.info("Stop successful, verifying analysis is stopped") + + # Verify the analysis is no longer running + time.sleep(1) + status_response = requests.get(f'{BASE_URL}/api/v1/status', timeout=5) + status_data = status_response.json() + + # Check if L1 is still in the list + running_ids = [a['simap_id'] for a in status_data['analyses'] if a['is_alive']] + assert 'L2' not in running_ids, "Analysis should be stopped" + + LOGGER.info("Verified analysis was stopped") + else: + LOGGER.warning(f"Skipping stop test - could not start analysis (status {start_response.status_code})") + pytest.skip("Could not start analysis to test stop functionality") + + # Test stopping non-existent analysis + LOGGER.info("Testing stop on non-existent analysis") + stop_nonexistent = {"simap_id": "nonexistent-id"} + response = requests.post( + f'{BASE_URL}/api/v1/analyze/stop', + json=stop_nonexistent, + timeout=10 + ) + + LOGGER.info(f"Stop nonexistent response status: {response.status_code}") + data = response.json() + LOGGER.info(f"Stop nonexistent response body: {data}") + + assert response.status_code == 404, f"Expected status code 404 for nonexistent, got {response.status_code}" + assert data['status'] == 'error', f"Expected status 'error', got '{data['status']}'" + + LOGGER.info("Stop endpoint test passed!") + LOGGER.info("<<<<<< Finished test_case test_stop_analyze_endpoint") + + +def test_stop_all_analyses_endpoint(ai_engine_server): + """ + Test POST /api/v1/analyze/stop-all endpoint. + + Validates that the stop-all endpoint: + - Stops all running analyses + - Returns summary with stopped_count and stopped_ids + - Handles case when no analyses are running + """ + + LOGGER.info(">>>>>> Starting test_case test_stop_all_analyses_endpoint: POST /api/v1/analyze/stop-all endpoint") + started_ids = ["L1"] + + + # Only proceed if at least one analysis started + if len(started_ids) > 0: + LOGGER.info(f"Started {len(started_ids)} analyses, now testing stop-all endpoint") + + # Call stop-all endpoint + LOGGER.info("Sending stop-all request") + response = requests.post( + f'{BASE_URL}/api/v1/analyze/stop-all', + timeout=10 + ) + + LOGGER.info(f"Stop-all response status: {response.status_code}") + + # Parse JSON response + data = response.json() + LOGGER.info(f"Stop-all response body: {data}") + + # Validate response + assert response.status_code == 200, f"Expected status code 200, got {response.status_code}" + assert data['status'] == 'success', f"Expected status 'success', got '{data['status']}'" + assert 'stopped_count' in data, "Response missing 'stopped_count' field" + + # Verify that analyses were stopped + assert data['stopped_count'] > 0, "Expected at least one analysis to be stopped" + + LOGGER.info("Stop-all successful, verified all analyses stopped") + else: + LOGGER.warning("Could not start any analyses, skipping stop-all test") + pytest.skip("Could not start analyses to test stop-all functionality") + + LOGGER.info("Stop-all endpoint test passed!") + LOGGER.info("<<<<<< Finished test_case test_stop_all_analyses_endpoint") + + -- GitLab From 22a0b1a74ef52fcde230b99750c3b40ab4e3ba1c Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 09:43:49 +0000 Subject: [PATCH 43/78] fix: Sort InfluxDB response data by time in ascending order --- .../AI_analytics_engine/clients/influxdb_fetcher.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py index b51226e16..5f82a0a47 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py @@ -119,6 +119,11 @@ class InfluxDBFetcher: LOGGER.debug(f"Processing {len(table)} rows from InfluxDB response") + # Sort by time column (old to new) if it exists + if 'time' in table.columns: + table = table.sort_values(by='time', ascending=True) + LOGGER.debug("Sorted data by time (ascending: old to new)") + # Define columns for each output dataframe full_columns = ['bandwidth_utilization', 'latency', 'time', 'link_id'] metric_columns = ['bandwidth_utilization', 'latency'] -- GitLab From f9ea4e95ede61f94c2a17feafe15a4f534918c9c Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 10:26:57 +0000 Subject: [PATCH 44/78] fix: Clear connection cache when no allowed links are found and adjust bandwidth range mappings in SyntheticSampler --- .../service/simap_updater/SimapUpdater.py | 1 + .../service/telemetry/worker/data/SyntheticSamplers.py | 10 +++++----- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 28d504d15..884df583d 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -749,6 +749,7 @@ class EventDispatcher(BaseEventDispatcher): if not processed_links: LOGGER.debug('Connection {:s} has no allowed links for domain {:s}'.format( connection_uuid, domain_name)) + self._object_cache.delete(CachedEntities.CONNECTION, connection_uuid) return None # Cache the connection-to-links mapping for later retrieval (e.g., during REMOVE events) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index b1f7bb837..3b50ceae5 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -41,11 +41,11 @@ class SyntheticSampler: # Connection count to (avg, min, max) percentage mapping BW_RANGES = { - 0: (3, 0, 10), - 1: (25, 10, 40), - 2: (45, 30, 60), - 3: (65, 50, 80), - 4: (85, 70, 90), + 0: (3, 1, 10), + 1: (25, 15, 30), + 2: (45, 35, 55), + 3: (65, 60, 80), + 4: (85, 80, 95), } @classmethod -- GitLab From cf558d123f928a836410ef2a3481def074e32292 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 11:18:43 +0000 Subject: [PATCH 45/78] SIMAP Updater --- .../service/simap_updater/SimapUpdater.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 884df583d..20f5936ea 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -31,7 +31,7 @@ from simap_connector.service.telemetry.worker.data.Resources import ( from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum from simap_connector.service.telemetry.TelemetryPool import SynthesizerWorker, TelemetryPool from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER, LINKS_CAPACITY -from .MockSimaps import delete_mock_simap, set_mock_simap +# from .MockSimaps import delete_mock_simap, set_mock_simap from .ObjectCache import CachedEntities, ObjectCache from .SimapClient import SimapClient from .Tools import get_device_endpoint, get_link_endpoint, get_connection_endpoints_and_links #, get_service_endpoint @@ -488,7 +488,7 @@ class EventDispatcher(BaseEventDispatcher): return False domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net - set_mock_simap(self._simap_client, domain_name) + # set_mock_simap(self._simap_client, domain_name) #domain_topo = self._simap_client.network(domain_name) #domain_topo.update() @@ -598,7 +598,7 @@ class EventDispatcher(BaseEventDispatcher): return domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net - delete_mock_simap(self._simap_client, domain_name) + # delete_mock_simap(self._simap_client, domain_name) #domain_topo = self._simap_client.network(domain_name) #domain_topo.update() @@ -721,6 +721,9 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.warning('Connection {:s} not found in cache'.format(connection_uuid)) return None + MSG = 'Processing Connection: {:s}' + LOGGER.info(MSG.format(grpc_message_to_json_string(connection))) + # NOTE: Actual Connection event object does not include service_id. _, link_uuids = get_connection_endpoints_and_links(connection_uuid) -- GitLab From a4c2d0372fbbe53824111618cc6cdb21b0ed4233 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 20:37:22 +0000 Subject: [PATCH 46/78] feat: Enhance confidence scoring and update response structure in AI analytics API --- .../ai_model/ai_processor.py | 2 + .../AI_analytics_engine/api/api_blueprint.py | 41 ++++++------------- 2 files changed, 15 insertions(+), 28 deletions(-) diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py index 384b1070f..6c3a13f3e 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py @@ -128,6 +128,8 @@ class AIModelProcessor: normalized_error = rmse / data_std # Convert to confidence score (0-1 range, higher is better) confidence = max(0, min(1, 1 - normalized_error)) + if confidence < 0.9: + confidence += 0.1 # Boost confidence for borderline cases else: confidence = 0.5 # Default if std dev is 0 diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py b/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py index 3ea89d2d9..0ea6e2867 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/api/api_blueprint.py @@ -47,33 +47,12 @@ BASE_URL = f'http://{END_HOST}:{END_PORT}/osm/aiAnalyticsEvent/v1' def create_ai_analytics_blueprint( - simap_fetcher: SimapDataFetcher, + simap_fetcher: SimapDataFetcher, influxdb_fetcher: InfluxDBFetcher, - ai_processor: AIModelProcessor, - decision_client: DecisionEngineClient + ai_processor: AIModelProcessor, + decision_client: DecisionEngineClient ) -> Blueprint: - """ - Create the Flask Blueprint for the AI Analytics Engine REST API. - - This function creates and configures a Flask Blueprint with all the - REST API endpoints for the AI Analytics Engine. - - Args: - simap_fetcher: Initialized SimapDataFetcher instance. - influxdb_fetcher: Initialized InfluxDBFetcher instance. - ai_processor: Initialized AIModelProcessor instance. - decision_client: Initialized DecisionEngineClient instance. - - Returns: - Configured Flask Blueprint with routes: - - POST /api/v1/analyze: Start background SLA policy analysis - - POST /api/v1/analyze/stop: Stop analysis for specific SIMAP ID - - POST /api/v1/analyze/stop-all: Stop all running analyses - - GET /api/v1/status: Get status of all running analyses - - GET /api/v1/health: Health check endpoint - - GET /api/v1/config: Get current configuration - - POST /api/v1/notify: Handle telemetry update notifications - """ + blueprint = Blueprint('ai_analytics', __name__, url_prefix='/api/v1') def _background_analysis_task(sla_policy: SLAPolicyConfig, duration_minutes: int, stop_event: threading.Event): @@ -85,6 +64,7 @@ def create_ai_analytics_blueprint( stop_event: Threading event to signal task termination. """ simap_id = sla_policy.simap_id + _response = {} try: LOGGER.info(f"[{simap_id}] Starting background analysis for {duration_minutes} minutes") @@ -105,13 +85,18 @@ def create_ai_analytics_blueprint( results = ai_processor.process_data(performance_data) results['simap_id'] = simap_id - results['timestamp'] = datetime.now(UTC).isoformat() - results['iteration'] = iteration + # results['iteration'] = iteration + + _response['data'] = results + _response['status'] = 'success' + _response['message'] = f'Analysis completed successfully' + LOGGER.debug(f"[{simap_id}] Analysis iteration {iteration} - Posting results to {BASE_URL}") + LOGGER.debug(f"[{simap_id}] Results payload: {_response}") response = requests.post( BASE_URL, - json = results, + json = _response, timeout = 10, headers = {'Content-Type': 'application/json'} ) -- GitLab From 4a6829ac931194e3b72870c2cdd1b5e55405c1f9 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 21:07:54 +0000 Subject: [PATCH 47/78] feat: Update bandwidth and latency calculations in SyntheticSampler for improved accuracy --- .../worker/data/SyntheticSamplers.py | 36 ++++++++++++------- 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 3b50ceae5..f6a90733c 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -25,14 +25,20 @@ class SyntheticSampler: """Simple sampler with temporal continuity - next values stay close to previous values. Bandwidth ranges based on connection count: - 0 conns: avg=5%, range 0-10% - 1 conn: avg=25%, range 10-40% - 2 conns: avg=45%, range 30-60% - 3 conns: avg=65%, range 50-80% - 4+ conns: avg=85%, range 70-90% + 0 conns: avg=3%, range 1-10% + 1 conn: avg=25%, range 15-30% + 2 conns: avg=45%, range 35-55% + 3 conns: avg=65%, range 60-80% + 4+ conns: avg=85%, range 80-95% - Latency scales proportionally with bandwidth (0% BW → 1ms, 100% BW → 20ms). - Values vary by ±5% between consecutive samples for realistic jitter. + Latency uses bandwidth ranges divided by 10 (0-10ms): + 0 conns: avg=0.3ms, range 0.1-1.0ms + 1 conn: avg=2.5ms, range 1.5-3.0ms + 2 conns: avg=4.5ms, range 3.5-5.5ms + 3 conns: avg=6.5ms, range 6.0-8.0ms + 4+ conns: avg=8.5ms, range 8.0-9.5ms + + Values vary by ±1% between consecutive samples for temporal continuity. """ connection_count : int = field(default = 0) # Current connection count link_capacity : float = field(default = 100.0) # Link capacity in Gbps @@ -40,6 +46,7 @@ class SyntheticSampler: prev_latency : Optional[float] = field(default = None) # Previous latency (ms) # Connection count to (avg, min, max) percentage mapping + # Latency uses same ranges divided by 10 (0-10ms range) BW_RANGES = { 0: (3, 1, 10), 1: (25, 15, 30), @@ -82,18 +89,21 @@ class SyntheticSampler: bw_utilization = max(min_bw, min(max_bw, bw_utilization)) self.prev_bw = bw_utilization - # Latency scales proportionally with bandwidth (1ms at 0%, 20ms at 100%) - target_latency = 1.0 + (bw_utilization / 100.0) * 19.0 + # Generate latency using same pattern as bandwidth (BW ranges / 10 = 0-10ms range) + avg_lat = avg / 10.0 + min_lat = min_bw / 10.0 + max_lat = max_bw / 10.0 if self.prev_latency is None: - latency = target_latency + # First sample: start at average for this connection count + latency = avg_lat else: - # Add ±1% noise to previous latency + # Add ±1% noise to previous latency for temporal continuity noise_factor = random.uniform(-0.01, 0.01) latency = self.prev_latency * (1.0 + noise_factor) - # Clamp latency to reasonable range - latency = max(0.5, min(25.0, latency)) + # Clamp to current range (handles "jump" when connection count changes) + latency = max(min_lat, min(max_lat, latency)) self.prev_latency = latency # Convert percentage to actual utilization (Gbps) -- GitLab From fdce401e7f1f2c8a6b4b63c8de09cc96d4be3b38 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 23:38:41 +0000 Subject: [PATCH 48/78] feat: Implement SIMAP network configuration (V1) for E2E, Aggregation, and Transport Packet networks. --- .../service/simap_updater/RealSimaps.py | 128 ++++++++++++++++++ 1 file changed, 128 insertions(+) create mode 100644 src/simap_connector/service/simap_updater/RealSimaps.py diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py new file mode 100644 index 000000000..ae7b6b2e8 --- /dev/null +++ b/src/simap_connector/service/simap_updater/RealSimaps.py @@ -0,0 +1,128 @@ +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +import logging +from .SimapClient import SimapClient + + +LOGGER = logging.getLogger(__name__) + +# NOTE: for e2e --> network_data = [ +# ('ONT1', {'termination_points': ['200', '500']}), +# ('POP2', {'termination_points': ['200', '201', '500']}) +# ] + +def set_simap_network(simap_client: SimapClient, network_id: str, + network_data: list[dict] + ) -> None: + """ + Configure a SIMAP network with preset configurations. + + Args: + simap_client: SimapClient instance + network_id: Network identifier ('e2e', 'agg', or 'trans-pkt') + supp_net_ids: Tuple of supporting network IDs + term_point_ids: List of termination point IDs + """ + + if network_id == 'e2e': + # E2E Network Configuration + simap = simap_client.network('e2e') + simap.update(supporting_network_ids=['admin', 'agg']) + + # Configure nodes + node_names = ['sdp1', 'sdp2'] + endpoints = [] + + for i, (admin_node_id, node_config) in enumerate(network_data): + node = simap.node(node_names[i]) + node.update(supporting_node_ids=[('admin', admin_node_id)]) + for tp in node_config['termination_points']: + node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) + endpoints.append(tp) + + if len(endpoints) != 2: + MSG = 'Invalid number of endpoints for E2E network configuration. Expected 2, got {:d}.' + LOGGER.error(MSG.format(len(endpoints))) + return + + link = simap.link('E2E-L1') + link.update( + 'sdp1', endpoints[0], 'sdp2', endpoints[1], + supporting_link_ids=[ + ('admin', 'L1'), ('admin', 'L3'), ('agg', 'AggNet-L1') + ] + ) + + elif network_id == 'agg': + # Aggregation Network Configuration + simap = simap_client.network('agg') + simap.update(supporting_network_ids=['admin', 'trans-pkt']) + + # Configure nodes + node_names = ['sdp1', 'sdp2'] + endpoints = [] + for i, (admin_node_id, node_config) in enumerate(network_data): + node = simap.node(node_names[i]) + node.update(supporting_node_ids=[('admin', admin_node_id)]) + for tp in node_config['termination_points']: + node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) + endpoints.append(tp) + if len(endpoints) != 2: + MSG = 'Invalid number of endpoints for Aggregation network configuration. Expected 2, got {:d}.' + LOGGER.error(MSG.format(len(endpoints))) + return + + link = simap.link('AggNet-L1') + link.update( + 'sdp1', endpoints[0], 'sdp2', endpoints[1], + supporting_link_ids=[ + ('trans-pkt', 'Trans-L1'), ('admin', 'L13') + ] + ) + + elif network_id == 'trans-pkt': + # Transport Packet Network Configuration + simap = simap_client.network('trans-pkt') + simap.update(supporting_network_ids=['admin']) + + # Configure nodes + node_names = ['site1', 'site2'] + endpoints = [] + for i, (admin_node_id, node_config) in enumerate(network_data): + node = simap.node(node_names[i]) + node.update(supporting_node_ids=[('admin', admin_node_id)]) + for tp in node_config['termination_points']: + node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) + endpoints.append(tp) + if len(endpoints) != 2: + MSG = 'Invalid number of endpoints for Transport Packet network configuration. Expected 2, got {:d}.' + LOGGER.error(MSG.format(len(endpoints))) + return + + link = simap.link('Trans-L1') + link.update( + 'site1', endpoints[0], 'site2', endpoints[1], + supporting_link_ids=[ + ('admin', 'L5'), ('admin', 'L9') + ] + ) + + else: + MSG = 'Unsupported network_id({:s}) to set SIMAP' + LOGGER.warning(MSG.format(str(network_id))) + return + + LOGGER.info(f'Successfully configured SIMAP network: {network_id}') -- GitLab From 99782c4df2c32e398a7741c5a467341669a6992b Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Fri, 20 Feb 2026 23:50:36 +0000 Subject: [PATCH 49/78] feat: Enhance error handling and logging in SIMAP network configuration --- .../service/simap_updater/RealSimaps.py | 161 ++++++++++-------- 1 file changed, 91 insertions(+), 70 deletions(-) diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py index ae7b6b2e8..d5ad7514f 100644 --- a/src/simap_connector/service/simap_updater/RealSimaps.py +++ b/src/simap_connector/service/simap_updater/RealSimaps.py @@ -38,87 +38,108 @@ def set_simap_network(simap_client: SimapClient, network_id: str, """ if network_id == 'e2e': - # E2E Network Configuration - simap = simap_client.network('e2e') - simap.update(supporting_network_ids=['admin', 'agg']) + try: + # E2E Network Configuration + simap = simap_client.network('e2e') + simap.update(supporting_network_ids=['admin', 'agg']) - # Configure nodes - node_names = ['sdp1', 'sdp2'] - endpoints = [] + # Configure nodes + node_names = ['sdp1', 'sdp2'] + endpoints = [] - for i, (admin_node_id, node_config) in enumerate(network_data): - node = simap.node(node_names[i]) - node.update(supporting_node_ids=[('admin', admin_node_id)]) - for tp in node_config['termination_points']: - node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) - endpoints.append(tp) + for i, (admin_node_id, node_config) in enumerate(network_data): + node = simap.node(node_names[i]) + node.update(supporting_node_ids=[('admin', admin_node_id)]) + for tp in node_config['termination_points']: + node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) + endpoints.append(tp) - if len(endpoints) != 2: - MSG = 'Invalid number of endpoints for E2E network configuration. Expected 2, got {:d}.' - LOGGER.error(MSG.format(len(endpoints))) - return - - link = simap.link('E2E-L1') - link.update( - 'sdp1', endpoints[0], 'sdp2', endpoints[1], - supporting_link_ids=[ - ('admin', 'L1'), ('admin', 'L3'), ('agg', 'AggNet-L1') - ] - ) + if len(endpoints) != 2: + MSG = 'Invalid number of endpoints for E2E network configuration. Expected 2, got {:d}.' + LOGGER.error(MSG.format(len(endpoints))) + return + + link = simap.link('E2E-L1') + link.update( + 'sdp1', endpoints[0], 'sdp2', endpoints[1], + supporting_link_ids=[ + ('admin', 'L1'), ('admin', 'L3'), ('agg', 'AggNet-L1') + ] + ) + except (KeyError, IndexError, ValueError) as e: + LOGGER.error(f'Error configuring E2E network: {e}') + return + except Exception as e: + LOGGER.error(f'Unexpected error configuring E2E network: {e}') + return elif network_id == 'agg': - # Aggregation Network Configuration - simap = simap_client.network('agg') - simap.update(supporting_network_ids=['admin', 'trans-pkt']) + try: + # Aggregation Network Configuration + simap = simap_client.network('agg') + simap.update(supporting_network_ids=['admin', 'trans-pkt']) - # Configure nodes - node_names = ['sdp1', 'sdp2'] - endpoints = [] - for i, (admin_node_id, node_config) in enumerate(network_data): - node = simap.node(node_names[i]) - node.update(supporting_node_ids=[('admin', admin_node_id)]) - for tp in node_config['termination_points']: - node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) - endpoints.append(tp) - if len(endpoints) != 2: - MSG = 'Invalid number of endpoints for Aggregation network configuration. Expected 2, got {:d}.' - LOGGER.error(MSG.format(len(endpoints))) + # Configure nodes + node_names = ['sdp1', 'sdp2'] + endpoints = [] + for i, (admin_node_id, node_config) in enumerate(network_data): + node = simap.node(node_names[i]) + node.update(supporting_node_ids=[('admin', admin_node_id)]) + for tp in node_config['termination_points']: + node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) + endpoints.append(tp) + if len(endpoints) != 2: + MSG = 'Invalid number of endpoints for Aggregation network configuration. Expected 2, got {:d}.' + LOGGER.error(MSG.format(len(endpoints))) + return + + link = simap.link('AggNet-L1') + link.update( + 'sdp1', endpoints[0], 'sdp2', endpoints[1], + supporting_link_ids=[ + ('trans-pkt', 'Trans-L1'), ('admin', 'L13') + ] + ) + except (KeyError, IndexError, ValueError) as e: + LOGGER.error(f'Error configuring Aggregation network: {e}') + return + except Exception as e: + LOGGER.error(f'Unexpected error configuring Aggregation network: {e}') return - - link = simap.link('AggNet-L1') - link.update( - 'sdp1', endpoints[0], 'sdp2', endpoints[1], - supporting_link_ids=[ - ('trans-pkt', 'Trans-L1'), ('admin', 'L13') - ] - ) elif network_id == 'trans-pkt': - # Transport Packet Network Configuration - simap = simap_client.network('trans-pkt') - simap.update(supporting_network_ids=['admin']) + try: + # Transport Packet Network Configuration + simap = simap_client.network('trans-pkt') + simap.update(supporting_network_ids=['admin']) - # Configure nodes - node_names = ['site1', 'site2'] - endpoints = [] - for i, (admin_node_id, node_config) in enumerate(network_data): - node = simap.node(node_names[i]) - node.update(supporting_node_ids=[('admin', admin_node_id)]) - for tp in node_config['termination_points']: - node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) - endpoints.append(tp) - if len(endpoints) != 2: - MSG = 'Invalid number of endpoints for Transport Packet network configuration. Expected 2, got {:d}.' - LOGGER.error(MSG.format(len(endpoints))) - return + # Configure nodes + node_names = ['site1', 'site2'] + endpoints = [] + for i, (admin_node_id, node_config) in enumerate(network_data): + node = simap.node(node_names[i]) + node.update(supporting_node_ids=[('admin', admin_node_id)]) + for tp in node_config['termination_points']: + node.termination_point(tp).update(supporting_termination_point_ids=[('admin', admin_node_id, tp)]) + endpoints.append(tp) + if len(endpoints) != 2: + MSG = 'Invalid number of endpoints for Transport Packet network configuration. Expected 2, got {:d}.' + LOGGER.error(MSG.format(len(endpoints))) + return - link = simap.link('Trans-L1') - link.update( - 'site1', endpoints[0], 'site2', endpoints[1], - supporting_link_ids=[ - ('admin', 'L5'), ('admin', 'L9') - ] - ) + link = simap.link('Trans-L1') + link.update( + 'site1', endpoints[0], 'site2', endpoints[1], + supporting_link_ids=[ + ('admin', 'L5'), ('admin', 'L9') + ] + ) + except (KeyError, IndexError, ValueError) as e: + LOGGER.error(f'Error configuring Transport Packet network: {e}') + return + except Exception as e: + LOGGER.error(f'Unexpected error configuring Transport Packet network: {e}') + return else: MSG = 'Unsupported network_id({:s}) to set SIMAP' -- GitLab From 3c579977d8b156c6ee79d01272b3a9b025075dd7 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 12:54:01 +0000 Subject: [PATCH 50/78] feat: Implement network data extraction and SIMAP network management in RealSimaps --- .../service/simap_updater/RealSimaps.py | 181 +++++++++++++++++- .../service/simap_updater/SimapUpdater.py | 37 ++-- 2 files changed, 194 insertions(+), 24 deletions(-) diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py index d5ad7514f..8d9034319 100644 --- a/src/simap_connector/service/simap_updater/RealSimaps.py +++ b/src/simap_connector/service/simap_updater/RealSimaps.py @@ -14,29 +14,155 @@ import logging +from typing import Dict, List, Tuple +from context.client.ContextClient import ContextClient +from common.proto.context_pb2 import DeviceId, DeviceIdList from .SimapClient import SimapClient LOGGER = logging.getLogger(__name__) -# NOTE: for e2e --> network_data = [ -# ('ONT1', {'termination_points': ['200', '500']}), -# ('POP2', {'termination_points': ['200', '201', '500']}) -# ] -def set_simap_network(simap_client: SimapClient, network_id: str, - network_data: list[dict] - ) -> None: + + +# Use connection event log at IP: [2026-02-20 21:20:21,224] INFO:simap_connector.service.simap_updater.SimapUpdater:Processing Connection: {"connection_id": {"connection_uuid": {"uuid": "fdb61970-1a08-4f0e-acec-95d82a4b0d32"}}, "path_hops_endpoint_ids": [{"device_id": {"device_uuid": {"uuid": "c4b22f0f-d958-5895-a452-cac82e11ef90"}}, "endpoint_uuid": {"uuid": "97f60155-b852-5607-9ba5-1b41e228f04d"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "c4b22f0f-d958-5895-a452-cac82e11ef90"}}, "endpoint_uuid": {"uuid": "f9cea78e-0de3-5c8a-93f7-b99d207ae709"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "706e20a6-1f43-522d-9500-0bafd8131899"}}, "endpoint_uuid": {"uuid": "7dfc3453-a6df-584d-9e74-8ad4bfa301da"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "706e20a6-1f43-522d-9500-0bafd8131899"}}, "endpoint_uuid": {"uuid": "559070fb-857e-56b4-8453-dd427c489f59"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "7ae7e7bc-c4db-50a2-ad59-b5b7d9bcdc47"}}, "endpoint_uuid": {"uuid": "842aa058-7ac6-57f4-bca7-496070518b11"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "7ae7e7bc-c4db-50a2-ad59-b5b7d9bcdc47"}}, "endpoint_uuid": {"uuid": "dc6ac139-e4c9-5b27-a693-17e4a06aaf37"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}], "service_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "service_uuid": {"uuid": "e1ed09d2-dc76-5183-a245-855e5a596af2"}}, "settings": {}, "sub_service_ids: []} +# We need to extract the connection's path hops and identify which links are involved, then determine the domain (topology) to which this connection belongs, and finally filter links based on allowed links per controller. +def extract_network_data(context_client: ContextClient, network_id: str, network_connection: dict) -> list[tuple[str, dict]]: + """ + Extract network data from a connection object for SIMAP hierarchical network configuration. + Extracts only the first and last devices (Service Demarcation Points) with their service-facing endpoints. + + Args: + network_id: The network identifier (e.g., 'e2e', 'agg', 'trans-pkt') + network_connection: Dictionary representation of a Connection protobuf message containing path_hops_endpoint_ids + + Returns: + List of exactly 2 tuples: [(first_device_name, {'termination_points': [endpoint]}), + (last_device_name, {'termination_points': [endpoint]})] + For example: [('P-PE1', {'termination_points': ['200']}), ('P-PE2', {'termination_points': ['200']})] + """ + try: + # Extract path_hops_endpoint_ids from network_connection dict + path_hops = network_connection.get('path_hops_endpoint_ids', []) + + if not path_hops: + LOGGER.warning(f"No path_hops_endpoint_ids found in network_connection for network {network_id}") + return [] + + if len(path_hops) < 2: + LOGGER.warning(f"Connection path too short (less than 2 hops) for network {network_id}") + return [] + + # Extract first and last hops (SDPs - Service Demarcation Points) + first_hop = path_hops[0] + last_hop = path_hops[-1] + + # Extract device and endpoint UUIDs for SDPs + first_device_uuid = first_hop.get('device_id', {}).get('device_uuid', {}).get('uuid', '') + first_endpoint_uuid = first_hop.get('endpoint_uuid', {}).get('uuid', '') + + last_device_uuid = last_hop.get('device_id', {}).get('device_uuid', {}).get('uuid', '') + last_endpoint_uuid = last_hop.get('endpoint_uuid', {}).get('uuid', '') + + if not all([first_device_uuid, first_endpoint_uuid, last_device_uuid, last_endpoint_uuid]): + LOGGER.warning(f"Invalid first or last hop in path_hops_endpoint_ids for network {network_id}") + return [] + + # Prepare results for exactly 2 SDPs + network_data: List[Tuple[str, Dict[str, List[str]]]] = [] + + # Process first device (sdp1) + first_device_id = DeviceId() + first_device_id.device_uuid.uuid = first_device_uuid + + try: + device_list = context_client.SelectDevice( + DeviceIdList(device_ids=[first_device_id]), + include_endpoints=True, + include_config_rules=False, + include_components=False + ) + if not device_list.devices: + LOGGER.warning(f"First device with UUID {first_device_uuid} not found in context") + return [] + + first_device = device_list.devices[0] + first_device_name = first_device.name + + # Find the service-facing endpoint name + first_endpoint_name = None + for endpoint in first_device.device_endpoints: + if endpoint.endpoint_id.endpoint_uuid.uuid == first_endpoint_uuid: + first_endpoint_name = endpoint.name + break + + if not first_endpoint_name: + LOGGER.warning(f"First endpoint {first_endpoint_uuid} not found in device {first_device_name}") + return [] + + network_data.append((first_device_name, {'termination_points': [first_endpoint_name]})) + + except Exception as e: + LOGGER.error(f"Error retrieving first device {first_device_uuid} from context: {e}") + return [] + + # Process last device (sdp2) + last_device_id = DeviceId() + last_device_id.device_uuid.uuid = last_device_uuid + + try: + device_list = context_client.SelectDevice( + DeviceIdList(device_ids=[last_device_id]), + include_endpoints=True, + include_config_rules=False, + include_components=False + ) + if not device_list.devices: + LOGGER.warning(f"Last device with UUID {last_device_uuid} not found in context") + return [] + + last_device = device_list.devices[0] + last_device_name = last_device.name + + # Find the service-facing endpoint name + last_endpoint_name = None + for endpoint in last_device.device_endpoints: + if endpoint.endpoint_id.endpoint_uuid.uuid == last_endpoint_uuid: + last_endpoint_name = endpoint.name + break + + if not last_endpoint_name: + LOGGER.warning(f"Last endpoint {last_endpoint_uuid} not found in device {last_device_name}") + return [] + + network_data.append((last_device_name, {'termination_points': [last_endpoint_name]})) + + except Exception as e: + LOGGER.error(f"Error retrieving last device {last_device_uuid} from context: {e}") + return [] + + LOGGER.info(f"Extracted network data for {network_id}: {network_data}") + return network_data + + except Exception as e: + LOGGER.error(f"Error extracting network data from connection for network {network_id}: {e}") + return [] + + +def set_simap_network(context_client: ContextClient, simap_client: SimapClient, network_id: str, network_connection: dict) -> None: """ Configure a SIMAP network with preset configurations. Args: + context_client: ContextClient instance simap_client: SimapClient instance network_id: Network identifier ('e2e', 'agg', or 'trans-pkt') - supp_net_ids: Tuple of supporting network IDs - term_point_ids: List of termination point IDs + network_connection: Dictionary representation of Connection protobuf with path_hops_endpoint_ids """ + LOGGER.info(f"Setting SIMAP network: {network_id} for connection with {len(network_connection.get('path_hops_endpoint_ids', []))} hops") + network_data : list[tuple[str, dict]] = extract_network_data(context_client, network_id, network_connection) + if network_id == 'e2e': try: # E2E Network Configuration @@ -147,3 +273,40 @@ def set_simap_network(simap_client: SimapClient, network_id: str, return LOGGER.info(f'Successfully configured SIMAP network: {network_id}') + + +def delete_simap_network(simap_client: SimapClient, network_id: str) -> None: + """ + Delete a SIMAP network configuration. + + Args: + simap_client: SimapClient instance + network_id: Network identifier ('e2e', 'agg', or 'trans-pkt') + """ + if network_id == 'e2e': + simap = simap_client.network('e2e') + simap.update(supporting_network_ids=['admin', 'agg']) + + link = simap.link('E2E-L1') + link.delete() + + elif network_id == 'agg': + simap = simap_client.network('agg') + simap.update(supporting_network_ids=['admin', 'trans-pkt']) + + link = simap.link('AggNet-L1') + link.delete() + + elif network_id == 'trans-pkt': + simap = simap_client.network('trans-pkt') + simap.update(supporting_network_ids=['admin']) + + link = simap.link('Trans-L1') + link.delete() + + else: + MSG = 'Unsupported network_id({:s}) to delete SIMAP' + LOGGER.warning(MSG.format(str(network_id))) + return + + LOGGER.info(f'Successfully deleted SIMAP network: {network_id}') diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 20f5936ea..847503d68 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -23,13 +23,14 @@ from common.proto.context_pb2 import ( ) from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher -from common.tools.grpc.Tools import grpc_message_to_json_string +from common.tools.grpc.Tools import grpc_message_to_json_string, grpc_message_to_json from context.client.ContextClient import ContextClient from simap_connector.service.telemetry.worker.data.Resources import ( ResourceLink, Resources, SyntheticSampler ) from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum from simap_connector.service.telemetry.TelemetryPool import SynthesizerWorker, TelemetryPool +from src.simap_connector.service.simap_updater.RealSimaps import set_simap_network, delete_simap_network from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER, LINKS_CAPACITY # from .MockSimaps import delete_mock_simap, set_mock_simap from .ObjectCache import CachedEntities, ObjectCache @@ -478,16 +479,16 @@ class EventDispatcher(BaseEventDispatcher): # LOGGER.warning(MSG.format(str_service_event, str_service)) # return False - topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) - topology_names = {t.name for t in topologies} - topology_names.discard(DEFAULT_TOPOLOGY_NAME) - if len(topology_names) != 1: - MSG = 'ServiceEvent({:s}) skipped, unable to identify on which topology to insert it' - str_service_event = grpc_message_to_json_string(service_event) - LOGGER.warning(MSG.format(str_service_event)) - return False + # topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) + # topology_names = {t.name for t in topologies} + # topology_names.discard(DEFAULT_TOPOLOGY_NAME) + # if len(topology_names) != 1: + # MSG = 'ServiceEvent({:s}) skipped, unable to identify on which topology to insert it' + # str_service_event = grpc_message_to_json_string(service_event) + # LOGGER.warning(MSG.format(str_service_event)) + # return False - domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net + # domain_name = topology_names.pop() # trans-pkt/agg-net/e2e-net # set_mock_simap(self._simap_client, domain_name) #domain_topo = self._simap_client.network(domain_name) @@ -724,11 +725,9 @@ class EventDispatcher(BaseEventDispatcher): MSG = 'Processing Connection: {:s}' LOGGER.info(MSG.format(grpc_message_to_json_string(connection))) - # NOTE: Actual Connection event object does not include service_id. - _, link_uuids = get_connection_endpoints_and_links(connection_uuid) - # Determine the controller's domain name + # Determine the controller's domain name (network_id) topologies = self._object_cache.get_all(CachedEntities.TOPOLOGY, fresh=False) topology_names = {t.name for t in topologies} topology_names.discard(DEFAULT_TOPOLOGY_NAME) @@ -736,6 +735,11 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.warning('Unable to identify self-controller for connection {:s} and {!r}'.format(connection_uuid, topology_names)) return None domain_name = topology_names.pop() + + # Call set_simap_network with proper parameters + network_connection = grpc_message_to_json(connection) + set_simap_network(self._context_client, self._simap_client, domain_name, network_connection) + LOGGER.info('Set SIMAP network for connection {:s} in domain {:s}'.format(connection_uuid, domain_name)) # Filter links based on ALLOWED_LINKS_PER_CONTROLLER allowed_link_names = ALLOWED_LINKS_PER_CONTROLLER.get(domain_name, set()) @@ -746,7 +750,7 @@ class EventDispatcher(BaseEventDispatcher): if link.name in allowed_link_names: # Get the link's topology for worker naming link_topology_uuid, _ = get_link_endpoint(link) - link_topology = self._object_cache.get(CachedEntities.TOPOLOGY, link_topology_uuid) + link_topology = self._object_cache.get(CachedEntities.TOPOLOGY, link_topology_uuid) processed_links.append((link_uuid, link.name, link_topology.name)) if not processed_links: @@ -764,7 +768,7 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.debug('Cached connection {:s} mapping with {:d} links for domain {:s}'.format( connection_uuid, len(processed_links), domain_name)) - return domain_name, processed_links # NOTE: Domain name = topology name + return domain_name, processed_links def _count_active_connections(self, link_uuid: str, domain_name: str, ) -> int: @@ -855,6 +859,9 @@ class EventDispatcher(BaseEventDispatcher): # No other connections use this link, stop the worker self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) LOGGER.info('Stopped telemetry worker for link {:s}, no connections remain'.format(link_name)) + + delete_simap_network(self._context_client, self._simap_client, domain_name) + LOGGER.info('Deleted SIMAP network for domain {:s} after connection removal'.format(domain_name)) else: # Other connections still use this link, update worker with new count worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) -- GitLab From 1b8cbe879b3c1c827817078d9ed85ded35b72e77 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 13:12:13 +0000 Subject: [PATCH 51/78] feat: Update deployment scripts for MWC26 F5G SIMAP demo --- src/simap_connector/service/simap_updater/RealSimaps.py | 2 +- src/tests/mwc26-f5ga/deploy-specs-agg.sh | 2 +- src/tests/mwc26-f5ga/deploy-specs-e2e.sh | 2 +- src/tests/mwc26-f5ga/deploy-specs-ip.sh | 2 +- src/tests/mwc26-f5ga/deploy.sh | 6 +++--- 5 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py index 8d9034319..0ffa30590 100644 --- a/src/simap_connector/service/simap_updater/RealSimaps.py +++ b/src/simap_connector/service/simap_updater/RealSimaps.py @@ -257,7 +257,7 @@ def set_simap_network(context_client: ContextClient, simap_client: SimapClient, link.update( 'site1', endpoints[0], 'site2', endpoints[1], supporting_link_ids=[ - ('admin', 'L5'), ('admin', 'L9') + ('admin', 'L6'), ('admin', 'L10') ] ) except (KeyError, IndexError, ValueError) as e: diff --git a/src/tests/mwc26-f5ga/deploy-specs-agg.sh b/src/tests/mwc26-f5ga/deploy-specs-agg.sh index c7b5e98b5..41e063dfc 100644 --- a/src/tests/mwc26-f5ga/deploy-specs-agg.sh +++ b/src/tests/mwc26-f5ga/deploy-specs-agg.sh @@ -159,7 +159,7 @@ export NATS_EXT_PORT_HTTP="8222" export NATS_DEPLOY_MODE="single" # Disable flag for re-deploying NATS from scratch. -export NATS_REDEPLOY="" +export NATS_REDEPLOY="YES" # ----- Apache Kafka ----------------------------------------------------------- diff --git a/src/tests/mwc26-f5ga/deploy-specs-e2e.sh b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh index c7b5e98b5..41e063dfc 100644 --- a/src/tests/mwc26-f5ga/deploy-specs-e2e.sh +++ b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh @@ -159,7 +159,7 @@ export NATS_EXT_PORT_HTTP="8222" export NATS_DEPLOY_MODE="single" # Disable flag for re-deploying NATS from scratch. -export NATS_REDEPLOY="" +export NATS_REDEPLOY="YES" # ----- Apache Kafka ----------------------------------------------------------- diff --git a/src/tests/mwc26-f5ga/deploy-specs-ip.sh b/src/tests/mwc26-f5ga/deploy-specs-ip.sh index c02dac122..641598842 100644 --- a/src/tests/mwc26-f5ga/deploy-specs-ip.sh +++ b/src/tests/mwc26-f5ga/deploy-specs-ip.sh @@ -159,7 +159,7 @@ export NATS_EXT_PORT_HTTP="8222" export NATS_DEPLOY_MODE="single" # Disable flag for re-deploying NATS from scratch. -export NATS_REDEPLOY="" +export NATS_REDEPLOY="YES" # ----- Apache Kafka ----------------------------------------------------------- diff --git a/src/tests/mwc26-f5ga/deploy.sh b/src/tests/mwc26-f5ga/deploy.sh index 1ec4ed4f0..c7844b8d9 100755 --- a/src/tests/mwc26-f5ga/deploy.sh +++ b/src/tests/mwc26-f5ga/deploy.sh @@ -69,7 +69,7 @@ case "$HOSTNAME" in tfs-e2e-ctrl) echo "Deploying TFS E2E Controller..." sed -i 's|\(

ETSI TeraFlowSDN Controller\)

|\1 (End-to-End)|' src/webui/service/templates/main/home.html - source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-e2e.sh + source ~/tfs-ctrl/src/tests/mwc26-f5ga/deploy-specs-e2e.sh ./deploy/all.sh echo "Waiting for NATS connection..." @@ -79,7 +79,7 @@ case "$HOSTNAME" in tfs-agg-ctrl) echo "Deploying TFS Agg Controller..." sed -i 's|\(

ETSI TeraFlowSDN Controller\)

|\1 (Aggregation)|' src/webui/service/templates/main/home.html - source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-agg.sh + source ~/tfs-ctrl/src/tests/mwc26-f5ga/deploy-specs-agg.sh ./deploy/all.sh echo "Waiting for NATS connection..." @@ -89,7 +89,7 @@ case "$HOSTNAME" in tfs-ip-ctrl) echo "Deploying TFS IP Controller..." sed -i 's|\(

ETSI TeraFlowSDN Controller\)

|\1 (IP)|' src/webui/service/templates/main/home.html - source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh + source ~/tfs-ctrl/src/tests/mwc26-f5ga/deploy-specs-ip.sh ./deploy/all.sh # echo "Waiting for NATS connection..." -- GitLab From 574b81a16f25e782a49685aac7703b486e9c391b Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 13:18:02 +0000 Subject: [PATCH 52/78] feat: Refactor import statements for consistency in SimapUpdater.py --- .../service/simap_updater/SimapUpdater.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 847503d68..7be8a4dde 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -21,21 +21,21 @@ from common.proto.context_pb2 import ( ContextEvent, DeviceEvent, Empty, LinkEvent, ServiceEvent, ServiceStatusEnum, SliceEvent, TopologyEvent, ConnectionEvent ) -from common.tools.grpc.BaseEventCollector import BaseEventCollector +from common.tools.grpc.BaseEventCollector import BaseEventCollector from common.tools.grpc.BaseEventDispatcher import BaseEventDispatcher -from common.tools.grpc.Tools import grpc_message_to_json_string, grpc_message_to_json +from common.tools.grpc.Tools import grpc_message_to_json_string, grpc_message_to_json from context.client.ContextClient import ContextClient from simap_connector.service.telemetry.worker.data.Resources import ( ResourceLink, Resources, SyntheticSampler ) from simap_connector.service.telemetry.worker._Worker import WorkerTypeEnum -from simap_connector.service.telemetry.TelemetryPool import SynthesizerWorker, TelemetryPool -from src.simap_connector.service.simap_updater.RealSimaps import set_simap_network, delete_simap_network +from simap_connector.service.telemetry.TelemetryPool import SynthesizerWorker, TelemetryPool +from .RealSimaps import set_simap_network, delete_simap_network from .AllowedLinks import ALLOWED_LINKS_PER_CONTROLLER, LINKS_CAPACITY # from .MockSimaps import delete_mock_simap, set_mock_simap -from .ObjectCache import CachedEntities, ObjectCache -from .SimapClient import SimapClient -from .Tools import get_device_endpoint, get_link_endpoint, get_connection_endpoints_and_links #, get_service_endpoint +from .ObjectCache import CachedEntities, ObjectCache +from .SimapClient import SimapClient +from .Tools import get_device_endpoint, get_link_endpoint, get_connection_endpoints_and_links #, get_service_endpoint LOGGER = logging.getLogger(__name__) -- GitLab From e2b124b6ebcd5d6006d5d01761c9617736211164 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 13:55:18 +0000 Subject: [PATCH 53/78] feat: Simplify SIMAP network deletion by removing unnecessary context client parameter --- src/simap_connector/service/simap_updater/SimapUpdater.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 7be8a4dde..455d1f961 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -860,7 +860,7 @@ class EventDispatcher(BaseEventDispatcher): self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) LOGGER.info('Stopped telemetry worker for link {:s}, no connections remain'.format(link_name)) - delete_simap_network(self._context_client, self._simap_client, domain_name) + delete_simap_network(self._simap_client, domain_name) LOGGER.info('Deleted SIMAP network for domain {:s} after connection removal'.format(domain_name)) else: # Other connections still use this link, update worker with new count -- GitLab From 8d8e0d54d9c7b7d4634d24bf9932b6661030d23c Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 20:43:28 +0000 Subject: [PATCH 54/78] feat: Disable NATS re-deploy flag in deployment scripts --- src/tests/mwc26-f5ga/deploy-specs-agg.sh | 2 +- src/tests/mwc26-f5ga/deploy-specs-e2e.sh | 2 +- src/tests/mwc26-f5ga/deploy-specs-ip.sh | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/tests/mwc26-f5ga/deploy-specs-agg.sh b/src/tests/mwc26-f5ga/deploy-specs-agg.sh index 41e063dfc..c7b5e98b5 100644 --- a/src/tests/mwc26-f5ga/deploy-specs-agg.sh +++ b/src/tests/mwc26-f5ga/deploy-specs-agg.sh @@ -159,7 +159,7 @@ export NATS_EXT_PORT_HTTP="8222" export NATS_DEPLOY_MODE="single" # Disable flag for re-deploying NATS from scratch. -export NATS_REDEPLOY="YES" +export NATS_REDEPLOY="" # ----- Apache Kafka ----------------------------------------------------------- diff --git a/src/tests/mwc26-f5ga/deploy-specs-e2e.sh b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh index 41e063dfc..c7b5e98b5 100644 --- a/src/tests/mwc26-f5ga/deploy-specs-e2e.sh +++ b/src/tests/mwc26-f5ga/deploy-specs-e2e.sh @@ -159,7 +159,7 @@ export NATS_EXT_PORT_HTTP="8222" export NATS_DEPLOY_MODE="single" # Disable flag for re-deploying NATS from scratch. -export NATS_REDEPLOY="YES" +export NATS_REDEPLOY="" # ----- Apache Kafka ----------------------------------------------------------- diff --git a/src/tests/mwc26-f5ga/deploy-specs-ip.sh b/src/tests/mwc26-f5ga/deploy-specs-ip.sh index 641598842..c02dac122 100644 --- a/src/tests/mwc26-f5ga/deploy-specs-ip.sh +++ b/src/tests/mwc26-f5ga/deploy-specs-ip.sh @@ -159,7 +159,7 @@ export NATS_EXT_PORT_HTTP="8222" export NATS_DEPLOY_MODE="single" # Disable flag for re-deploying NATS from scratch. -export NATS_REDEPLOY="YES" +export NATS_REDEPLOY="" # ----- Apache Kafka ----------------------------------------------------------- -- GitLab From 147657793e955c378c6b02a484177921701545fa Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 20:59:16 +0000 Subject: [PATCH 55/78] feat: Refactor extract_network_data to use get_device for improved device retrieval --- .../service/simap_updater/RealSimaps.py | 47 ++++--------------- 1 file changed, 9 insertions(+), 38 deletions(-) diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py index 0ffa30590..1eae347c0 100644 --- a/src/simap_connector/service/simap_updater/RealSimaps.py +++ b/src/simap_connector/service/simap_updater/RealSimaps.py @@ -16,31 +16,14 @@ import logging from typing import Dict, List, Tuple from context.client.ContextClient import ContextClient -from common.proto.context_pb2 import DeviceId, DeviceIdList +from common.tools.context_queries.Device import get_device from .SimapClient import SimapClient LOGGER = logging.getLogger(__name__) - - - -# Use connection event log at IP: [2026-02-20 21:20:21,224] INFO:simap_connector.service.simap_updater.SimapUpdater:Processing Connection: {"connection_id": {"connection_uuid": {"uuid": "fdb61970-1a08-4f0e-acec-95d82a4b0d32"}}, "path_hops_endpoint_ids": [{"device_id": {"device_uuid": {"uuid": "c4b22f0f-d958-5895-a452-cac82e11ef90"}}, "endpoint_uuid": {"uuid": "97f60155-b852-5607-9ba5-1b41e228f04d"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "c4b22f0f-d958-5895-a452-cac82e11ef90"}}, "endpoint_uuid": {"uuid": "f9cea78e-0de3-5c8a-93f7-b99d207ae709"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "706e20a6-1f43-522d-9500-0bafd8131899"}}, "endpoint_uuid": {"uuid": "7dfc3453-a6df-584d-9e74-8ad4bfa301da"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "706e20a6-1f43-522d-9500-0bafd8131899"}}, "endpoint_uuid": {"uuid": "559070fb-857e-56b4-8453-dd427c489f59"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "7ae7e7bc-c4db-50a2-ad59-b5b7d9bcdc47"}}, "endpoint_uuid": {"uuid": "842aa058-7ac6-57f4-bca7-496070518b11"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}, {"device_id": {"device_uuid": {"uuid": "7ae7e7bc-c4db-50a2-ad59-b5b7d9bcdc47"}}, "endpoint_uuid": {"uuid": "dc6ac139-e4c9-5b27-a693-17e4a06aaf37"}, "topology_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "topology_uuid": {"uuid": "c76135e3-24a8-5e92-9bed-c3c9139359c8"}}}], "service_id": {"context_id": {"context_uuid": {"uuid": "43813baf-195e-5da6-af20-b3d0922e71a7"}}, "service_uuid": {"uuid": "e1ed09d2-dc76-5183-a245-855e5a596af2"}}, "settings": {}, "sub_service_ids: []} -# We need to extract the connection's path hops and identify which links are involved, then determine the domain (topology) to which this connection belongs, and finally filter links based on allowed links per controller. def extract_network_data(context_client: ContextClient, network_id: str, network_connection: dict) -> list[tuple[str, dict]]: - """ - Extract network data from a connection object for SIMAP hierarchical network configuration. - Extracts only the first and last devices (Service Demarcation Points) with their service-facing endpoints. - Args: - network_id: The network identifier (e.g., 'e2e', 'agg', 'trans-pkt') - network_connection: Dictionary representation of a Connection protobuf message containing path_hops_endpoint_ids - - Returns: - List of exactly 2 tuples: [(first_device_name, {'termination_points': [endpoint]}), - (last_device_name, {'termination_points': [endpoint]})] - For example: [('P-PE1', {'termination_points': ['200']}), ('P-PE2', {'termination_points': ['200']})] - """ try: # Extract path_hops_endpoint_ids from network_connection dict path_hops = network_connection.get('path_hops_endpoint_ids', []) @@ -72,21 +55,15 @@ def extract_network_data(context_client: ContextClient, network_id: str, network network_data: List[Tuple[str, Dict[str, List[str]]]] = [] # Process first device (sdp1) - first_device_id = DeviceId() - first_device_id.device_uuid.uuid = first_device_uuid - try: - device_list = context_client.SelectDevice( - DeviceIdList(device_ids=[first_device_id]), - include_endpoints=True, - include_config_rules=False, - include_components=False + first_device = get_device( + context_client, first_device_uuid, rw_copy=False, + include_endpoints=True, include_config_rules=False, include_components=False ) - if not device_list.devices: + if first_device is None: LOGGER.warning(f"First device with UUID {first_device_uuid} not found in context") return [] - first_device = device_list.devices[0] first_device_name = first_device.name # Find the service-facing endpoint name @@ -107,21 +84,15 @@ def extract_network_data(context_client: ContextClient, network_id: str, network return [] # Process last device (sdp2) - last_device_id = DeviceId() - last_device_id.device_uuid.uuid = last_device_uuid - try: - device_list = context_client.SelectDevice( - DeviceIdList(device_ids=[last_device_id]), - include_endpoints=True, - include_config_rules=False, - include_components=False + last_device = get_device( + context_client, last_device_uuid, rw_copy=False, + include_endpoints=True, include_config_rules=False, include_components=False ) - if not device_list.devices: + if last_device is None: LOGGER.warning(f"Last device with UUID {last_device_uuid} not found in context") return [] - last_device = device_list.devices[0] last_device_name = last_device.name # Find the service-facing endpoint name -- GitLab From 6d8821b522c9b574f66e007e8a2468beef759f38 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 21:19:58 +0000 Subject: [PATCH 56/78] feat: Add telemetry subscription script for different controller types --- .../mwc26-f5ga/telemetry-subscribe-slice1.py | 20 +++++---- src/tests/mwc26-f5ga/telemetry-subscribe.sh | 42 +++++++++++++++++++ 2 files changed, 55 insertions(+), 7 deletions(-) create mode 100755 src/tests/mwc26-f5ga/telemetry-subscribe.sh diff --git a/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py b/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py index 559556829..b2cda8de3 100644 --- a/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py +++ b/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py @@ -13,21 +13,27 @@ # limitations under the License. +import sys import requests from requests.auth import HTTPBasicAuth -RESTCONF_ADDRESS = '127.0.0.1' -RESTCONF_PORT = 80 -TARGET_SIMAP_NAME = 'e2e' -TARGET_LINK_NAME = 'E2E-L1' +if len(sys.argv) < 3: + print('Usage: {:s} '.format(sys.argv[0])) + print('Example: {:s} e2e E2E-L1'.format(sys.argv[0])) + sys.exit(1) + +RESTCONF_ADDRESS = '127.0.0.1' +RESTCONF_PORT = 80 +TARGET_SIMAP_NAME = sys.argv[1] +TARGET_LINK_NAME = sys.argv[2] SAMPLING_INTERVAL = 10.0 SUBSCRIBE_URI = '/restconf/operations/subscriptions:establish-subscription' SUBSCRIBE_URL = 'http://{:s}:{:d}{:s}'.format(RESTCONF_ADDRESS, RESTCONF_PORT, SUBSCRIBE_URI) -XPATH_FILTER = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' -REQUEST = { +XPATH_FILTER = '/ietf-network:networks/network={:s}/ietf-network-topology:link={:s}/simap-telemetry:simap-telemetry' +REQUEST = { 'ietf-subscribed-notifications:input': { 'datastore': 'operational', 'ietf-yang-push:datastore-xpath-filter': XPATH_FILTER.format(TARGET_SIMAP_NAME, TARGET_LINK_NAME), @@ -39,7 +45,7 @@ REQUEST = { def main() -> None: - print('[E2E] Subscribe Telemetry slice1...') + print('[{:s}] Subscribe Telemetry slice1 for link {:s}...'.format(TARGET_SIMAP_NAME.upper(), TARGET_LINK_NAME)) headers = {'accept': 'application/json'} auth = HTTPBasicAuth('admin', 'admin') print(SUBSCRIBE_URL) diff --git a/src/tests/mwc26-f5ga/telemetry-subscribe.sh b/src/tests/mwc26-f5ga/telemetry-subscribe.sh new file mode 100755 index 000000000..da2050d83 --- /dev/null +++ b/src/tests/mwc26-f5ga/telemetry-subscribe.sh @@ -0,0 +1,42 @@ +#!/bin/bash +# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Set working directory +cd "$(dirname "$0")" || exit 1 + +# Get the current hostname +HOSTNAME=$(hostname) +echo "Starting telemetry subscription for ${HOSTNAME}..." + +case "$HOSTNAME" in + tfs-e2e-ctrl) + echo "Subscribing to E2E Controller telemetry..." + python3 telemetry-subscribe-slice1.py e2e E2E-L1 + ;; + tfs-agg-ctrl) + echo "Subscribing to Aggregation Controller telemetry..." + python3 telemetry-subscribe-slice1.py agg AggNet-L1 + ;; + tfs-ip-ctrl) + echo "Subscribing to IP Controller telemetry..." + python3 telemetry-subscribe-slice1.py ip Trans-L1 + ;; + *) + echo "Unknown host: $HOSTNAME" + echo "Usage: $0" + echo " This script must be run on tfs-e2e-ctrl, tfs-agg-ctrl, or tfs-ip-ctrl" + exit 1 + ;; +esac -- GitLab From dafe1dcc25f25188bb6be8eb51017a62bc7c2666 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 21:23:15 +0000 Subject: [PATCH 57/78] feat: Add telemetry subscription script for specific controller types --- .../{telemetry-subscribe.sh => run_telemetry-subscribe.sh} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/tests/mwc26-f5ga/{telemetry-subscribe.sh => run_telemetry-subscribe.sh} (100%) diff --git a/src/tests/mwc26-f5ga/telemetry-subscribe.sh b/src/tests/mwc26-f5ga/run_telemetry-subscribe.sh similarity index 100% rename from src/tests/mwc26-f5ga/telemetry-subscribe.sh rename to src/tests/mwc26-f5ga/run_telemetry-subscribe.sh -- GitLab From 3a6f95e9faff9d3dddadc1fec23b7ab994283780 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 21:34:55 +0000 Subject: [PATCH 58/78] feat: Update telemetry subscription script for IP Controller and optimize output logging --- src/tests/mwc26-f5ga/run_telemetry-subscribe.sh | 2 +- src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py | 5 +++-- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/src/tests/mwc26-f5ga/run_telemetry-subscribe.sh b/src/tests/mwc26-f5ga/run_telemetry-subscribe.sh index da2050d83..853e269e9 100755 --- a/src/tests/mwc26-f5ga/run_telemetry-subscribe.sh +++ b/src/tests/mwc26-f5ga/run_telemetry-subscribe.sh @@ -31,7 +31,7 @@ case "$HOSTNAME" in ;; tfs-ip-ctrl) echo "Subscribing to IP Controller telemetry..." - python3 telemetry-subscribe-slice1.py ip Trans-L1 + python3 telemetry-subscribe-slice1.py trans-pkt Trans-L1 ;; *) echo "Unknown host: $HOSTNAME" diff --git a/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py b/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py index b2cda8de3..92d23ad34 100644 --- a/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py +++ b/src/tests/mwc26-f5ga/telemetry-subscribe-slice1.py @@ -71,8 +71,9 @@ def main() -> None: print('Opening stream "{:s}" (press Ctrl+C to stop)...'.format(stream_url)) with requests.get(stream_url, stream=True, auth=auth) as resp: - for line in resp.iter_lines(decode_unicode=True): - print(line) + for i, line in enumerate(resp.iter_lines(decode_unicode=True), 1): + if i % 10 == 0: + print(line) if __name__ == '__main__': main() -- GitLab From d9750f5813b91f30e0f882debc2753ededc9031b Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sat, 21 Feb 2026 22:00:23 +0000 Subject: [PATCH 59/78] feat: Enhance CollectorWorker with HTTP Basic Auth and update request handling in stream and simap polling --- src/simap_connector/service/simap_updater/RealSimaps.py | 6 +++--- .../service/telemetry/worker/CollectorWorker.py | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py index 1eae347c0..184df61e9 100644 --- a/src/simap_connector/service/simap_updater/RealSimaps.py +++ b/src/simap_connector/service/simap_updater/RealSimaps.py @@ -23,7 +23,7 @@ from .SimapClient import SimapClient LOGGER = logging.getLogger(__name__) def extract_network_data(context_client: ContextClient, network_id: str, network_connection: dict) -> list[tuple[str, dict]]: - + try: # Extract path_hops_endpoint_ids from network_connection dict path_hops = network_connection.get('path_hops_endpoint_ids', []) @@ -160,7 +160,7 @@ def set_simap_network(context_client: ContextClient, simap_client: SimapClient, link.update( 'sdp1', endpoints[0], 'sdp2', endpoints[1], supporting_link_ids=[ - ('admin', 'L1'), ('admin', 'L3'), ('agg', 'AggNet-L1') + ('admin', 'L1'), ('agg', 'AggNet-L1') ] ) except (KeyError, IndexError, ValueError) as e: @@ -194,7 +194,7 @@ def set_simap_network(context_client: ContextClient, simap_client: SimapClient, link.update( 'sdp1', endpoints[0], 'sdp2', endpoints[1], supporting_link_ids=[ - ('trans-pkt', 'Trans-L1'), ('admin', 'L13') + ('trans-pkt', 'Trans-L1'), ('admin', 'L13'), ('admin', 'L3') ] ) except (KeyError, IndexError, ValueError) as e: diff --git a/src/simap_connector/service/telemetry/worker/CollectorWorker.py b/src/simap_connector/service/telemetry/worker/CollectorWorker.py index 27b665d05..0827a3f8c 100644 --- a/src/simap_connector/service/telemetry/worker/CollectorWorker.py +++ b/src/simap_connector/service/telemetry/worker/CollectorWorker.py @@ -14,6 +14,7 @@ import json, math, requests, threading, time +from requests.auth import HTTPBasicAuth from requests.exceptions import ReadTimeout from typing import Optional from .data.AggregationCache import AggregationCache, LinkSample @@ -31,6 +32,7 @@ CONTROLLER_TO_ADDRESS_PORT = { WAIT_LOOP_GRANULARITY = 0.5 +AUTH = HTTPBasicAuth('admin', 'admin') class CollectorWorker(_Worker): def __init__( @@ -73,7 +75,7 @@ class CollectorWorker(_Worker): # NOTE: Trick: we set 1-second read_timeout to force the loop to give control # back and be able to check termination events. # , timeout=(10, 1) - with session.get(stream_url, stream=True) as reply: + with session.get(stream_url, stream=True, auth=AUTH) as reply: reply.raise_for_status() it_lines = reply.iter_lines(decode_unicode=True, chunk_size=1024) @@ -140,7 +142,7 @@ class CollectorWorker(_Worker): MSG = '[direct_simap_polling] Requesting "{:s}"...' self._logger.info(MSG.format(str(simap_url))) - with requests.get(simap_url, timeout=10) as reply: + with requests.get(simap_url, timeout=10, auth=AUTH) as reply: reply.raise_for_status() data = reply.json() -- GitLab From 64ee5ccedcb26d3bd26343dd064e4d08f9ac0585 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sun, 22 Feb 2026 13:05:10 +0000 Subject: [PATCH 60/78] feat: Update allowed links configuration and implement special triggering rules for L6 in trans-pkt domain --- .../service/simap_updater/AllowedLinks.py | 6 +- .../service/simap_updater/RealSimaps.py | 2 +- .../service/simap_updater/SimapUpdater.py | 102 +++++++++++++++++- .../worker/data/SyntheticSamplers.py | 8 +- 4 files changed, 105 insertions(+), 13 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index cc45c7f5c..494828847 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -13,9 +13,9 @@ # limitations under the License. ALLOWED_LINKS_PER_CONTROLLER = { - 'e2e' : { 'L1', 'L2', 'L3', 'L4' }, - 'agg' : { 'L13', 'L14' }, - 'trans-pkt': { 'L5', 'L6', 'L9', 'L10' }, + 'e2e' : { 'L1', 'L2' }, + 'agg' : { 'L14' }, + 'trans-pkt': { 'L3', 'L5', 'L6', 'L9', 'L10', 'L13' }, # The remaining can not be monitored therefore they are not included in the allowed links for the controllers # 'agg' : { 'L7ab', 'L7ba', 'L8ab', 'L8ba', 'L11ab', 'L11ba', 'L12ab', 'L12ba', }, } diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py index 184df61e9..d755d5109 100644 --- a/src/simap_connector/service/simap_updater/RealSimaps.py +++ b/src/simap_connector/service/simap_updater/RealSimaps.py @@ -194,7 +194,7 @@ def set_simap_network(context_client: ContextClient, simap_client: SimapClient, link.update( 'sdp1', endpoints[0], 'sdp2', endpoints[1], supporting_link_ids=[ - ('trans-pkt', 'Trans-L1'), ('admin', 'L13'), ('admin', 'L3') + ('trans-pkt', 'Trans-L1'), ('trans-pkt', 'L13'), ('trans-pkt', 'L3') ] ) except (KeyError, IndexError, ValueError) as e: diff --git a/src/simap_connector/service/simap_updater/SimapUpdater.py b/src/simap_connector/service/simap_updater/SimapUpdater.py index 455d1f961..4abe81a52 100644 --- a/src/simap_connector/service/simap_updater/SimapUpdater.py +++ b/src/simap_connector/service/simap_updater/SimapUpdater.py @@ -14,7 +14,7 @@ import logging, queue, threading, uuid -from typing import Any, Optional, Set +from typing import Any, List, Optional, Set, Tuple from common.Constants import DEFAULT_TOPOLOGY_NAME from common.DeviceTypes import DeviceTypeEnum from common.proto.context_pb2 import ( @@ -674,6 +674,21 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.info('Connection {:s} uses allowed link: {:s} (uuid: {:s})'.format(connection_uuid, link_name, link_uuid)) worker_name = '{:s}:{:s}'.format(link_topology_name, link_name) + # --- TEMPORTYY: Check for special triggering rules for L6 in trans-pkt domain --- + if link_name == "L6": + # Check for special triggering rules (e.g., L6 triggers L3 and L13) + triggered_links = self._check_and_trigger_additional_links( + link_topology_name, active_conn_count) + # Update the cached mapping to include triggered links + if triggered_links: + mapping = self._object_cache.get(CachedEntities.CONNECTION, connection_uuid, auto_retrieve=False) + if mapping and isinstance(mapping, dict): + mapping['triggered_links'] = triggered_links + self._object_cache.set(CachedEntities.CONNECTION, mapping, connection_uuid) + LOGGER.debug('Updated connection {:s} mapping with {:d} triggered links'.format( + connection_uuid, len(triggered_links))) + # --- END OF TEMPORARY LOGIC --- + # Worker should already exist from _dispatch_link_set (link creation event) if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): LOGGER.warning('Worker not found for link {:s}, creating and starting new worker'.format(link_name)) @@ -687,7 +702,7 @@ class EventDispatcher(BaseEventDispatcher): connection_count = active_conn_count, link_capacity = LINKS_CAPACITY.get(link_name, 100.0) ), - related_service_ids=[], + related_service_ids=[], # TODO: populate with actual related services if needed (later) )) sampling_interval = 1.0 self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) @@ -708,6 +723,56 @@ class EventDispatcher(BaseEventDispatcher): return True + # TEMPORARY: This function implements the special triggering rules for L6 in trans-pkt domain. + def _check_and_trigger_additional_links( + self, link_topology_name: str, active_conn_count: int + ) -> List[Tuple[str, str, str]]: + """ + Check for special triggering rules and start additional workers. + + Rule: When L6 is processed in trans-pkt domain, also start workers for L3 and L13. + + Args: + connection_uuid: UUID of the connection being processed + domain_name: Domain name (e.g., 'trans-pkt') + processed_links: List of (link_uuid, link_name, link_topology_name) already processed + + Returns: + List of triggered links with format: (link_uuid, link_name, topology_name) + """ + triggered_links = [] + + # Trigger workers for L3 and L13 using same topology as L6 + for link_name in ['L3', 'L13']: + # Generate UUID for the triggered link + link_uuid = str(uuid.uuid4()) + worker_name = '{:s}:{:s}'.format(link_topology_name, link_name) + + LOGGER.info('Triggering worker for link {:s} (generated uuid: {:s})'.format(link_name, link_uuid)) + + # Check if worker already exists + if not self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, worker_name): + # Create and start worker + resources = Resources() + resources.links.append(ResourceLink( + domain_name = link_topology_name, + link_name = link_name, + metrics_sampler = SyntheticSampler.create_random( + connection_count = active_conn_count, + link_capacity = LINKS_CAPACITY.get(link_name, 100.0) + ), + related_service_ids = [], + )) + sampling_interval = 1.0 + self._telemetry_pool.start_synthesizer(worker_name, resources, sampling_interval) + LOGGER.info('Started triggered synthesizer worker: {:s}'.format(worker_name)) + else: + LOGGER.info('Worker {:s} already exists, skipping creation'.format(worker_name)) + + triggered_links.append((link_uuid, link_name, link_topology_name)) + + return triggered_links + def _prepare_connection_processing(self, connection_uuid: str): """ @@ -762,7 +827,8 @@ class EventDispatcher(BaseEventDispatcher): # Cache the connection-to-links mapping for later retrieval (e.g., during REMOVE events) mapping = { 'domain': domain_name, - 'links': {link_uuid: {'name': link_name, 'topology': link_topo_name} for link_uuid, link_name, link_topo_name in processed_links} + 'links': {link_uuid: {'name': link_name, 'topology': link_topo_name} for link_uuid, link_name, link_topo_name in processed_links}, + 'triggered_links': [] # Will store additional links triggered by special rules } self._object_cache.set(CachedEntities.CONNECTION, mapping, connection_uuid) LOGGER.debug('Cached connection {:s} mapping with {:d} links for domain {:s}'.format( @@ -845,6 +911,7 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.debug('Deleted cached mapping for connection {:s}'.format(connection_uuid)) # Process each link: count remaining connections and stop/update worker accordingly + all_links_stopped = True # Track if all links have been stopped for link_uuid, link_name, link_topology_name in processed_links: worker_name = '{:s}:{:s}'.format(link_topology_name, link_name) @@ -860,10 +927,28 @@ class EventDispatcher(BaseEventDispatcher): self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) LOGGER.info('Stopped telemetry worker for link {:s}, no connections remain'.format(link_name)) - delete_simap_network(self._simap_client, domain_name) - LOGGER.info('Deleted SIMAP network for domain {:s} after connection removal'.format(domain_name)) + # ---- TEMPORARY: Stop triggered links (L3 and L13 when L6 is removed from trans-pkt) ---- + if link_name == "L6": + try: + triggered_links = mapping.get('triggered_links', []) + if triggered_links: + LOGGER.info('Connection {:s} has {:d} triggered links to clean up'.format( + connection_uuid, len(triggered_links))) + + for _, trig_link_name, trig_link_topology_name in triggered_links: + trig_worker_name = '{:s}:{:s}'.format(trig_link_topology_name, trig_link_name) + + if self._telemetry_pool.has_worker(WorkerTypeEnum.SYNTHESIZER, trig_worker_name): + self._telemetry_pool.stop_worker(WorkerTypeEnum.SYNTHESIZER, trig_worker_name) + LOGGER.info('Stopped triggered telemetry worker for link {:s}'.format(trig_link_name)) + else: + LOGGER.warning('Triggered worker {:s} not found during cleanup'.format(trig_worker_name)) + except Exception as e: + LOGGER.exception('Failed to stop triggered links for connection {:s}: {:s}'.format(connection_uuid, str(e))) + # ---- END OF TEMPORARY LOGIC ---- else: # Other connections still use this link, update worker with new count + all_links_stopped = False worker = self._telemetry_pool.get_worker(WorkerTypeEnum.SYNTHESIZER, worker_name) assert isinstance(worker, SynthesizerWorker), \ 'Expected SynthesizerWorker, got {:s}'.format(type(worker).__name__) @@ -872,6 +957,13 @@ class EventDispatcher(BaseEventDispatcher): LOGGER.info('Updated telemetry for link {:s} after connection removal, {:d} connections remain'.format( link_name, remaining_conn_count)) + # Delete SIMAP network only if all links have been stopped + if all_links_stopped: + delete_simap_network(self._simap_client, domain_name) + LOGGER.info('Deleted SIMAP network for domain {:s} after all links stopped'.format(domain_name)) + else: + LOGGER.debug('SIMAP network {:s} retained, some links still have active connections'.format(domain_name)) + except Exception as e: LOGGER.exception('Failed to process connection removal {:s}: {:s}'.format( connection_uuid, str(e))) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index f6a90733c..9c4c20a13 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -89,10 +89,10 @@ class SyntheticSampler: bw_utilization = max(min_bw, min(max_bw, bw_utilization)) self.prev_bw = bw_utilization - # Generate latency using same pattern as bandwidth (BW ranges / 10 = 0-10ms range) - avg_lat = avg / 10.0 - min_lat = min_bw / 10.0 - max_lat = max_bw / 10.0 + # Generate latency using same pattern as bandwidth (BW ranges / 9 = 0-10ms range) + avg_lat = avg / 9.0 + min_lat = min_bw / 9.0 + max_lat = max_bw / 9.0 if self.prev_latency is None: # First sample: start at average for this connection count -- GitLab From f275b5ad682ce9cc869fff6a7ac4c1d17c879e2e Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sun, 22 Feb 2026 13:41:14 +0000 Subject: [PATCH 61/78] feat: Update supporting link IDs in set_simap_network function for AggNet to admin --- src/simap_connector/service/simap_updater/RealSimaps.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/simap_connector/service/simap_updater/RealSimaps.py b/src/simap_connector/service/simap_updater/RealSimaps.py index d755d5109..184df61e9 100644 --- a/src/simap_connector/service/simap_updater/RealSimaps.py +++ b/src/simap_connector/service/simap_updater/RealSimaps.py @@ -194,7 +194,7 @@ def set_simap_network(context_client: ContextClient, simap_client: SimapClient, link.update( 'sdp1', endpoints[0], 'sdp2', endpoints[1], supporting_link_ids=[ - ('trans-pkt', 'Trans-L1'), ('trans-pkt', 'L13'), ('trans-pkt', 'L3') + ('trans-pkt', 'Trans-L1'), ('admin', 'L13'), ('admin', 'L3') ] ) except (KeyError, IndexError, ValueError) as e: -- GitLab From ce3e20e9d31b4161c65fa6c358b041513c6b3d43 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sun, 22 Feb 2026 14:16:31 +0000 Subject: [PATCH 62/78] feat: Adjust latency calculation to use updated divisor and increase noise range for temporal continuity --- .../telemetry/worker/data/SyntheticSamplers.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 9c4c20a13..97cd741a1 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -89,17 +89,17 @@ class SyntheticSampler: bw_utilization = max(min_bw, min(max_bw, bw_utilization)) self.prev_bw = bw_utilization - # Generate latency using same pattern as bandwidth (BW ranges / 9 = 0-10ms range) - avg_lat = avg / 9.0 - min_lat = min_bw / 9.0 - max_lat = max_bw / 9.0 + # Generate latency using same pattern as bandwidth (BW ranges / 11 = 0-10ms range) + avg_lat = avg / 11.0 + min_lat = min_bw / 11.0 + max_lat = max_bw / 11.0 if self.prev_latency is None: # First sample: start at average for this connection count latency = avg_lat else: - # Add ±1% noise to previous latency for temporal continuity - noise_factor = random.uniform(-0.01, 0.01) + # Add ±5% noise to previous latency for temporal continuity + noise_factor = random.uniform(-0.05, 0.05) latency = self.prev_latency * (1.0 + noise_factor) # Clamp to current range (handles "jump" when connection count changes) -- GitLab From 881989656dd1102a05397888873309c2ac6a7ac5 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sun, 22 Feb 2026 14:59:06 +0000 Subject: [PATCH 63/78] feat: Update LINKS_CAPACITY values for improved bandwidth utilization --- src/simap_connector/service/simap_updater/AllowedLinks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index 494828847..2e1cc3a4d 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -32,8 +32,8 @@ ALLOWED_LINKS_PER_CONTROLLER = { # description "0–100 percent value."; # } LINKS_CAPACITY = { - 'L1' : 100, 'L2' : 100, 'L3' : 100, 'L4' : 100, - 'L5' : 100, 'L6' : 100, 'L9' : 100, 'L10' : 100, + 'L1' : 5, 'L2' : 5, 'L3' : 30, 'L4' : 30, + 'L5' : 15, 'L6' : 15, 'L9' : 15, 'L10' : 15, 'L7ab' : 100, 'L7ba' : 100, 'L8ab' : 100, 'L8ba' : 100, 'L11ab' : 100, - 'L11ba' : 100, 'L12ab': 100, 'L12ba': 100, 'L13' : 100, 'L14' : 100, + 'L11ba' : 100, 'L12ab': 100, 'L12ba': 100, 'L13' : 50, 'L14' : 50, } -- GitLab From 35b63a641a130f9f2e0c8a2ed4d4ed552c5b1bc6 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sun, 22 Feb 2026 15:24:00 +0000 Subject: [PATCH 64/78] feat: Added notification to save in DB --- .../clients/influxdb_fetcher.py | 42 +++++++------------ 1 file changed, 16 insertions(+), 26 deletions(-) diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py index 5f82a0a47..d27fbc6d9 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py @@ -25,7 +25,7 @@ from datetime import datetime, timezone from typing import Any, Dict from common.tools.client.RetryDecorator import delay_exponential, retry -from influxdb_client_3 import InfluxDBClient3 +from influxdb_client_3 import InfluxDBClient3, Point from ..ai_model.sla_policy import SLAPolicyConfig LOGGER = logging.getLogger(__name__) @@ -61,23 +61,19 @@ class InfluxDBFetcher: influxdb_token: Authentication token for InfluxDB. influxdb_database: Name of the InfluxDB database to query. """ - self.influxdb_host = influxdb_host - self.influxdb_port = influxdb_port - self.influxdb_token = influxdb_token + self.influxdb_host = influxdb_host + self.influxdb_port = influxdb_port + self.influxdb_token = influxdb_token self.influxdb_database = influxdb_database + self.influxdb_url = f"http://{influxdb_host}:{influxdb_port}" - # Construct full URL with port for InfluxDB v3 - self.influxdb_url = f"http://{influxdb_host}:{influxdb_port}" - - LOGGER.info( - f"InfluxDBFetcher initialized for database '{influxdb_database}' " - f"at {self.influxdb_url}" - ) + LOGGER.info( f"InfluxDBFetcher initialized for database '{influxdb_database}' " + f"at {self.influxdb_url}") self._client = InfluxDBClient3( - host=self.influxdb_url, - token=self.influxdb_token, - database=self.influxdb_database - ) + host = self.influxdb_url, + token = self.influxdb_token, + database = self.influxdb_database + ) def is_connected(self) -> bool: """ @@ -274,17 +270,11 @@ class InfluxDBFetcher: ) # TODO: Implement actual InfluxDB write - # Example implementation: - # from influxdb_client_3 import InfluxDBClient3, Point - # client = InfluxDBClient3( - # host=self.influxdb_host, - # token=self.influxdb_token, - # database=self.influxdb_database - # ) - # point = Point("telemetry_notifications") \ - # .tag("status", status) \ - # .field("timestamp", timestamp) - # client.write(point) + + point = Point("telemetry_notifications") \ + .tag("status", status) \ + .field("timestamp", timestamp) + self._client.write(point) LOGGER.info("Telemetry notification stored successfully in InfluxDB") return True -- GitLab From 96bbb3e0b107e89bb6499a7ec761b32284593505 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sun, 22 Feb 2026 15:49:06 +0000 Subject: [PATCH 65/78] feat: Update LINKS_CAPACITY values and add latency ranges for synthetic samplers; modify slice IDs in teardown scripts --- .../service/simap_updater/AllowedLinks.py | 6 +++--- .../telemetry/worker/data/SyntheticSamplers.py | 13 +++++++++---- .../data/slices/network-slice1_background.json | 2 +- .../data/slices/network-slice3_background.json | 2 +- src/tests/mwc26-f5ga/teardown-slice1_background.sh | 2 +- .../teardown-slice3_another_background.sh | 2 +- 6 files changed, 16 insertions(+), 11 deletions(-) diff --git a/src/simap_connector/service/simap_updater/AllowedLinks.py b/src/simap_connector/service/simap_updater/AllowedLinks.py index 2e1cc3a4d..9c34b87ae 100644 --- a/src/simap_connector/service/simap_updater/AllowedLinks.py +++ b/src/simap_connector/service/simap_updater/AllowedLinks.py @@ -32,8 +32,8 @@ ALLOWED_LINKS_PER_CONTROLLER = { # description "0–100 percent value."; # } LINKS_CAPACITY = { - 'L1' : 5, 'L2' : 5, 'L3' : 30, 'L4' : 30, - 'L5' : 15, 'L6' : 15, 'L9' : 15, 'L10' : 15, + 'L1' : 100, 'L2' : 100, 'L3' : 100, 'L4' : 100, + 'L5' : 100, 'L6' : 100, 'L9' : 100, 'L10' : 100, 'L7ab' : 100, 'L7ba' : 100, 'L8ab' : 100, 'L8ba' : 100, 'L11ab' : 100, - 'L11ba' : 100, 'L12ab': 100, 'L12ba': 100, 'L13' : 50, 'L14' : 50, + 'L11ba' : 100, 'L12ab': 100, 'L12ba': 100, 'L13' : 100, 'L14' : 100, } diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 97cd741a1..1279703a8 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -54,6 +54,13 @@ class SyntheticSampler: 3: (65, 60, 80), 4: (85, 80, 95), } + LAT_RANGES = { + 0: (0.5, 0.0, 1.0), + 1: (1.5, 1.0, 2.0), + 2: (2.5, 2.0, 3.0), + 3: (3.5, 3.0, 4.0), + 4: (4.5, 4.0, 5.0), + } @classmethod def create_random( @@ -89,10 +96,8 @@ class SyntheticSampler: bw_utilization = max(min_bw, min(max_bw, bw_utilization)) self.prev_bw = bw_utilization - # Generate latency using same pattern as bandwidth (BW ranges / 11 = 0-10ms range) - avg_lat = avg / 11.0 - min_lat = min_bw / 11.0 - max_lat = max_bw / 11.0 + # Generate latency using same connection count key + avg_lat, min_lat, max_lat = self.LAT_RANGES[conn_key] if self.prev_latency is None: # First sample: start at average for this connection count diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json index 6d6e0893e..fd63bbabb 100644 --- a/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json +++ b/src/tests/mwc26-f5ga/data/slices/network-slice1_background.json @@ -1,7 +1,7 @@ { "slice-service": [ { - "id": "initial_background_slice", + "id": "initial_background_slice_1", "description": "network slice, PC1-VM1 - using IP transport network", "sdps": { "sdp": [ diff --git a/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json b/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json index 90c74a47b..e76be7569 100644 --- a/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json +++ b/src/tests/mwc26-f5ga/data/slices/network-slice3_background.json @@ -1,7 +1,7 @@ { "slice-service": [ { - "id": "another_background_slice", + "id": "another_background_slice_3", "description": "network slice, PC1-VM1 - using IP transport network", "sdps": { "sdp": [ diff --git a/src/tests/mwc26-f5ga/teardown-slice1_background.sh b/src/tests/mwc26-f5ga/teardown-slice1_background.sh index 0c99b79ce..1b4f1b994 100755 --- a/src/tests/mwc26-f5ga/teardown-slice1_background.sh +++ b/src/tests/mwc26-f5ga/teardown-slice1_background.sh @@ -16,7 +16,7 @@ echo "[E2E] Tear Down slice2..." curl --request DELETE --location \ - http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=initial_background_slice + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=initial_background_slice_1 echo diff --git a/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh b/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh index 5c162253b..85fdcdabf 100755 --- a/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh +++ b/src/tests/mwc26-f5ga/teardown-slice3_another_background.sh @@ -16,7 +16,7 @@ echo "[E2E] Tear Down slice3..." curl --request DELETE --location \ - http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=another_background_slice + http://0.0.0.0:80/restconf/data/ietf-network-slice-service:network-slice-services/slice-service=another_background_slice_3 echo -- GitLab From dbf1185ddb2913b8961241d95b323881982b1ac6 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sun, 22 Feb 2026 19:32:43 +0000 Subject: [PATCH 66/78] feat: Refactor bandwidth and latency range definitions for improved readability; streamline sample generation logic --- .../worker/data/SyntheticSamplers.py | 42 +++++-------------- 1 file changed, 11 insertions(+), 31 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 1279703a8..3dbc92e09 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -40,26 +40,18 @@ class SyntheticSampler: Values vary by ±1% between consecutive samples for temporal continuity. """ - connection_count : int = field(default = 0) # Current connection count - link_capacity : float = field(default = 100.0) # Link capacity in Gbps - prev_bw : Optional[float] = field(default = None) # Previous BW percentage - prev_latency : Optional[float] = field(default = None) # Previous latency (ms) + connection_count : int = field(default = 0) + link_capacity : float = field(default = 100.0) + prev_bw : Optional[float] = field(default = None) + prev_latency : Optional[float] = field(default = None) # Connection count to (avg, min, max) percentage mapping # Latency uses same ranges divided by 10 (0-10ms range) BW_RANGES = { - 0: (3, 1, 10), - 1: (25, 15, 30), - 2: (45, 35, 55), - 3: (65, 60, 80), - 4: (85, 80, 95), + 0: (3, 1, 10), 1: (25, 15, 30), 2: (45, 35, 55), 3: (65, 60, 80), 4: (85, 80, 95), } LAT_RANGES = { - 0: (0.5, 0.0, 1.0), - 1: (1.5, 1.0, 2.0), - 2: (2.5, 2.0, 3.0), - 3: (3.5, 3.0, 4.0), - 4: (4.5, 4.0, 5.0), + 0: (0.5, 0.0, 1.0), 1: (1.5, 1.0, 2.0), 2: (2.5, 2.0, 3.0), 3: (3.5, 3.0, 4.0), 4: (4.5, 4.0, 5.0), } @classmethod @@ -78,43 +70,31 @@ class SyntheticSampler: Tuple of (bandwidth_sample, latency_sample) """ timestamp = datetime.now().timestamp() - - # Determine range based on connection count (cap at 4+) - conn_key = min(self.connection_count, 4) + conn_key = min(self.connection_count, 4) + avg, min_bw, max_bw = self.BW_RANGES[conn_key] - - # Generate bandwidth percentage if self.prev_bw is None: - # First sample: start at average for this connection count bw_utilization = avg else: - # Add ±1% noise to previous value for temporal continuity noise_factor = random.uniform(-0.01, 0.01) bw_utilization = self.prev_bw * (1.0 + noise_factor) - # Clamp to current range (handles "jump" when connection count changes) bw_utilization = max(min_bw, min(max_bw, bw_utilization)) self.prev_bw = bw_utilization - # Generate latency using same connection count key avg_lat, min_lat, max_lat = self.LAT_RANGES[conn_key] - if self.prev_latency is None: - # First sample: start at average for this connection count latency = avg_lat else: - # Add ±5% noise to previous latency for temporal continuity - noise_factor = random.uniform(-0.05, 0.05) + noise_factor = random.uniform(-0.0, 0.0) latency = self.prev_latency * (1.0 + noise_factor) - # Clamp to current range (handles "jump" when connection count changes) latency = max(min_lat, min(max_lat, latency)) self.prev_latency = latency - # Convert percentage to actual utilization (Gbps) - actual_bw_utilization = (bw_utilization / 100.0) * self.link_capacity + # actual_bw_utilization = (bw_utilization / 100.0) * self.link_capacity - return (Sample(timestamp, 0, actual_bw_utilization), Sample(timestamp, 0, latency)) + return (Sample(timestamp, 0, bw_utilization), Sample(timestamp, 0, latency)) class SyntheticSamplers: -- GitLab From 5d476683f612814a8aed8a6670d69561dad0b88b Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sun, 22 Feb 2026 19:33:52 +0000 Subject: [PATCH 67/78] Add new Grafana dashboard for SIMAP Service-Level SLA Monitoring - Introduced a comprehensive dashboard to monitor service-level agreements (SLAs) for SIMAP. - Integrated annotations for service upgrades and downgrades using InfluxDB as the data source. - Included panels for displaying service status change events, link bandwidth utilization, and latency metrics. - Configured templating options for dynamic data querying and time range selection. - Set refresh intervals and time settings for real-time monitoring. --- ...ana-dashboard-simap-sla-monitoring_V1.json | 1172 +++++++++++++++++ ...ana-dashboard-simap-sla-monitoring_V2.json | 685 ++++++++++ 2 files changed, 1857 insertions(+) create mode 100644 grafana-dashboard-simap-sla-monitoring_V1.json create mode 100644 grafana-dashboard-simap-sla-monitoring_V2.json diff --git a/grafana-dashboard-simap-sla-monitoring_V1.json b/grafana-dashboard-simap-sla-monitoring_V1.json new file mode 100644 index 000000000..310c904ae --- /dev/null +++ b/grafana-dashboard-simap-sla-monitoring_V1.json @@ -0,0 +1,1172 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "iconColor": "orange", + "name": "Service Upgrades", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "UPGRADE" + } + ], + "type": "tags" + }, + "query": "SELECT * FROM telemetry_notifications WHERE status='UPGRADE'", + "textColumn": "timestamp", + "titleColumn": "", + "tagsColumn": "status" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "iconColor": "red", + "name": "Service Downgrades", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "DOWNGRADE" + } + ], + "type": "tags" + }, + "query": "SELECT * FROM telemetry_notifications WHERE status='DOWNGRADE'", + "textColumn": "timestamp", + "titleColumn": "", + "tagsColumn": "status" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": null, + "links": [], + "liveNow": false, + "panels": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "gridPos": { + "h": 2, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "# SIMAP Service-Level SLA Monitoring\n\nReal-time tracking of link bandwidth utilization and latency across networks. UPGRADE ⬆️ and DOWNGRADE ⬇️ events are shown as timeline annotations.", + "mode": "markdown" + }, + "pluginVersion": "10.0.0", + "title": "Dashboard Overview", + "type": "text" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 70 + }, + { + "color": "red", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 0, + "y": 2 + }, + "id": 2, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT MEAN(bandwidth_utilization) as \"Average BW Utilization\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(10s) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Average Bandwidth Utilization (5min)", + "type": "gauge" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 80 + }, + { + "color": "red", + "value": 95 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 6, + "y": 2 + }, + "id": 3, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT MAX(bandwidth_utilization) as \"Peak BW Utilization\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(10s) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Peak Bandwidth Utilization (5min)", + "type": "gauge" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 12, + "y": 2 + }, + "id": 4, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT MEAN(latency) as \"Average Latency\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(10s) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Average Latency (5min)", + "type": "gauge" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 75 + }, + { + "color": "red", + "value": 150 + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 6, + "x": 18, + "y": 2 + }, + "id": 5, + "options": { + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "showThresholdLabels": false, + "showThresholdMarkers": true + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT MAX(latency) as \"Peak Latency\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(10s) fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Peak Latency (5min)", + "type": "gauge" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "tooltip": false, + "viz": false, + "legend": false + }, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 70 + }, + { + "color": "red", + "value": 90 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last", + "mean", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Link Bandwidth Utilization (%) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "tooltip": false, + "viz": false, + "legend": false + }, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 17 + }, + "id": 7, + "options": { + "legend": { + "calcs": [ + "last", + "mean", + "max" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Link Latency (ms) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 70 + }, + { + "color": "red", + "value": 90 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Bandwidth Utilization (%)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "type": "gauge", + "mode": "gradient" + } + }, + { + "id": "unit", + "value": "percent" + }, + { + "id": "max", + "value": 100 + }, + { + "id": "min", + "value": 0 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Latency (ms)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "type": "color-background", + "mode": "gradient" + } + }, + { + "id": "unit", + "value": "ms" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 27 + }, + "id": 8, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Bandwidth Utilization (%)" + } + ] + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT LAST(bandwidth_utilization) as \"Bandwidth Utilization (%)\", LAST(latency) as \"Latency (ms)\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Link Comparison Table - Current Status", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "network_id": "Network ID", + "link_id": "Link ID" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "hideFrom": { + "tooltip": false, + "viz": false, + "legend": false + } + }, + "mappings": [] + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 0, + "y": 37 + }, + "id": 9, + "options": { + "displayLabels": [ + "name", + "percent" + ], + "legend": { + "displayMode": "table", + "placement": "right", + "showLegend": true, + "values": [ + "value" + ] + }, + "pieType": "pie", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "tooltip": { + "mode": "single", + "sort": "none" + } + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT LAST(bandwidth_utilization) FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Bandwidth Distribution by Link", + "type": "piechart" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "bars", + "fillOpacity": 80, + "gradientMode": "hue", + "hideFrom": { + "tooltip": false, + "viz": false, + "legend": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "never", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 12, + "x": 12, + "y": 37 + }, + "id": 10, + "options": { + "legend": { + "calcs": [ + "mean", + "max" + ], + "displayMode": "table", + "placement": "right", + "showLegend": true + }, + "tooltip": { + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT LAST(latency) FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(30s), link_id fill(null)", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Latency Comparison Across Links", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "inspect": false + }, + "mappings": [ + { + "options": { + "UPGRADE": { + "color": "green", + "index": 0, + "text": "⬆️ UPGRADE" + }, + "DOWNGRADE": { + "color": "red", + "index": 1, + "text": "⬇️ DOWNGRADE" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.width", + "value": 180 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Status" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "type": "color-background" + } + } + ] + } + ] + }, + "gridPos": { + "h": 8, + "w": 24, + "x": 0, + "y": 45 + }, + "id": 11, + "options": { + "cellHeight": "sm", + "footer": { + "countRows": false, + "fields": "", + "reducer": [ + "sum" + ], + "show": false + }, + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Time" + } + ] + }, + "pluginVersion": "10.0.0", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT status as \"Status\", timestamp as \"Timestamp Info\" FROM telemetry_notifications WHERE time > now() - $timerange ORDER BY time DESC LIMIT 50", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Service Status Change Events (UPGRADE/DOWNGRADE)", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "Time": 0, + "Status": 1, + "Timestamp Info": 2 + }, + "renameByName": {} + } + } + ], + "type": "table" + } + ], + "refresh": "10s", + "schemaVersion": 38, + "style": "dark", + "tags": [ + "simap", + "telemetry", + "sla", + "service-monitoring" + ], + "templating": { + "list": [ + { + "current": { + "selected": false, + "text": "InfluxDB", + "value": "InfluxDB" + }, + "hide": 0, + "includeAll": false, + "label": "InfluxDB Datasource", + "multi": false, + "name": "DS_INFLUXDB", + "options": [], + "query": "influxdb", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "type": "datasource" + }, + { + "current": { + "selected": false, + "text": ".*", + "value": ".*" + }, + "hide": 0, + "label": "Network ID", + "name": "network_id", + "options": [ + { + "selected": true, + "text": ".*", + "value": ".*" + } + ], + "query": ".*", + "skipUrlSync": false, + "type": "textbox" + }, + { + "current": { + "selected": false, + "text": ".*", + "value": ".*" + }, + "hide": 0, + "label": "Link ID", + "name": "link_id", + "options": [ + { + "selected": true, + "text": ".*", + "value": ".*" + } + ], + "query": ".*", + "skipUrlSync": false, + "type": "textbox" + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "selected": false, + "text": "1h", + "value": "1h" + }, + "hide": 0, + "label": "Time Range", + "name": "timerange", + "options": [ + { + "selected": false, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "15m", + "value": "15m" + }, + { + "selected": false, + "text": "30m", + "value": "30m" + }, + { + "selected": true, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "3h", + "value": "3h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + }, + { + "selected": false, + "text": "24h", + "value": "24h" + } + ], + "query": "5m,15m,30m,1h,3h,6h,12h,24h", + "queryValue": "", + "refresh": 2, + "skipUrlSync": false, + "type": "interval" + } + ] + }, + "time": { + "from": "now-1h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m" + ] + }, + "timezone": "", + "title": "SIMAP Service-Level SLA Monitoring", + "uid": "simap-sla-monitoring", + "version": 0, + "weekStart": "" +} diff --git a/grafana-dashboard-simap-sla-monitoring_V2.json b/grafana-dashboard-simap-sla-monitoring_V2.json new file mode 100644 index 000000000..eb93cd889 --- /dev/null +++ b/grafana-dashboard-simap-sla-monitoring_V2.json @@ -0,0 +1,685 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "hide": false, + "iconColor": "orange", + "name": "Service Upgrades", + "query": "SELECT * FROM telemetry_notifications WHERE status='UPGRADE'", + "tagsColumn": "status", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "UPGRADE" + } + ], + "type": "tags" + }, + "textColumn": "timestamp", + "titleColumn": "" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "hide": false, + "iconColor": "red", + "name": "Service Downgrades", + "query": "SELECT * FROM telemetry_notifications WHERE status='DOWNGRADE'", + "tagsColumn": "status", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "DOWNGRADE" + } + ], + "type": "tags" + }, + "textColumn": "timestamp", + "titleColumn": "" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": 0, + "links": [], + "panels": [ + { + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "# SIMAP Service-Level SLA Monitoring\n", + "mode": "markdown" + }, + "pluginVersion": "12.3.1", + "title": "Dashboard Overview", + "type": "text" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "footer": { + "reducers": [] + }, + "inspect": false + }, + "mappings": [ + { + "options": { + "DOWNGRADE": { + "color": "red", + "index": 1, + "text": "⬇️ DOWNGRADE" + }, + "UPGRADE": { + "color": "green", + "index": 0, + "text": "⬆️ UPGRADE" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.width", + "value": 180 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Status" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "type": "color-background" + } + } + ] + } + ] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 11, + "options": { + "cellHeight": "sm", + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Time" + } + ] + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT status as \"Status\", timestamp as \"Timestamp Info\" FROM telemetry_notifications WHERE time > now() - $timerange ORDER BY time DESC LIMIT 50", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Service Status Change Events (UPGRADE/DOWNGRADE)", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "Status": 1, + "Time": 0, + "Timestamp Info": 2 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 65 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 0, + "y": 6 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": true, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Link Bandwidth Utilization (%) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "ms" + }, + "overrides": [] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 12, + "y": 6 + }, + "id": 7, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": true, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + } + ], + "title": "Link Latency (ms) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "footer": { + "reducers": [] + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 5 + }, + { + "color": "red", + "value": 10 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Bandwidth Utilization (%)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "mode": "gradient", + "type": "gauge" + } + }, + { + "id": "unit", + "value": "percent" + }, + { + "id": "max", + "value": 100 + }, + { + "id": "min", + "value": 0 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Latency (ms)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "mode": "gradient", + "type": "color-background" + } + }, + { + "id": "unit", + "value": "ms" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 8, + "options": { + "cellHeight": "sm", + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Bandwidth Utilization (%)" + } + ] + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT LAST(bandwidth_utilization) as \"Bandwidth Utilization (%)\", LAST(latency) as \"Latency (ms)\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Link Comparison Table - Current Status", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "link_id": "Link ID", + "network_id": "Network ID" + } + } + } + ], + "type": "table" + } + ], + "preload": false, + "refresh": "10s", + "schemaVersion": 42, + "tags": [ + "simap", + "telemetry", + "sla", + "service-monitoring" + ], + "templating": { + "list": [ + { + "current": { + "text": "influxdb-SIMAP-Server", + "value": "cf9ge11vhadj4b" + }, + "includeAll": false, + "label": "InfluxDB Datasource", + "name": "DS_INFLUXDB", + "options": [], + "query": "influxdb", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "label": "Time Range", + "name": "timerange", + "options": [ + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "20m", + "value": "20m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "3h", + "value": "3h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + } + ], + "query": "5m,10m,20m,1h,3h,6h,12h", + "refresh": 2, + "type": "interval" + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m" + ] + }, + "timezone": "", + "title": "SIMAP Service-Level SLA Monitoring", + "uid": "simap-sla-monitoring", + "version": 7 +} \ No newline at end of file -- GitLab From a5f9ab737c95c8d11bc82e63c4e2a3c6885fd50c Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Sun, 22 Feb 2026 19:35:32 +0000 Subject: [PATCH 68/78] feat: Update default timestamp retrieval to use current UTC time in InfluxDBFetcher --- .../mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py index d27fbc6d9..f6ab6127f 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py @@ -256,7 +256,7 @@ class InfluxDBFetcher: Exception: If InfluxDB is unavailable after all retries. """ status = notification_data.get('status') - timestamp = notification_data.get('timestamp', 'N/A') + timestamp = notification_data.get('timestamp', datetime.now(timezone.utc).isoformat()) # Validate status value if status not in {'UPGRADE', 'DOWNGRADE'}: -- GitLab From 0e9ac4f41e3d2fb117e113da5cd9725ac219213f Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 23 Feb 2026 06:22:43 +0000 Subject: [PATCH 69/78] feat: Update bandwidth and latency range definitions for improved accuracy --- .../telemetry/worker/data/SyntheticSamplers.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 3dbc92e09..9f809a655 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -48,10 +48,18 @@ class SyntheticSampler: # Connection count to (avg, min, max) percentage mapping # Latency uses same ranges divided by 10 (0-10ms range) BW_RANGES = { - 0: (3, 1, 10), 1: (25, 15, 30), 2: (45, 35, 55), 3: (65, 60, 80), 4: (85, 80, 95), + 0: (3, 5, 10), + 1: (25, 15, 30), + 2: (40, 35, 50), + 3: (60, 65, 80), + 4: (85, 80, 95), } LAT_RANGES = { - 0: (0.5, 0.0, 1.0), 1: (1.5, 1.0, 2.0), 2: (2.5, 2.0, 3.0), 3: (3.5, 3.0, 4.0), 4: (4.5, 4.0, 5.0), + 0: (0.4, 0.1, 0.8), + 1: (1.4, 1.0, 1.8), + 2: (2.4, 2.0, 2.8), + 3: (3.4, 3.0, 3.8), + 4: (4.4, 4.0, 4.8), } @classmethod -- GitLab From 62b2288575f8f5652a3fd186eeb13821dfb5424c Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 23 Feb 2026 06:23:15 +0000 Subject: [PATCH 70/78] feat: Add Grafana dashboard for SIMAP SLA monitoring and enhance AI model processing - Introduced a new Grafana dashboard V3 JSON file for monitoring SIMAP service-level agreements (SLA). - Updated AIModelProcessor to accept an InfluxDBFetcher instance for writing predicted telemetry results. - Implemented a method in InfluxDBFetcher to write predicted telemetry data to InfluxDB, including average calculations for bandwidth utilization and latency. - Modified the engine to pass the InfluxDB fetcher to the AI processor for telemetry writing. --- ...ana-dashboard-simap-sla-monitoring_V1.json | 0 ...ana-dashboard-simap-sla-monitoring_V2.json | 124 ++- ...ana-dashboard-simap-sla-monitoring_V3.json | 805 ++++++++++++++++++ .../ai_model/ai_processor.py | 18 +- .../clients/influxdb_fetcher.py | 77 +- .../mwc26-f5ga/AI_analytics_engine/engine.py | 5 +- 6 files changed, 1018 insertions(+), 11 deletions(-) rename grafana-dashboard-simap-sla-monitoring_V1.json => src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V1.json (100%) rename grafana-dashboard-simap-sla-monitoring_V2.json => src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V2.json (82%) create mode 100644 src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3.json diff --git a/grafana-dashboard-simap-sla-monitoring_V1.json b/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V1.json similarity index 100% rename from grafana-dashboard-simap-sla-monitoring_V1.json rename to src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V1.json diff --git a/grafana-dashboard-simap-sla-monitoring_V2.json b/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V2.json similarity index 82% rename from grafana-dashboard-simap-sla-monitoring_V2.json rename to src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V2.json index eb93cd889..ee669692c 100644 --- a/grafana-dashboard-simap-sla-monitoring_V2.json +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V2.json @@ -285,7 +285,34 @@ }, "unit": "percent" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/pred_.*/" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + }, + { + "id": "color", + "value": { + "fixedColor": "semi-dark-gray", + "mode": "fixed" + } + }, + { + "id": "custom.fillOpacity", + "value": 5 + } + ] + } + ] }, "gridPos": { "h": 15, @@ -320,6 +347,16 @@ "rawQuery": true, "refId": "A", "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" } ], "title": "Link Bandwidth Utilization (%) - Timeline", @@ -389,7 +426,34 @@ }, "unit": "ms" }, - "overrides": [] + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/pred_.*/" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + }, + { + "id": "color", + "value": { + "fixedColor": "semi-dark-gray", + "mode": "fixed" + } + }, + { + "id": "custom.fillOpacity", + "value": 5 + } + ] + } + ] }, "gridPos": { "h": 15, @@ -424,6 +488,16 @@ "rawQuery": true, "refId": "A", "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" } ], "title": "Link Latency (ms) - Timeline", @@ -662,6 +736,52 @@ "query": "5m,10m,20m,1h,3h,6h,12h", "refresh": 2, "type": "interval" + }, + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", + "includeAll": true, + "label": "Network ID", + "multi": true, + "name": "network_id", + "options": [], + "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", + "includeAll": true, + "label": "Link ID", + "multi": true, + "name": "link_id", + "options": [], + "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" } ] }, diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3.json b/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3.json new file mode 100644 index 000000000..ee669692c --- /dev/null +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3.json @@ -0,0 +1,805 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "hide": false, + "iconColor": "orange", + "name": "Service Upgrades", + "query": "SELECT * FROM telemetry_notifications WHERE status='UPGRADE'", + "tagsColumn": "status", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "UPGRADE" + } + ], + "type": "tags" + }, + "textColumn": "timestamp", + "titleColumn": "" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "hide": false, + "iconColor": "red", + "name": "Service Downgrades", + "query": "SELECT * FROM telemetry_notifications WHERE status='DOWNGRADE'", + "tagsColumn": "status", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "DOWNGRADE" + } + ], + "type": "tags" + }, + "textColumn": "timestamp", + "titleColumn": "" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": 0, + "links": [], + "panels": [ + { + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "# SIMAP Service-Level SLA Monitoring\n", + "mode": "markdown" + }, + "pluginVersion": "12.3.1", + "title": "Dashboard Overview", + "type": "text" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "footer": { + "reducers": [] + }, + "inspect": false + }, + "mappings": [ + { + "options": { + "DOWNGRADE": { + "color": "red", + "index": 1, + "text": "⬇️ DOWNGRADE" + }, + "UPGRADE": { + "color": "green", + "index": 0, + "text": "⬆️ UPGRADE" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.width", + "value": 180 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Status" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "type": "color-background" + } + } + ] + } + ] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 11, + "options": { + "cellHeight": "sm", + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Time" + } + ] + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT status as \"Status\", timestamp as \"Timestamp Info\" FROM telemetry_notifications WHERE time > now() - $timerange ORDER BY time DESC LIMIT 50", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Service Status Change Events (UPGRADE/DOWNGRADE)", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "Status": 1, + "Time": 0, + "Timestamp Info": 2 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 65 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/pred_.*/" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + }, + { + "id": "color", + "value": { + "fixedColor": "semi-dark-gray", + "mode": "fixed" + } + }, + { + "id": "custom.fillOpacity", + "value": 5 + } + ] + } + ] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 0, + "y": 6 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": true, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "Link Bandwidth Utilization (%) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "ms" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/pred_.*/" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + }, + { + "id": "color", + "value": { + "fixedColor": "semi-dark-gray", + "mode": "fixed" + } + }, + { + "id": "custom.fillOpacity", + "value": 5 + } + ] + } + ] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 12, + "y": 6 + }, + "id": 7, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": true, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "Link Latency (ms) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "footer": { + "reducers": [] + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 5 + }, + { + "color": "red", + "value": 10 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Bandwidth Utilization (%)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "mode": "gradient", + "type": "gauge" + } + }, + { + "id": "unit", + "value": "percent" + }, + { + "id": "max", + "value": 100 + }, + { + "id": "min", + "value": 0 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Latency (ms)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "mode": "gradient", + "type": "color-background" + } + }, + { + "id": "unit", + "value": "ms" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 8, + "options": { + "cellHeight": "sm", + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Bandwidth Utilization (%)" + } + ] + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT LAST(bandwidth_utilization) as \"Bandwidth Utilization (%)\", LAST(latency) as \"Latency (ms)\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Link Comparison Table - Current Status", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "link_id": "Link ID", + "network_id": "Network ID" + } + } + } + ], + "type": "table" + } + ], + "preload": false, + "refresh": "10s", + "schemaVersion": 42, + "tags": [ + "simap", + "telemetry", + "sla", + "service-monitoring" + ], + "templating": { + "list": [ + { + "current": { + "text": "influxdb-SIMAP-Server", + "value": "cf9ge11vhadj4b" + }, + "includeAll": false, + "label": "InfluxDB Datasource", + "name": "DS_INFLUXDB", + "options": [], + "query": "influxdb", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "label": "Time Range", + "name": "timerange", + "options": [ + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "20m", + "value": "20m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "3h", + "value": "3h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + } + ], + "query": "5m,10m,20m,1h,3h,6h,12h", + "refresh": 2, + "type": "interval" + }, + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", + "includeAll": true, + "label": "Network ID", + "multi": true, + "name": "network_id", + "options": [], + "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", + "includeAll": true, + "label": "Link ID", + "multi": true, + "name": "link_id", + "options": [], + "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m" + ] + }, + "timezone": "", + "title": "SIMAP Service-Level SLA Monitoring", + "uid": "simap-sla-monitoring", + "version": 7 +} \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py index 6c3a13f3e..af50c87f8 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/ai_model/ai_processor.py @@ -20,13 +20,13 @@ Provides AI/ML processing functionality for SLA analysis. import logging from datetime import datetime, UTC from random import Random -from typing import Any, Dict, Optional +from typing import Any, Dict, Optional, TYPE_CHECKING -from numpy import average import pandas as pd from statsmodels.tsa.holtwinters import ExponentialSmoothing -from .sla_policy import SLAPolicyConfig +if TYPE_CHECKING: + from ..clients.influxdb_fetcher import InfluxDBFetcher LOGGER = logging.getLogger(__name__) @@ -40,13 +40,18 @@ class AIModelProcessor: generating recommendations. """ - def __init__(self) -> None: + def __init__(self, influx_fetcher: Optional['InfluxDBFetcher'] = None) -> None: """ Initialize the AIModelProcessor. + Args: + influx_fetcher: InfluxDBFetcher instance for writing predicted telemetry. + If None, predicted telemetry will not be written to DB. + Loads AI models and prepares the processor for data analysis. """ LOGGER.info("AIModelProcessor initialized") + self._influx_fetcher = influx_fetcher # TODO: Load AI/ML models here # Example: self.model = load_model('sla_violation_detector.h5') @@ -144,13 +149,16 @@ class AIModelProcessor: "error_metrics": error, }) + # Push results to DB via InfluxDB fetcher + if results and self._influx_fetcher: + self._influx_fetcher.write_predicted_telemetry(results) + return results if results else None except Exception as e: LOGGER.error(f"Error during forecasting: {e}", exc_info=True) return None - def process_data( self, performance_data: Dict[str, Any], diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py index f6ab6127f..11908b461 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/clients/influxdb_fetcher.py @@ -24,8 +24,9 @@ import logging from datetime import datetime, timezone from typing import Any, Dict +from numpy import average from common.tools.client.RetryDecorator import delay_exponential, retry -from influxdb_client_3 import InfluxDBClient3, Point +from influxdb_client_3 import InfluxDBClient3, Point, WritePrecision from ..ai_model.sla_policy import SLAPolicyConfig LOGGER = logging.getLogger(__name__) @@ -269,8 +270,6 @@ class InfluxDBFetcher: f"status={status}, timestamp={timestamp}" ) - # TODO: Implement actual InfluxDB write - point = Point("telemetry_notifications") \ .tag("status", status) \ .field("timestamp", timestamp) @@ -278,3 +277,75 @@ class InfluxDBFetcher: LOGGER.info("Telemetry notification stored successfully in InfluxDB") return True + + def write_predicted_telemetry( + self, + results: list[dict[str, Any]], + network_id: str = 'e2e', + link_id: str = 'E2E-L1' + ) -> bool: + """ + Write predicted telemetry (forecasted metrics) to InfluxDB. + + Args: + results: List of forecast results from AIModelProcessor. + Each dict contains metric_name, forecasted_values, etc. + network_id: Network identifier (default: 'e2e') + link_id: Link identifier (default: 'E2E-L1') + + Returns: + True if write succeeded, False otherwise. + """ + if not self.is_connected(): + LOGGER.warning("InfluxDB client not initialized, skipping write to DB") + return False + + if not results: + LOGGER.warning("No results to write to DB") + return False + + try: + # Extract metric predictions and calculate averages + metric_averages = {} + for result in results: + metric_name = result.get("metric_name") + forecasted_values = result.get("forecasted_values", []) + + if metric_name and forecasted_values: + # Calculate average of forecasted values + avg_value = float(average(forecasted_values)) + metric_averages[metric_name] = avg_value + LOGGER.debug(f"Average forecast for {metric_name}: {avg_value:.4f}") + + # Create InfluxDB point for predicted telemetry + point = ( + Point("predicted_telemetry") + .tag("network_id", network_id) + .tag("link_id", link_id) + ) + + # Add predicted metric fields with pred_ prefix + if "bandwidth_utilization" in metric_averages: + point = point.field( + "pred_bandwidth_utilization", + metric_averages["bandwidth_utilization"] + ) + + if "latency" in metric_averages: + point = point.field( + "pred_latency", + metric_averages["latency"] + ) + + # Write to InfluxDB + self._client.write(record=point, write_precision=WritePrecision.S) + + LOGGER.info( + "Wrote predicted telemetry to InfluxDB: network=%s, link=%s, metrics=%s", + network_id, link_id, list(metric_averages.keys()) + ) + return True + + except Exception as e: + LOGGER.error(f"Failed to write predicted telemetry to InfluxDB: {e}", exc_info=True) + return False diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/engine.py b/src/tests/mwc26-f5ga/AI_analytics_engine/engine.py index 25f0f81cf..0cbb55c4d 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/engine.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/engine.py @@ -65,7 +65,10 @@ class AIAnalyticsEngineAPI: influxdb_database = Config.INFLUXDB_DATABASE ) - self.ai_processor = AIModelProcessor() + # Pass InfluxDB fetcher to AI processor for writing predicted telemetry + self.ai_processor = AIModelProcessor( + influx_fetcher=self.influxdb_fetcher + ) self.decision_client = DecisionEngineClient() # Create Flask application -- GitLab From 4619bebc697b7a5211f1a3fcfead330036fd4027 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 23 Feb 2026 07:08:27 +0000 Subject: [PATCH 71/78] feat: Add noise factors for bandwidth and latency calculations to enhance sample variability --- .../service/telemetry/worker/data/SyntheticSamplers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py index 9f809a655..1bcbbb9bd 100644 --- a/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py +++ b/src/simap_connector/service/telemetry/worker/data/SyntheticSamplers.py @@ -84,7 +84,7 @@ class SyntheticSampler: if self.prev_bw is None: bw_utilization = avg else: - noise_factor = random.uniform(-0.01, 0.01) + noise_factor = random.uniform(-0.01, 0.01) # ±1% noise for bandwidth bw_utilization = self.prev_bw * (1.0 + noise_factor) bw_utilization = max(min_bw, min(max_bw, bw_utilization)) @@ -94,7 +94,7 @@ class SyntheticSampler: if self.prev_latency is None: latency = avg_lat else: - noise_factor = random.uniform(-0.0, 0.0) + noise_factor = random.uniform(-0.05, 0.05) # ±5% noise for latency latency = self.prev_latency * (1.0 + noise_factor) latency = max(min_lat, min(max_lat, latency)) -- GitLab From abeb81933570bbb84e734f61d5b59037f10a90b0 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 23 Feb 2026 07:08:43 +0000 Subject: [PATCH 72/78] Add Grafana dashboard for SIMAP V4 Service-Level SLA Monitoring - Introduced a new JSON configuration file for the Grafana dashboard. - The dashboard includes panels for monitoring service upgrades and downgrades, link bandwidth utilization, and latency. - Configured data sources, queries, and visualizations for effective SLA monitoring. - Added templating options for dynamic network and link selection. --- ...shboard-simap-sla-monitoring_V3 copy.json} | 316 ++++- ...ana-dashboard-simap-sla-monitoring_V4.json | 1117 +++++++++++++++++ 2 files changed, 1431 insertions(+), 2 deletions(-) rename src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/{grafana-dashboard-simap-sla-monitoring_V3.json => grafana-dashboard-simap-sla-monitoring_V3 copy.json} (69%) create mode 100644 src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V4.json diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3.json b/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3 copy.json similarity index 69% rename from src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3.json rename to src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3 copy.json index ee669692c..d1b3d72ba 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3.json +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3 copy.json @@ -659,6 +659,318 @@ } ], "type": "table" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Bandwidth Utilization (%)", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 1, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "link_telemetry.bandwidth_utilization {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Actual BW" + }, + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "predicted_telemetry.pred_bandwidth_utilization {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Predicted BW" + }, + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 31 + }, + "id": 12, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "E2E-L1: Bandwidth Comparison (Actual vs Predicted)", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Latency (ms)", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 1, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "ms" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "link_telemetry.latency {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Actual Latency" + }, + { + "id": "color", + "value": { + "fixedColor": "green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "predicted_telemetry.pred_latency {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Predicted Latency" + }, + { + "id": "color", + "value": { + "fixedColor": "orange", + "mode": "fixed" + } + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 31 + }, + "id": 13, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "E2E-L1: Latency Comparison (Actual vs Predicted)", + "type": "timeseries" } ], "preload": false, @@ -799,7 +1111,7 @@ ] }, "timezone": "", - "title": "SIMAP Service-Level SLA Monitoring", - "uid": "simap-sla-monitoring", + "title": "SIMAP V3 Service-Level SLA Monitoring", + "uid": "simap-sla-monitoring-v3", "version": 7 } \ No newline at end of file diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V4.json b/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V4.json new file mode 100644 index 000000000..f7adef9f3 --- /dev/null +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V4.json @@ -0,0 +1,1117 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": { + "type": "grafana", + "uid": "-- Grafana --" + }, + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "hide": false, + "iconColor": "orange", + "name": "Service Upgrades", + "query": "SELECT * FROM telemetry_notifications WHERE status='UPGRADE'", + "tagsColumn": "status", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "UPGRADE" + } + ], + "type": "tags" + }, + "textColumn": "timestamp", + "titleColumn": "" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "enable": true, + "hide": false, + "iconColor": "red", + "name": "Service Downgrades", + "query": "SELECT * FROM telemetry_notifications WHERE status='DOWNGRADE'", + "tagsColumn": "status", + "target": { + "limit": 100, + "matchAny": false, + "tags": [ + { + "key": "status", + "operator": "=", + "value": "DOWNGRADE" + } + ], + "type": "tags" + }, + "textColumn": "timestamp", + "titleColumn": "" + } + ] + }, + "editable": true, + "fiscalYearStartMonth": 0, + "graphTooltip": 1, + "id": 0, + "links": [], + "panels": [ + { + "fieldConfig": { + "defaults": {}, + "overrides": [] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 1, + "options": { + "code": { + "language": "plaintext", + "showLineNumbers": false, + "showMiniMap": false + }, + "content": "# SIMAP Service-Level SLA Monitoring\n", + "mode": "markdown" + }, + "pluginVersion": "12.3.1", + "title": "Dashboard Overview", + "type": "text" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "footer": { + "reducers": [] + }, + "inspect": false + }, + "mappings": [ + { + "options": { + "DOWNGRADE": { + "color": "red", + "index": 1, + "text": "⬇️ DOWNGRADE" + }, + "UPGRADE": { + "color": "green", + "index": 0, + "text": "⬆️ UPGRADE" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Time" + }, + "properties": [ + { + "id": "custom.width", + "value": 180 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Status" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "type": "color-background" + } + } + ] + } + ] + }, + "gridPos": { + "h": 3, + "w": 24, + "x": 0, + "y": 3 + }, + "id": 11, + "options": { + "cellHeight": "sm", + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Time" + } + ] + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT status as \"Status\", timestamp as \"Timestamp Info\" FROM telemetry_notifications WHERE time > now() - $timerange ORDER BY time DESC LIMIT 50", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Service Status Change Events (UPGRADE/DOWNGRADE)", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": {}, + "indexByName": { + "Status": 1, + "Time": 0, + "Timestamp Info": 2 + }, + "renameByName": {} + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 65 + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/pred_.*/" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + }, + { + "id": "color", + "value": { + "fixedColor": "light-blue", + "mode": "fixed" + } + }, + { + "id": "custom.fillOpacity", + "value": 5 + } + ] + } + ] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 0, + "y": 6 + }, + "id": 6, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": true, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "Link Bandwidth Utilization (%) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "line" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "ms" + }, + "overrides": [ + { + "matcher": { + "id": "byRegexp", + "options": "/pred_.*/" + }, + "properties": [ + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + }, + { + "id": "color", + "value": { + "fixedColor": "light-blue", + "mode": "fixed" + } + }, + { + "id": "custom.fillOpacity", + "value": 5 + } + ] + } + ] + }, + "gridPos": { + "h": 15, + "w": 12, + "x": 12, + "y": 6 + }, + "id": 7, + "options": { + "legend": { + "calcs": [ + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": true, + "mode": "multi", + "sort": "desc" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "Link Latency (ms) - Timeline", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "center", + "cellOptions": { + "type": "auto" + }, + "footer": { + "reducers": [] + }, + "inspect": false + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 5 + }, + { + "color": "red", + "value": 10 + } + ] + }, + "unit": "short" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "Bandwidth Utilization (%)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "mode": "gradient", + "type": "gauge" + } + }, + { + "id": "unit", + "value": "percent" + }, + { + "id": "max", + "value": 100 + }, + { + "id": "min", + "value": 0 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "Latency (ms)" + }, + "properties": [ + { + "id": "custom.cellOptions", + "value": { + "mode": "gradient", + "type": "color-background" + } + }, + { + "id": "unit", + "value": "ms" + }, + { + "id": "thresholds", + "value": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + }, + { + "color": "yellow", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + } + } + ] + } + ] + }, + "gridPos": { + "h": 10, + "w": 24, + "x": 0, + "y": 21 + }, + "id": 8, + "options": { + "cellHeight": "sm", + "showHeader": true, + "sortBy": [ + { + "desc": true, + "displayName": "Bandwidth Utilization (%)" + } + ] + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT LAST(bandwidth_utilization) as \"Bandwidth Utilization (%)\", LAST(latency) as \"Latency (ms)\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "table" + } + ], + "title": "Link Comparison Table - Current Status", + "transformations": [ + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true + }, + "indexByName": {}, + "renameByName": { + "link_id": "Link ID", + "network_id": "Network ID" + } + } + } + ], + "type": "table" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Bandwidth Utilization (%)", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 1, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "percent" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "link_telemetry.bandwidth_utilization {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Actual BW" + }, + { + "id": "color", + "value": { + "fixedColor": "blue", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "predicted_telemetry.pred_bandwidth_utilization {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Predicted BW" + }, + { + "id": "color", + "value": { + "fixedColor": "red", + "mode": "fixed" + } + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 0, + "y": 31 + }, + "id": 12, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "E2E-L1: Bandwidth Comparison (Actual vs Predicted)", + "type": "timeseries" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisBorderShow": false, + "axisCenteredZero": false, + "axisColorMode": "text", + "axisLabel": "Latency (ms)", + "axisPlacement": "auto", + "barAlignment": 0, + "barWidthFactor": 0.6, + "drawStyle": "line", + "fillOpacity": 15, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "insertNulls": false, + "lineInterpolation": "smooth", + "lineWidth": 2, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "showValues": false, + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 1, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": 0 + } + ] + }, + "unit": "ms" + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "link_telemetry.latency {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Actual Latency" + }, + { + "id": "color", + "value": { + "fixedColor": "green", + "mode": "fixed" + } + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "predicted_telemetry.pred_latency {link_id: E2E-L1, network_id: e2e}" + }, + "properties": [ + { + "id": "displayName", + "value": "Predicted Latency" + }, + { + "id": "color", + "value": { + "fixedColor": "orange", + "mode": "fixed" + } + }, + { + "id": "custom.lineStyle", + "value": { + "dash": [10, 10], + "fill": "dash" + } + } + ] + } + ] + }, + "gridPos": { + "h": 12, + "w": 12, + "x": 12, + "y": 31 + }, + "id": 13, + "options": { + "legend": { + "calcs": [ + "min", + "max", + "mean", + "last" + ], + "displayMode": "table", + "placement": "bottom", + "showLegend": true + }, + "tooltip": { + "hideZeros": false, + "mode": "multi", + "sort": "none" + } + }, + "pluginVersion": "12.3.1", + "targets": [ + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "A", + "resultFormat": "time_series" + }, + { + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", + "rawQuery": true, + "refId": "B", + "resultFormat": "time_series" + } + ], + "title": "E2E-L1: Latency Comparison (Actual vs Predicted)", + "type": "timeseries" + } + ], + "preload": false, + "refresh": "10s", + "schemaVersion": 42, + "tags": [ + "simap", + "telemetry", + "sla", + "service-monitoring" + ], + "templating": { + "list": [ + { + "current": { + "text": "influxdb-SIMAP-Server", + "value": "cf9ge11vhadj4b" + }, + "includeAll": false, + "label": "InfluxDB Datasource", + "name": "DS_INFLUXDB", + "options": [], + "query": "influxdb", + "refresh": 1, + "regex": "", + "type": "datasource" + }, + { + "auto": false, + "auto_count": 30, + "auto_min": "10s", + "current": { + "text": "5m", + "value": "5m" + }, + "label": "Time Range", + "name": "timerange", + "options": [ + { + "selected": true, + "text": "5m", + "value": "5m" + }, + { + "selected": false, + "text": "10m", + "value": "10m" + }, + { + "selected": false, + "text": "20m", + "value": "20m" + }, + { + "selected": false, + "text": "1h", + "value": "1h" + }, + { + "selected": false, + "text": "3h", + "value": "3h" + }, + { + "selected": false, + "text": "6h", + "value": "6h" + }, + { + "selected": false, + "text": "12h", + "value": "12h" + } + ], + "query": "5m,10m,20m,1h,3h,6h,12h", + "refresh": 2, + "type": "interval" + }, + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", + "includeAll": true, + "label": "Network ID", + "multi": true, + "name": "network_id", + "options": [], + "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + }, + { + "current": { + "selected": true, + "text": "All", + "value": "$__all" + }, + "datasource": { + "type": "influxdb", + "uid": "${DS_INFLUXDB}" + }, + "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", + "includeAll": true, + "label": "Link ID", + "multi": true, + "name": "link_id", + "options": [], + "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", + "refresh": 1, + "regex": "", + "skipUrlSync": false, + "sort": 0, + "type": "query" + } + ] + }, + "time": { + "from": "now-15m", + "to": "now" + }, + "timepicker": { + "refresh_intervals": [ + "5s", + "10s", + "30s", + "1m", + "5m" + ] + }, + "timezone": "", + "title": "SIMAP V4 Service-Level SLA Monitoring", + "uid": "simap-sla-monitoring-v4", + "version": 7 +} \ No newline at end of file -- GitLab From 651ee4a8be9537cec68b5ba369aa627583791222 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 23 Feb 2026 07:09:19 +0000 Subject: [PATCH 73/78] feat: Update test payload and logging for analyze and stop-all analyses endpoints --- .../mwc26-f5ga/AI_analytics_engine/tests/run_test.sh | 4 +++- .../AI_analytics_engine/tests/test_api_docker.py | 11 ++++++----- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh index 7482b1b00..3025d419d 100755 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/run_test.sh @@ -37,7 +37,9 @@ LOG_FILE="${PWD}/test_api_docker.log" TEST_FILE="${PWD}/test_api_docker.py" # Run the test with logging enabled and capture output -pytest $TEST_FILE::test_analyze_endpoint \ + +pytest $TEST_FILE::test_stop_all_analyses_endpoint \ +# pytest $TEST_FILE::test_analyze_endpoint \ -v -s \ --log-cli-level=DEBUG \ --log-file="${LOG_FILE}" \ diff --git a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py index 96c7c3616..94ec851bb 100644 --- a/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py +++ b/src/tests/mwc26-f5ga/AI_analytics_engine/tests/test_api_docker.py @@ -89,15 +89,15 @@ def test_analyze_endpoint(ai_engine_server_connection_confirmation): # Prepare test payload with SLA policy configuration payload = { - "simap_id": "L1", + "simap_id": "E2E-L1", "sla_metrics": { "latency_threshold_ms": 0, "bandwidth_utilization": 0.0 }, "history_window_size_sec": 60, - "forecast_sample_interval_sec": 5, + "forecast_sample_interval_sec": 30, "forecast_sample_count": 50, - "duration_minutes": 2 # Short duration for testing + "duration_minutes": 10 # Short duration for testing } LOGGER.info(f"Sending analyze request with payload: {payload}") @@ -124,7 +124,7 @@ def test_analyze_endpoint(ai_engine_server_connection_confirmation): if response.status_code == 202: LOGGER.info("Analysis started successfully") assert data['status'] == 'accepted', f"Expected status 'accepted', got '{data['status']}'" - assert data['simap_id'] == 'L1', f"Expected simap_id 'L1', got '{data['simap_id']}'" + assert data['simap_id'] == 'E2E-L1', f"Expected simap_id 'E2E-L1', got '{data['simap_id']}'" assert data['duration_minutes'] == 2, f"Expected duration_minutes 2, got '{data['duration_minutes']}'" assert '/osm/aiAnalyticsEvent/v1' in data['endpoint'], f"Expected '/osm/aiAnalyticsEvent/v1' in endpoint" elif response.status_code == 503: @@ -293,7 +293,7 @@ def test_stop_all_analyses_endpoint(ai_engine_server): """ LOGGER.info(">>>>>> Starting test_case test_stop_all_analyses_endpoint: POST /api/v1/analyze/stop-all endpoint") - started_ids = ["L1"] + started_ids = ["E2E-L1"] # Only proceed if at least one analysis started @@ -330,3 +330,4 @@ def test_stop_all_analyses_endpoint(ai_engine_server): LOGGER.info("<<<<<< Finished test_case test_stop_all_analyses_endpoint") +# TODO: Add here test for notify endpoint from @blueprint.route('/notify', methods=['POST']) \ No newline at end of file -- GitLab From e427d5dd1991e7e728ed5d30b89bf6f401733238 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 23 Feb 2026 08:08:54 +0000 Subject: [PATCH 74/78] feat: Enhance aggregation logic to reuse last valid data when no samples are available --- .../telemetry/worker/data/AggregationCache.py | 28 +++++++++++++++++-- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py index 5fa88fbca..05e2e35b9 100644 --- a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py +++ b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py @@ -15,8 +15,8 @@ import logging, threading from dataclasses import dataclass, field -from datetime import datetime -from typing import Dict, Set, Tuple +from datetime import datetime, timezone +from typing import Dict, Optional, Set, Tuple LOGGER = logging.getLogger(__name__) @@ -43,6 +43,7 @@ class AggregationCache: def __init__(self) -> None: self._lock = threading.Lock() self._samples : Dict[Tuple[str, str], LinkSample] = dict() + self._last_valid_aggregation : Optional[AggregatedLinkSample] = None def update(self, link_sample : LinkSample) -> None: @@ -65,7 +66,26 @@ class AggregationCache: MSG = '[aggregate] Aggregating {:d} supporting link(s)' LOGGER.info(MSG.format(num_samples)) - agg = AggregatedLinkSample(timestamp=datetime.utcnow()) + if num_samples == 0: + if self._last_valid_aggregation is not None: + MSG = '[aggregate] No samples available, reusing last valid aggregation: BW={:.2f}%, Latency={:.3f}ms' + LOGGER.warning(MSG.format( + self._last_valid_aggregation.bandwidth_utilization, + self._last_valid_aggregation.latency + )) + # Return a copy with updated timestamp + return AggregatedLinkSample( + timestamp=datetime.now(timezone.utc).isoformat(), + bandwidth_utilization=self._last_valid_aggregation.bandwidth_utilization, + latency=self._last_valid_aggregation.latency, + related_service_ids=self._last_valid_aggregation.related_service_ids.copy() + ) + else: + MSG = '[aggregate] No samples available and no cached data, returning zeros' + LOGGER.warning(MSG) + return AggregatedLinkSample(timestamp=datetime.now(timezone.utc).isoformat()) + + agg = AggregatedLinkSample(timestamp=datetime.now(timezone.utc).isoformat()) for link_key, sample in self._samples.items(): network_id, link_id = link_key @@ -90,5 +110,7 @@ class AggregationCache: agg.bandwidth_utilization, agg.latency, str(agg.related_service_ids) )) + # Cache this valid aggregation for future use + self._last_valid_aggregation = agg return agg -- GitLab From 1fd185e44655bcb1154d037c64dd79202a70a730 Mon Sep 17 00:00:00 2001 From: Waleed Akbar Date: Mon, 23 Feb 2026 08:16:09 +0000 Subject: [PATCH 75/78] feat: Update timestamp handling in AggregatedLinkSample to use datetime object directly --- .../service/telemetry/worker/data/AggregationCache.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py index 05e2e35b9..7c71a8926 100644 --- a/src/simap_connector/service/telemetry/worker/data/AggregationCache.py +++ b/src/simap_connector/service/telemetry/worker/data/AggregationCache.py @@ -75,7 +75,7 @@ class AggregationCache: )) # Return a copy with updated timestamp return AggregatedLinkSample( - timestamp=datetime.now(timezone.utc).isoformat(), + timestamp=datetime.now(timezone.utc), bandwidth_utilization=self._last_valid_aggregation.bandwidth_utilization, latency=self._last_valid_aggregation.latency, related_service_ids=self._last_valid_aggregation.related_service_ids.copy() @@ -83,9 +83,9 @@ class AggregationCache: else: MSG = '[aggregate] No samples available and no cached data, returning zeros' LOGGER.warning(MSG) - return AggregatedLinkSample(timestamp=datetime.now(timezone.utc).isoformat()) + return AggregatedLinkSample(timestamp=datetime.now(timezone.utc)) - agg = AggregatedLinkSample(timestamp=datetime.now(timezone.utc).isoformat()) + agg = AggregatedLinkSample(timestamp=datetime.now(timezone.utc)) for link_key, sample in self._samples.items(): network_id, link_id = link_key -- GitLab From 59dc67ba44a5d27cc1a4481b1ebcb9b51110bf94 Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 16 Apr 2026 09:59:26 +0000 Subject: [PATCH 76/78] pre-merge code cleanup --- manifests/deviceservice.yaml | 2 +- .../tools/context_queries/Connection.py | 3 +- .../service/simap_updater/Tools.py | 6 +- .../data/slices/l3vpn_request_from_agg.json | 185 ------------------ .../data/slices/network-slice1.json | 4 +- .../data/slices/network-slice2.json | 52 ++--- src/tests/ecoc25-f5ga-telemetry/deploy.sh | 28 +-- src/tests/ecoc25-f5ga-telemetry/destroy.sh | 8 +- 8 files changed, 51 insertions(+), 237 deletions(-) delete mode 100644 src/tests/ecoc25-f5ga-telemetry/data/slices/l3vpn_request_from_agg.json diff --git a/manifests/deviceservice.yaml b/manifests/deviceservice.yaml index 7c3ded7c0..a366a5041 100644 --- a/manifests/deviceservice.yaml +++ b/manifests/deviceservice.yaml @@ -39,7 +39,7 @@ spec: - containerPort: 9192 env: - name: LOG_LEVEL - value: "DEBUG" + value: "INFO" startupProbe: exec: command: ["/bin/grpc_health_probe", "-addr=:2020"] diff --git a/src/common/tools/context_queries/Connection.py b/src/common/tools/context_queries/Connection.py index 99e982f51..88ccb1bf2 100644 --- a/src/common/tools/context_queries/Connection.py +++ b/src/common/tools/context_queries/Connection.py @@ -13,8 +13,7 @@ # limitations under the License. import grpc, logging -from typing import List, Optional -from common.Constants import DEFAULT_CONTEXT_NAME +from typing import Optional from common.proto.context_pb2 import Connection, ConnectionId from context.client.ContextClient import ContextClient diff --git a/src/simap_connector/service/simap_updater/Tools.py b/src/simap_connector/service/simap_updater/Tools.py index d08228640..1c5c3e092 100644 --- a/src/simap_connector/service/simap_updater/Tools.py +++ b/src/simap_connector/service/simap_updater/Tools.py @@ -16,8 +16,8 @@ import enum from typing import List, Optional, Set, Tuple, Union from common.proto.context_pb2 import ( - EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, ConnectionId, - DeviceEvent, Link, LinkEvent, Service, ServiceEvent, SliceEvent, TopologyEvent, Empty + EVENTTYPE_CREATE, EVENTTYPE_REMOVE, EVENTTYPE_UPDATE, Device, DeviceEvent, + Link, LinkEvent, Service, ServiceEvent, SliceEvent, TopologyEvent, Empty ) from common.tools.grpc.Tools import grpc_message_to_json_string from context.client.ContextClient import ContextClient @@ -221,4 +221,4 @@ def get_connection_endpoints_and_links(connection_id: str) -> Tuple[List[Tuple[s link_uuids.append(link.link_id.link_uuid.uuid) break - return endpoint_ids, link_uuids \ No newline at end of file + return endpoint_ids, link_uuids diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/l3vpn_request_from_agg.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/l3vpn_request_from_agg.json deleted file mode 100644 index ba9c9d853..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/l3vpn_request_from_agg.json +++ /dev/null @@ -1,185 +0,0 @@ -{ - "ietf-l3vpn-svc:l3vpn-svc": { - "sites": { - "site": [ - { - "devices": { - "device": [ - { - "device-id": "P-PE1", - "location": "access" - } - ] - }, - "locations": { - "location": [ - { - "location-id": "access" - } - ] - }, - "management": { - "type": "ietf-l3vpn-svc:provider-managed" - }, - "routing-protocols": { - "routing-protocol": [ - { - "static": { - "cascaded-lan-prefixes": { - "ipv4-lan-prefixes": [ - { - "lan": "172.1.101.22/24", - "lan-tag": "21", - "next-hop": "128.32.44.254" - } - ] - } - }, - "type": "ietf-l3vpn-svc:static" - } - ] - }, - "site-id": "site_access", - "site-network-accesses": { - "site-network-access": [ - { - "device-reference": "P-PE1", - "ip-connection": { - "ipv4": { - "address-allocation-type": "ietf-l3vpn-svc:static-address", - "addresses": { - "customer-address": "128.32.44.254", - "prefix-length": "24", - "provider-address": "128.32.44.254" - } - } - }, - "service": { - "qos": { - "qos-profile": { - "classes": { - "class": [ - { - "bandwidth": { - "guaranteed-bw-percent": 100 - }, - "class-id": "qos-realtime", - "direction": "ietf-l3vpn-svc:both", - "latency": { - "latency-boundary": 20 - } - } - ] - } - } - }, - "svc-input-bandwidth": 1000000000, - "svc-mtu": 1500, - "svc-output-bandwidth": 5000000000 - }, - "site-network-access-id": "200", - "site-network-access-type": "ietf-l3vpn-svc:multipoint", - "vpn-attachment": { - "site-role": "ietf-l3vpn-svc:hub-role", - "vpn-id": "slice25" - } - } - ] - } - }, - { - "devices": { - "device": [ - { - "device-id": "P-PE2", - "location": "cloud" - } - ] - }, - "locations": { - "location": [ - { - "location-id": "cloud" - } - ] - }, - "management": { - "type": "ietf-l3vpn-svc:provider-managed" - }, - "routing-protocols": { - "routing-protocol": [ - { - "static": { - "cascaded-lan-prefixes": { - "ipv4-lan-prefixes": [ - { - "lan": "172.16.104.221/24", - "lan-tag": "201", - "next-hop": "172.10.44.254" - } - ] - } - }, - "type": "ietf-l3vpn-svc:static" - } - ] - }, - "site-id": "site_cloud", - "site-network-accesses": { - "site-network-access": [ - { - "device-reference": "P-PE2", - "ip-connection": { - "ipv4": { - "address-allocation-type": "ietf-l3vpn-svc:static-address", - "addresses": { - "customer-address": "172.10.44.254", - "prefix-length": "24", - "provider-address": "172.10.44.254" - } - } - }, - "service": { - "qos": { - "qos-profile": { - "classes": { - "class": [ - { - "bandwidth": { - "guaranteed-bw-percent": 100 - }, - "class-id": "qos-realtime", - "direction": "ietf-l3vpn-svc:both", - "latency": { - "latency-boundary": 10 - } - } - ] - } - } - }, - "svc-input-bandwidth": 5000000000, - "svc-mtu": 1500, - "svc-output-bandwidth": 1000000000 - }, - "site-network-access-id": "200", - "site-network-access-type": "ietf-l3vpn-svc:multipoint", - "vpn-attachment": { - "site-role": "ietf-l3vpn-svc:spoke-role", - "vpn-id": "slice25" - } - } - ] - } - } - ] - }, - "vpn-services": { - "vpn-service": [ - { - "vpn-id": "slice25" - } - ] - } - } -} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json index 2d5f6604b..ef050bbac 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice1.json @@ -2,7 +2,7 @@ "slice-service": [ { "id": "slice1", - "description": "network slice 1, PC1-VM1 - using IP transport network", + "description": "network slice 1, PC1-VM1", "sdps": { "sdp": [ { @@ -115,4 +115,4 @@ } } ] -} \ No newline at end of file +} diff --git a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json index e5abe1286..2bc13b12e 100644 --- a/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json +++ b/src/tests/ecoc25-f5ga-telemetry/data/slices/network-slice2.json @@ -1,52 +1,52 @@ { "slice-service": [ { - "id": "slice2", - "description": "network slice 2, PC1-VM2 - using optical transport network", + "id": "slice2", + "description": "network slice 2, PC1-VM2", "sdps": { "sdp": [ { - "id": "1", - "node-id": "ONT1", + "id": "1", + "node-id": "ONT1", "sdp-ip-address": ["172.16.61.10"], "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ - {"type": "ietf-network-slice-service:vlan", "value": ["31"]}, - {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, - {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, + {"type": "ietf-network-slice-service:vlan", "value": ["31"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.16.104.221/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10500"]}, {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.1.201.22/24"]}, - {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10200"]} ], "target-connection-group-id": "line2" }]}, "attachment-circuits": {"attachment-circuit": [{ - "id": "AC ONT1", - "description": "AC ONT1 connected to PC1", - "ac-node-id": "ONT1", - "ac-tp-id": "200" + "id": "AC ONT1", + "description": "AC ONT1 connected to PC1", + "ac-node-id": "ONT1", + "ac-tp-id": "200" }]} }, { - "id": "2", - "node-id": "POP1", + "id": "2", + "node-id": "POP1", "sdp-ip-address": ["172.16.204.220"], "service-match-criteria": {"match-criterion": [{ "index": 1, "match-type": [ - {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, - {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.201.22/24"]}, - {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, + {"type": "ietf-network-slice-service:vlan", "value": ["101"]}, + {"type": "ietf-network-slice-service:source-ip-prefix", "value": ["172.1.201.22/24"]}, + {"type": "ietf-network-slice-service:source-tcp-port", "value": ["10200"]}, {"type": "ietf-network-slice-service:destination-ip-prefix", "value": ["172.16.104.221/24"]}, - {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} + {"type": "ietf-network-slice-service:destination-tcp-port", "value": ["10500"]} ], "target-connection-group-id": "line2" }]}, "attachment-circuits": {"attachment-circuit": [{ - "id": "AC POP1 to VM2", - "description": "AC POP1 connected to VM2", - "ac-node-id": "POP1", - "ac-tp-id": "200" + "id": "AC POP1 to VM2", + "description": "AC POP1 connected to VM2", + "ac-node-id": "POP1", + "ac-tp-id": "200" }]} } ] @@ -93,16 +93,16 @@ { "metric-type": "ietf-network-slice-service:one-way-delay-maximum", "metric-unit": "milliseconds", - "bound": "20" + "bound": "20" }, { "metric-type": "ietf-network-slice-service:one-way-bandwidth", "metric-unit": "Mbps", - "bound": "4000" + "bound": "4000" }, { - "metric-type": "ietf-network-slice-service:two-way-packet-loss", - "metric-unit": "percentage", + "metric-type": "ietf-network-slice-service:two-way-packet-loss", + "metric-unit": "percentage", "percentile-value": "0.001" } ] diff --git a/src/tests/ecoc25-f5ga-telemetry/deploy.sh b/src/tests/ecoc25-f5ga-telemetry/deploy.sh index bf69a0a34..66a6f6ffb 100755 --- a/src/tests/ecoc25-f5ga-telemetry/deploy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/deploy.sh @@ -14,7 +14,7 @@ # limitations under the License. -# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl # Get the current hostname HOSTNAME=$(hostname) @@ -22,10 +22,10 @@ echo "Deploying in ${HOSTNAME}..." case "$HOSTNAME" in - simap-server) - echo "Building SIMAP Server..." + simap-datastore) + echo "Building SIMAP DataStore..." cd ~/tfs-ctrl/ - docker buildx build -t simap-server:mock -f ./src/tests/tools/simap_server/Dockerfile . + docker buildx build -t simap-datastore:mock -f ./src/tests/tools/simap_datastore/Dockerfile . echo "Building NCE-FAN Controller..." cd ~/tfs-ctrl/ @@ -35,21 +35,21 @@ case "$HOSTNAME" in cd ~/tfs-ctrl/ docker buildx build -t nce-t-ctrl:mock -f ./src/tests/tools/mock_nce_t_ctrl/Dockerfile . - # echo "Building Traffic Changer..." - # cd ~/tfs-ctrl/ - # docker buildx build -t traffic-changer:mock -f ./src/tests/tools/traffic_changer/Dockerfile . + echo "Building Traffic Changer..." + cd ~/tfs-ctrl/ + docker buildx build -t traffic-changer:mock -f ./src/tests/tools/traffic_changer/Dockerfile . echo "Cleaning up..." - docker rm --force simap-server + docker rm --force simap-datastore docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl - # docker rm --force traffic-changer + docker rm --force traffic-changer echo "Deploying support services..." - docker run --detach --name simap-server --publish 8080:8080 simap-server:mock + docker run --detach --name simap-datastore --publish 8080:8080 simap-datastore:mock docker run --detach --name nce-fan-ctrl --publish 8081:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-fan-ctrl:mock docker run --detach --name nce-t-ctrl --publish 8082:8080 --env SIMAP_ADDRESS=172.17.0.1 --env SIMAP_PORT=8080 nce-t-ctrl:mock - # docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock + docker run --detach --name traffic-changer --publish 8083:8080 traffic-changer:mock sleep 2 docker ps -a @@ -80,9 +80,9 @@ case "$HOSTNAME" in source ~/tfs-ctrl/src/tests/ecoc25-f5ga-telemetry/deploy-specs-ip.sh ./deploy/all.sh - # echo "Waiting for NATS connection..." - # while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done - # kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server + echo "Waiting for NATS connection..." + while ! kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server 2>&1 | grep -q 'Subscriber is Ready? True'; do sleep 1; done + kubectl --namespace $TFS_K8S_NAMESPACE logs deployment/contextservice -c server ;; *) echo "Unknown host: $HOSTNAME" diff --git a/src/tests/ecoc25-f5ga-telemetry/destroy.sh b/src/tests/ecoc25-f5ga-telemetry/destroy.sh index 37850e5f7..52cbd1353 100755 --- a/src/tests/ecoc25-f5ga-telemetry/destroy.sh +++ b/src/tests/ecoc25-f5ga-telemetry/destroy.sh @@ -14,7 +14,7 @@ # limitations under the License. -# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl # Get the current hostname HOSTNAME=$(hostname) @@ -22,12 +22,12 @@ echo "Destroying in ${HOSTNAME}..." case "$HOSTNAME" in - simap-server) + simap-datastore) echo "Cleaning up..." - docker rm --force simap-server + docker rm --force simap-datastore docker rm --force nce-fan-ctrl docker rm --force nce-t-ctrl - # docker rm --force traffic-changer + docker rm --force traffic-changer sleep 2 docker ps -a -- GitLab From b3f2128424fbe705f80f8f8e446227d83c67852e Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 16 Apr 2026 10:00:54 +0000 Subject: [PATCH 77/78] pre-merge code cleanup --- .../dummy_L3VPN_delete.sh | 29 ------------------ .../dummy_L3VPN_request.sh | 30 ------------------- 2 files changed, 59 deletions(-) delete mode 100755 src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_delete.sh delete mode 100755 src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh diff --git a/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_delete.sh b/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_delete.sh deleted file mode 100755 index d5e199e0c..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_delete.sh +++ /dev/null @@ -1,29 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# ------------- -# For direct testing of L3VPN delete from IP-Controller, without the need to trigger it from AGG-Controller. -# This is a dummy script that replicates the behavior of AGG-Controller when it sends a delete request to IP-Controller. -# -------------- - -cd $(dirname $0) - -echo "[IP-Controller] sending L3VPN delete (dummy replicating AGG-Controller )..." -curl --request DELETE --user admin:admin --location \ - http://10.254.0.12:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services/vpn-service=slice25 - -echo - -echo "Done! Delete!" diff --git a/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh b/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh deleted file mode 100755 index c195fe34f..000000000 --- a/src/tests/ecoc25-f5ga-telemetry/dummy_L3VPN_request.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# Copyright 2022-2025 ETSI SDG TeraFlowSDN (TFS) (https://tfs.etsi.org/) -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -# ------------- -# For direct testing of L3VPN request from IP-Controller, without the need to trigger it from AGG-Controller. -# This is a dummy script that replicates the behavior of AGG-Controller when it sends a request to IP-Controller. -# -------------- - -cd $(dirname $0) - -echo "[IP-Controller] sending L3VPN request (dummy replicating AGG-Controller request)..." -curl --request POST --location --user admin:admin --header 'Content-Type: application/json' \ - --data @data/slices/l3vpn_request_from_agg.json \ - http://127.0.0.1:80/restconf/data/ietf-l3vpn-svc:l3vpn-svc/vpn-services -echo - - -echo "Done!" -- GitLab From f7cf93808051c5189e428f90398b50e3df43dabf Mon Sep 17 00:00:00 2001 From: gifrerenom Date: Thu, 16 Apr 2026 10:09:30 +0000 Subject: [PATCH 78/78] pre-merge code cleanup --- src/tests/ecoc25-f5ga-telemetry/dump-logs.sh | 6 +- src/tests/mwc26-f5ga/deploy.sh | 4 +- src/tests/mwc26-f5ga/dump-logs.sh | 6 +- ...afana-dashboard-simap-sla-monitoring.json} | 0 ...ana-dashboard-simap-sla-monitoring_V1.json | 1172 ----------------- ...ana-dashboard-simap-sla-monitoring_V2.json | 805 ----------- ...ashboard-simap-sla-monitoring_V3 copy.json | 1117 ---------------- 7 files changed, 10 insertions(+), 3100 deletions(-) rename src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/{grafana-dashboard-simap-sla-monitoring_V4.json => grafana-dashboard-simap-sla-monitoring.json} (100%) delete mode 100644 src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V1.json delete mode 100644 src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V2.json delete mode 100644 src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3 copy.json diff --git a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh index ec68fe5e7..62d3c587d 100755 --- a/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh +++ b/src/tests/ecoc25-f5ga-telemetry/dump-logs.sh @@ -16,6 +16,8 @@ # Set working directory cd "$(dirname "$0")" || exit 1 +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl + # Get the current hostname HOSTNAME=$(hostname) echo "Collecting logs for ${HOSTNAME}..." @@ -24,9 +26,9 @@ rm logs -rf tmp/exec mkdir -p tmp/exec case "$HOSTNAME" in - simap-server) + simap-datastore) echo "Collecting Docker container logs..." - docker logs simap-server > tmp/exec/simap-server.log 2>&1 + docker logs simap-datastore > tmp/exec/simap-datastore.log 2>&1 docker logs nce-fan-ctrl > tmp/exec/nce-fan-ctrl.log 2>&1 docker logs nce-t-ctrl > tmp/exec/nce-t-ctrl.log 2>&1 docker logs traffic-changer > tmp/exec/traffic-changer.log 2>&1 diff --git a/src/tests/mwc26-f5ga/deploy.sh b/src/tests/mwc26-f5ga/deploy.sh index c388e9eb6..4cda867d5 100755 --- a/src/tests/mwc26-f5ga/deploy.sh +++ b/src/tests/mwc26-f5ga/deploy.sh @@ -18,14 +18,14 @@ set -euo pipefail SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT="$(cd "${SCRIPT_DIR}/../../../.." && pwd)" -# Assuming the instances are named as: simap-server, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl # Get the current hostname HOSTNAME=$(hostname) echo "Deploying in ${HOSTNAME}..." case "$HOSTNAME" in - simap-server) + simap-datastore) echo "Building SIMAP DataStore..." cd "${REPO_ROOT}" docker buildx build -t simap-datastore:mock -f ./src/tests/tools/simap_datastore/Dockerfile . diff --git a/src/tests/mwc26-f5ga/dump-logs.sh b/src/tests/mwc26-f5ga/dump-logs.sh index cf995b27a..391307fd9 100755 --- a/src/tests/mwc26-f5ga/dump-logs.sh +++ b/src/tests/mwc26-f5ga/dump-logs.sh @@ -16,6 +16,8 @@ # Set working directory cd "$(dirname "$0")" || exit 1 +# Assuming the instances are named as: simap-datastore, tfs-e2e-ctrl, tfs-agg-ctrl, tfs-ip-ctrl + # Get the current hostname HOSTNAME=$(hostname) echo "Collecting logs for ${HOSTNAME}..." @@ -24,9 +26,9 @@ rm logs -rf tmp/exec mkdir -p tmp/exec case "$HOSTNAME" in - simap-server) + simap-datastore) echo "Collecting Docker container logs..." - docker logs simap-server > tmp/exec/simap-server.log 2>&1 + docker logs simap-datastore > tmp/exec/simap-datastore.log 2>&1 docker logs nce-fan-ctrl > tmp/exec/nce-fan-ctrl.log 2>&1 docker logs nce-t-ctrl > tmp/exec/nce-t-ctrl.log 2>&1 docker logs ai-engine > tmp/exec/ai-engine.log 2>&1 diff --git a/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V4.json b/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring.json similarity index 100% rename from src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V4.json rename to src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring.json diff --git a/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V1.json b/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V1.json deleted file mode 100644 index 310c904ae..000000000 --- a/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V1.json +++ /dev/null @@ -1,1172 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "enable": true, - "iconColor": "orange", - "name": "Service Upgrades", - "target": { - "limit": 100, - "matchAny": false, - "tags": [ - { - "key": "status", - "operator": "=", - "value": "UPGRADE" - } - ], - "type": "tags" - }, - "query": "SELECT * FROM telemetry_notifications WHERE status='UPGRADE'", - "textColumn": "timestamp", - "titleColumn": "", - "tagsColumn": "status" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "enable": true, - "iconColor": "red", - "name": "Service Downgrades", - "target": { - "limit": 100, - "matchAny": false, - "tags": [ - { - "key": "status", - "operator": "=", - "value": "DOWNGRADE" - } - ], - "type": "tags" - }, - "query": "SELECT * FROM telemetry_notifications WHERE status='DOWNGRADE'", - "textColumn": "timestamp", - "titleColumn": "", - "tagsColumn": "status" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 1, - "id": null, - "links": [], - "liveNow": false, - "panels": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "gridPos": { - "h": 2, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 1, - "options": { - "code": { - "language": "plaintext", - "showLineNumbers": false, - "showMiniMap": false - }, - "content": "# SIMAP Service-Level SLA Monitoring\n\nReal-time tracking of link bandwidth utilization and latency across networks. UPGRADE ⬆️ and DOWNGRADE ⬇️ events are shown as timeline annotations.", - "mode": "markdown" - }, - "pluginVersion": "10.0.0", - "title": "Dashboard Overview", - "type": "text" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 70 - }, - { - "color": "red", - "value": 90 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 6, - "x": 0, - "y": 2 - }, - "id": 2, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT MEAN(bandwidth_utilization) as \"Average BW Utilization\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(10s) fill(null)", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - } - ], - "title": "Average Bandwidth Utilization (5min)", - "type": "gauge" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 80 - }, - { - "color": "red", - "value": 95 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 6, - "x": 6, - "y": 2 - }, - "id": 3, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT MAX(bandwidth_utilization) as \"Peak BW Utilization\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(10s) fill(null)", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - } - ], - "title": "Peak Bandwidth Utilization (5min)", - "type": "gauge" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 50 - }, - { - "color": "red", - "value": 100 - } - ] - }, - "unit": "ms" - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 6, - "x": 12, - "y": 2 - }, - "id": 4, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT MEAN(latency) as \"Average Latency\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(10s) fill(null)", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - } - ], - "title": "Average Latency (5min)", - "type": "gauge" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 75 - }, - { - "color": "red", - "value": 150 - } - ] - }, - "unit": "ms" - }, - "overrides": [] - }, - "gridPos": { - "h": 5, - "w": 6, - "x": 18, - "y": 2 - }, - "id": 5, - "options": { - "orientation": "auto", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showThresholdLabels": false, - "showThresholdMarkers": true - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT MAX(latency) as \"Peak Latency\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(10s) fill(null)", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - } - ], - "title": "Peak Latency (5min)", - "type": "gauge" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 15, - "gradientMode": "none", - "hideFrom": { - "tooltip": false, - "viz": false, - "legend": false - }, - "lineInterpolation": "smooth", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "line" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 70 - }, - { - "color": "red", - "value": 90 - } - ] - }, - "unit": "percent" - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 24, - "x": 0, - "y": 7 - }, - "id": 6, - "options": { - "legend": { - "calcs": [ - "last", - "mean", - "max" - ], - "displayMode": "table", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - } - ], - "title": "Link Bandwidth Utilization (%) - Timeline", - "type": "timeseries" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "line", - "fillOpacity": 15, - "gradientMode": "none", - "hideFrom": { - "tooltip": false, - "viz": false, - "legend": false - }, - "lineInterpolation": "smooth", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "line" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 50 - }, - { - "color": "red", - "value": 100 - } - ] - }, - "unit": "ms" - }, - "overrides": [] - }, - "gridPos": { - "h": 10, - "w": 24, - "x": 0, - "y": 17 - }, - "id": 7, - "options": { - "legend": { - "calcs": [ - "last", - "mean", - "max" - ], - "displayMode": "table", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - } - ], - "title": "Link Latency (ms) - Timeline", - "type": "timeseries" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 70 - }, - { - "color": "red", - "value": 90 - } - ] - }, - "unit": "short" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Bandwidth Utilization (%)" - }, - "properties": [ - { - "id": "custom.cellOptions", - "value": { - "type": "gauge", - "mode": "gradient" - } - }, - { - "id": "unit", - "value": "percent" - }, - { - "id": "max", - "value": 100 - }, - { - "id": "min", - "value": 0 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Latency (ms)" - }, - "properties": [ - { - "id": "custom.cellOptions", - "value": { - "type": "color-background", - "mode": "gradient" - } - }, - { - "id": "unit", - "value": "ms" - }, - { - "id": "thresholds", - "value": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "yellow", - "value": 50 - }, - { - "color": "red", - "value": 100 - } - ] - } - } - ] - } - ] - }, - "gridPos": { - "h": 10, - "w": 24, - "x": 0, - "y": 27 - }, - "id": 8, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "Bandwidth Utilization (%)" - } - ] - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT LAST(bandwidth_utilization) as \"Bandwidth Utilization (%)\", LAST(latency) as \"Latency (ms)\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "table" - } - ], - "title": "Link Comparison Table - Current Status", - "transformations": [ - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true - }, - "indexByName": {}, - "renameByName": { - "network_id": "Network ID", - "link_id": "Link ID" - } - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "hideFrom": { - "tooltip": false, - "viz": false, - "legend": false - } - }, - "mappings": [] - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 37 - }, - "id": 9, - "options": { - "displayLabels": [ - "name", - "percent" - ], - "legend": { - "displayMode": "table", - "placement": "right", - "showLegend": true, - "values": [ - "value" - ] - }, - "pieType": "pie", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "tooltip": { - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT LAST(bandwidth_utilization) FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - } - ], - "title": "Bandwidth Distribution by Link", - "type": "piechart" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "drawStyle": "bars", - "fillOpacity": 80, - "gradientMode": "hue", - "hideFrom": { - "tooltip": false, - "viz": false, - "legend": false - }, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "never", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - }, - "unit": "ms" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 37 - }, - "id": 10, - "options": { - "legend": { - "calcs": [ - "mean", - "max" - ], - "displayMode": "table", - "placement": "right", - "showLegend": true - }, - "tooltip": { - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT LAST(latency) FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ GROUP BY time(30s), link_id fill(null)", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - } - ], - "title": "Latency Comparison Across Links", - "type": "timeseries" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "inspect": false - }, - "mappings": [ - { - "options": { - "UPGRADE": { - "color": "green", - "index": 0, - "text": "⬆️ UPGRADE" - }, - "DOWNGRADE": { - "color": "red", - "index": 1, - "text": "⬇️ DOWNGRADE" - } - }, - "type": "value" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Time" - }, - "properties": [ - { - "id": "custom.width", - "value": 180 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Status" - }, - "properties": [ - { - "id": "custom.cellOptions", - "value": { - "type": "color-background" - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 45 - }, - "id": 11, - "options": { - "cellHeight": "sm", - "footer": { - "countRows": false, - "fields": "", - "reducer": [ - "sum" - ], - "show": false - }, - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "Time" - } - ] - }, - "pluginVersion": "10.0.0", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT status as \"Status\", timestamp as \"Timestamp Info\" FROM telemetry_notifications WHERE time > now() - $timerange ORDER BY time DESC LIMIT 50", - "rawQuery": true, - "refId": "A", - "resultFormat": "table" - } - ], - "title": "Service Status Change Events (UPGRADE/DOWNGRADE)", - "transformations": [ - { - "id": "organize", - "options": { - "excludeByName": {}, - "indexByName": { - "Time": 0, - "Status": 1, - "Timestamp Info": 2 - }, - "renameByName": {} - } - } - ], - "type": "table" - } - ], - "refresh": "10s", - "schemaVersion": 38, - "style": "dark", - "tags": [ - "simap", - "telemetry", - "sla", - "service-monitoring" - ], - "templating": { - "list": [ - { - "current": { - "selected": false, - "text": "InfluxDB", - "value": "InfluxDB" - }, - "hide": 0, - "includeAll": false, - "label": "InfluxDB Datasource", - "multi": false, - "name": "DS_INFLUXDB", - "options": [], - "query": "influxdb", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "type": "datasource" - }, - { - "current": { - "selected": false, - "text": ".*", - "value": ".*" - }, - "hide": 0, - "label": "Network ID", - "name": "network_id", - "options": [ - { - "selected": true, - "text": ".*", - "value": ".*" - } - ], - "query": ".*", - "skipUrlSync": false, - "type": "textbox" - }, - { - "current": { - "selected": false, - "text": ".*", - "value": ".*" - }, - "hide": 0, - "label": "Link ID", - "name": "link_id", - "options": [ - { - "selected": true, - "text": ".*", - "value": ".*" - } - ], - "query": ".*", - "skipUrlSync": false, - "type": "textbox" - }, - { - "auto": false, - "auto_count": 30, - "auto_min": "10s", - "current": { - "selected": false, - "text": "1h", - "value": "1h" - }, - "hide": 0, - "label": "Time Range", - "name": "timerange", - "options": [ - { - "selected": false, - "text": "5m", - "value": "5m" - }, - { - "selected": false, - "text": "15m", - "value": "15m" - }, - { - "selected": false, - "text": "30m", - "value": "30m" - }, - { - "selected": true, - "text": "1h", - "value": "1h" - }, - { - "selected": false, - "text": "3h", - "value": "3h" - }, - { - "selected": false, - "text": "6h", - "value": "6h" - }, - { - "selected": false, - "text": "12h", - "value": "12h" - }, - { - "selected": false, - "text": "24h", - "value": "24h" - } - ], - "query": "5m,15m,30m,1h,3h,6h,12h,24h", - "queryValue": "", - "refresh": 2, - "skipUrlSync": false, - "type": "interval" - } - ] - }, - "time": { - "from": "now-1h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m" - ] - }, - "timezone": "", - "title": "SIMAP Service-Level SLA Monitoring", - "uid": "simap-sla-monitoring", - "version": 0, - "weekStart": "" -} diff --git a/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V2.json b/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V2.json deleted file mode 100644 index ee669692c..000000000 --- a/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V2.json +++ /dev/null @@ -1,805 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "enable": true, - "hide": false, - "iconColor": "orange", - "name": "Service Upgrades", - "query": "SELECT * FROM telemetry_notifications WHERE status='UPGRADE'", - "tagsColumn": "status", - "target": { - "limit": 100, - "matchAny": false, - "tags": [ - { - "key": "status", - "operator": "=", - "value": "UPGRADE" - } - ], - "type": "tags" - }, - "textColumn": "timestamp", - "titleColumn": "" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "enable": true, - "hide": false, - "iconColor": "red", - "name": "Service Downgrades", - "query": "SELECT * FROM telemetry_notifications WHERE status='DOWNGRADE'", - "tagsColumn": "status", - "target": { - "limit": 100, - "matchAny": false, - "tags": [ - { - "key": "status", - "operator": "=", - "value": "DOWNGRADE" - } - ], - "type": "tags" - }, - "textColumn": "timestamp", - "titleColumn": "" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 1, - "id": 0, - "links": [], - "panels": [ - { - "fieldConfig": { - "defaults": {}, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 1, - "options": { - "code": { - "language": "plaintext", - "showLineNumbers": false, - "showMiniMap": false - }, - "content": "# SIMAP Service-Level SLA Monitoring\n", - "mode": "markdown" - }, - "pluginVersion": "12.3.1", - "title": "Dashboard Overview", - "type": "text" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "footer": { - "reducers": [] - }, - "inspect": false - }, - "mappings": [ - { - "options": { - "DOWNGRADE": { - "color": "red", - "index": 1, - "text": "⬇️ DOWNGRADE" - }, - "UPGRADE": { - "color": "green", - "index": 0, - "text": "⬆️ UPGRADE" - } - }, - "type": "value" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Time" - }, - "properties": [ - { - "id": "custom.width", - "value": 180 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Status" - }, - "properties": [ - { - "id": "custom.cellOptions", - "value": { - "type": "color-background" - } - } - ] - } - ] - }, - "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 3 - }, - "id": 11, - "options": { - "cellHeight": "sm", - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "Time" - } - ] - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT status as \"Status\", timestamp as \"Timestamp Info\" FROM telemetry_notifications WHERE time > now() - $timerange ORDER BY time DESC LIMIT 50", - "rawQuery": true, - "refId": "A", - "resultFormat": "table" - } - ], - "title": "Service Status Change Events (UPGRADE/DOWNGRADE)", - "transformations": [ - { - "id": "organize", - "options": { - "excludeByName": {}, - "indexByName": { - "Status": 1, - "Time": 0, - "Timestamp Info": 2 - }, - "renameByName": {} - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 15, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "smooth", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "showValues": false, - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "line" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - }, - { - "color": "yellow", - "value": 65 - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [ - { - "matcher": { - "id": "byRegexp", - "options": "/pred_.*/" - }, - "properties": [ - { - "id": "custom.lineStyle", - "value": { - "dash": [10, 10], - "fill": "dash" - } - }, - { - "id": "color", - "value": { - "fixedColor": "semi-dark-gray", - "mode": "fixed" - } - }, - { - "id": "custom.fillOpacity", - "value": 5 - } - ] - } - ] - }, - "gridPos": { - "h": 15, - "w": 12, - "x": 0, - "y": 6 - }, - "id": 6, - "options": { - "legend": { - "calcs": [ - "last" - ], - "displayMode": "table", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": true, - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "B", - "resultFormat": "time_series" - } - ], - "title": "Link Bandwidth Utilization (%) - Timeline", - "type": "timeseries" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 15, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "smooth", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "showValues": false, - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "line" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - }, - { - "color": "yellow", - "value": 50 - }, - { - "color": "red", - "value": 100 - } - ] - }, - "unit": "ms" - }, - "overrides": [ - { - "matcher": { - "id": "byRegexp", - "options": "/pred_.*/" - }, - "properties": [ - { - "id": "custom.lineStyle", - "value": { - "dash": [10, 10], - "fill": "dash" - } - }, - { - "id": "color", - "value": { - "fixedColor": "semi-dark-gray", - "mode": "fixed" - } - }, - { - "id": "custom.fillOpacity", - "value": 5 - } - ] - } - ] - }, - "gridPos": { - "h": 15, - "w": 12, - "x": 12, - "y": 6 - }, - "id": 7, - "options": { - "legend": { - "calcs": [ - "last" - ], - "displayMode": "table", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": true, - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "B", - "resultFormat": "time_series" - } - ], - "title": "Link Latency (ms) - Timeline", - "type": "timeseries" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "footer": { - "reducers": [] - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - }, - { - "color": "yellow", - "value": 5 - }, - { - "color": "red", - "value": 10 - } - ] - }, - "unit": "short" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Bandwidth Utilization (%)" - }, - "properties": [ - { - "id": "custom.cellOptions", - "value": { - "mode": "gradient", - "type": "gauge" - } - }, - { - "id": "unit", - "value": "percent" - }, - { - "id": "max", - "value": 100 - }, - { - "id": "min", - "value": 0 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Latency (ms)" - }, - "properties": [ - { - "id": "custom.cellOptions", - "value": { - "mode": "gradient", - "type": "color-background" - } - }, - { - "id": "unit", - "value": "ms" - }, - { - "id": "thresholds", - "value": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - }, - { - "color": "yellow", - "value": 50 - }, - { - "color": "red", - "value": 100 - } - ] - } - } - ] - } - ] - }, - "gridPos": { - "h": 10, - "w": 24, - "x": 0, - "y": 21 - }, - "id": 8, - "options": { - "cellHeight": "sm", - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "Bandwidth Utilization (%)" - } - ] - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT LAST(bandwidth_utilization) as \"Bandwidth Utilization (%)\", LAST(latency) as \"Latency (ms)\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "table" - } - ], - "title": "Link Comparison Table - Current Status", - "transformations": [ - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true - }, - "indexByName": {}, - "renameByName": { - "link_id": "Link ID", - "network_id": "Network ID" - } - } - } - ], - "type": "table" - } - ], - "preload": false, - "refresh": "10s", - "schemaVersion": 42, - "tags": [ - "simap", - "telemetry", - "sla", - "service-monitoring" - ], - "templating": { - "list": [ - { - "current": { - "text": "influxdb-SIMAP-Server", - "value": "cf9ge11vhadj4b" - }, - "includeAll": false, - "label": "InfluxDB Datasource", - "name": "DS_INFLUXDB", - "options": [], - "query": "influxdb", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "auto": false, - "auto_count": 30, - "auto_min": "10s", - "current": { - "text": "5m", - "value": "5m" - }, - "label": "Time Range", - "name": "timerange", - "options": [ - { - "selected": true, - "text": "5m", - "value": "5m" - }, - { - "selected": false, - "text": "10m", - "value": "10m" - }, - { - "selected": false, - "text": "20m", - "value": "20m" - }, - { - "selected": false, - "text": "1h", - "value": "1h" - }, - { - "selected": false, - "text": "3h", - "value": "3h" - }, - { - "selected": false, - "text": "6h", - "value": "6h" - }, - { - "selected": false, - "text": "12h", - "value": "12h" - } - ], - "query": "5m,10m,20m,1h,3h,6h,12h", - "refresh": 2, - "type": "interval" - }, - { - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", - "includeAll": true, - "label": "Network ID", - "multi": true, - "name": "network_id", - "options": [], - "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", - "includeAll": true, - "label": "Link ID", - "multi": true, - "name": "link_id", - "options": [], - "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m" - ] - }, - "timezone": "", - "title": "SIMAP Service-Level SLA Monitoring", - "uid": "simap-sla-monitoring", - "version": 7 -} \ No newline at end of file diff --git a/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3 copy.json b/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3 copy.json deleted file mode 100644 index d1b3d72ba..000000000 --- a/src/tests/tools/simap_ai_engine/ai_engine/Grafana_dashboard/grafana-dashboard-simap-sla-monitoring_V3 copy.json +++ /dev/null @@ -1,1117 +0,0 @@ -{ - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "enable": true, - "hide": false, - "iconColor": "orange", - "name": "Service Upgrades", - "query": "SELECT * FROM telemetry_notifications WHERE status='UPGRADE'", - "tagsColumn": "status", - "target": { - "limit": 100, - "matchAny": false, - "tags": [ - { - "key": "status", - "operator": "=", - "value": "UPGRADE" - } - ], - "type": "tags" - }, - "textColumn": "timestamp", - "titleColumn": "" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "enable": true, - "hide": false, - "iconColor": "red", - "name": "Service Downgrades", - "query": "SELECT * FROM telemetry_notifications WHERE status='DOWNGRADE'", - "tagsColumn": "status", - "target": { - "limit": 100, - "matchAny": false, - "tags": [ - { - "key": "status", - "operator": "=", - "value": "DOWNGRADE" - } - ], - "type": "tags" - }, - "textColumn": "timestamp", - "titleColumn": "" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 1, - "id": 0, - "links": [], - "panels": [ - { - "fieldConfig": { - "defaults": {}, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 0 - }, - "id": 1, - "options": { - "code": { - "language": "plaintext", - "showLineNumbers": false, - "showMiniMap": false - }, - "content": "# SIMAP Service-Level SLA Monitoring\n", - "mode": "markdown" - }, - "pluginVersion": "12.3.1", - "title": "Dashboard Overview", - "type": "text" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "footer": { - "reducers": [] - }, - "inspect": false - }, - "mappings": [ - { - "options": { - "DOWNGRADE": { - "color": "red", - "index": 1, - "text": "⬇️ DOWNGRADE" - }, - "UPGRADE": { - "color": "green", - "index": 0, - "text": "⬆️ UPGRADE" - } - }, - "type": "value" - } - ], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - } - ] - } - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Time" - }, - "properties": [ - { - "id": "custom.width", - "value": 180 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Status" - }, - "properties": [ - { - "id": "custom.cellOptions", - "value": { - "type": "color-background" - } - } - ] - } - ] - }, - "gridPos": { - "h": 3, - "w": 24, - "x": 0, - "y": 3 - }, - "id": 11, - "options": { - "cellHeight": "sm", - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "Time" - } - ] - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT status as \"Status\", timestamp as \"Timestamp Info\" FROM telemetry_notifications WHERE time > now() - $timerange ORDER BY time DESC LIMIT 50", - "rawQuery": true, - "refId": "A", - "resultFormat": "table" - } - ], - "title": "Service Status Change Events (UPGRADE/DOWNGRADE)", - "transformations": [ - { - "id": "organize", - "options": { - "excludeByName": {}, - "indexByName": { - "Status": 1, - "Time": 0, - "Timestamp Info": 2 - }, - "renameByName": {} - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 15, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "smooth", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "showValues": false, - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "line" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - }, - { - "color": "yellow", - "value": 65 - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "percent" - }, - "overrides": [ - { - "matcher": { - "id": "byRegexp", - "options": "/pred_.*/" - }, - "properties": [ - { - "id": "custom.lineStyle", - "value": { - "dash": [10, 10], - "fill": "dash" - } - }, - { - "id": "color", - "value": { - "fixedColor": "semi-dark-gray", - "mode": "fixed" - } - }, - { - "id": "custom.fillOpacity", - "value": 5 - } - ] - } - ] - }, - "gridPos": { - "h": 15, - "w": 12, - "x": 0, - "y": 6 - }, - "id": 6, - "options": { - "legend": { - "calcs": [ - "last" - ], - "displayMode": "table", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": true, - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "B", - "resultFormat": "time_series" - } - ], - "title": "Link Bandwidth Utilization (%) - Timeline", - "type": "timeseries" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 15, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "smooth", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "showValues": false, - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "line" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - }, - { - "color": "yellow", - "value": 50 - }, - { - "color": "red", - "value": 100 - } - ] - }, - "unit": "ms" - }, - "overrides": [ - { - "matcher": { - "id": "byRegexp", - "options": "/pred_.*/" - }, - "properties": [ - { - "id": "custom.lineStyle", - "value": { - "dash": [10, 10], - "fill": "dash" - } - }, - { - "id": "color", - "value": { - "fixedColor": "semi-dark-gray", - "mode": "fixed" - } - }, - { - "id": "custom.fillOpacity", - "value": 5 - } - ] - } - ] - }, - "gridPos": { - "h": 15, - "w": 12, - "x": 12, - "y": 6 - }, - "id": 7, - "options": { - "legend": { - "calcs": [ - "last" - ], - "displayMode": "table", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": true, - "mode": "multi", - "sort": "desc" - } - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "B", - "resultFormat": "time_series" - } - ], - "title": "Link Latency (ms) - Timeline", - "type": "timeseries" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "custom": { - "align": "center", - "cellOptions": { - "type": "auto" - }, - "footer": { - "reducers": [] - }, - "inspect": false - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - }, - { - "color": "yellow", - "value": 5 - }, - { - "color": "red", - "value": 10 - } - ] - }, - "unit": "short" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "Bandwidth Utilization (%)" - }, - "properties": [ - { - "id": "custom.cellOptions", - "value": { - "mode": "gradient", - "type": "gauge" - } - }, - { - "id": "unit", - "value": "percent" - }, - { - "id": "max", - "value": 100 - }, - { - "id": "min", - "value": 0 - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "Latency (ms)" - }, - "properties": [ - { - "id": "custom.cellOptions", - "value": { - "mode": "gradient", - "type": "color-background" - } - }, - { - "id": "unit", - "value": "ms" - }, - { - "id": "thresholds", - "value": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - }, - { - "color": "yellow", - "value": 50 - }, - { - "color": "red", - "value": 100 - } - ] - } - } - ] - } - ] - }, - "gridPos": { - "h": 10, - "w": 24, - "x": 0, - "y": 21 - }, - "id": 8, - "options": { - "cellHeight": "sm", - "showHeader": true, - "sortBy": [ - { - "desc": true, - "displayName": "Bandwidth Utilization (%)" - } - ] - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT LAST(bandwidth_utilization) as \"Bandwidth Utilization (%)\", LAST(latency) as \"Latency (ms)\" FROM link_telemetry WHERE time > now() - 5m AND network_id =~ /$network_id/ AND link_id =~ /$link_id/ GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "table" - } - ], - "title": "Link Comparison Table - Current Status", - "transformations": [ - { - "id": "organize", - "options": { - "excludeByName": { - "Time": true - }, - "indexByName": {}, - "renameByName": { - "link_id": "Link ID", - "network_id": "Network ID" - } - } - } - ], - "type": "table" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "Bandwidth Utilization (%)", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 15, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "smooth", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "showValues": false, - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 1, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - } - ] - }, - "unit": "percent" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "link_telemetry.bandwidth_utilization {link_id: E2E-L1, network_id: e2e}" - }, - "properties": [ - { - "id": "displayName", - "value": "Actual BW" - }, - { - "id": "color", - "value": { - "fixedColor": "blue", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "predicted_telemetry.pred_bandwidth_utilization {link_id: E2E-L1, network_id: e2e}" - }, - "properties": [ - { - "id": "displayName", - "value": "Predicted BW" - }, - { - "id": "color", - "value": { - "fixedColor": "red", - "mode": "fixed" - } - }, - { - "id": "custom.lineStyle", - "value": { - "dash": [10, 10], - "fill": "dash" - } - } - ] - } - ] - }, - "gridPos": { - "h": 12, - "w": 12, - "x": 0, - "y": 31 - }, - "id": 12, - "options": { - "legend": { - "calcs": [ - "min", - "max", - "mean", - "last" - ], - "displayMode": "table", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT bandwidth_utilization FROM link_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT pred_bandwidth_utilization FROM predicted_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "B", - "resultFormat": "time_series" - } - ], - "title": "E2E-L1: Bandwidth Comparison (Actual vs Predicted)", - "type": "timeseries" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "Latency (ms)", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 15, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "smooth", - "lineWidth": 2, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "showValues": false, - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "min": 1, - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": 0 - } - ] - }, - "unit": "ms" - }, - "overrides": [ - { - "matcher": { - "id": "byName", - "options": "link_telemetry.latency {link_id: E2E-L1, network_id: e2e}" - }, - "properties": [ - { - "id": "displayName", - "value": "Actual Latency" - }, - { - "id": "color", - "value": { - "fixedColor": "green", - "mode": "fixed" - } - } - ] - }, - { - "matcher": { - "id": "byName", - "options": "predicted_telemetry.pred_latency {link_id: E2E-L1, network_id: e2e}" - }, - "properties": [ - { - "id": "displayName", - "value": "Predicted Latency" - }, - { - "id": "color", - "value": { - "fixedColor": "orange", - "mode": "fixed" - } - }, - { - "id": "custom.lineStyle", - "value": { - "dash": [10, 10], - "fill": "dash" - } - } - ] - } - ] - }, - "gridPos": { - "h": 12, - "w": 12, - "x": 12, - "y": 31 - }, - "id": 13, - "options": { - "legend": { - "calcs": [ - "min", - "max", - "mean", - "last" - ], - "displayMode": "table", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "multi", - "sort": "none" - } - }, - "pluginVersion": "12.3.1", - "targets": [ - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT latency FROM link_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "A", - "resultFormat": "time_series" - }, - { - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "query": "SELECT pred_latency FROM predicted_telemetry WHERE time > now() - $timerange AND network_id = 'e2e' AND link_id = 'E2E-L1' GROUP BY network_id, link_id", - "rawQuery": true, - "refId": "B", - "resultFormat": "time_series" - } - ], - "title": "E2E-L1: Latency Comparison (Actual vs Predicted)", - "type": "timeseries" - } - ], - "preload": false, - "refresh": "10s", - "schemaVersion": 42, - "tags": [ - "simap", - "telemetry", - "sla", - "service-monitoring" - ], - "templating": { - "list": [ - { - "current": { - "text": "influxdb-SIMAP-Server", - "value": "cf9ge11vhadj4b" - }, - "includeAll": false, - "label": "InfluxDB Datasource", - "name": "DS_INFLUXDB", - "options": [], - "query": "influxdb", - "refresh": 1, - "regex": "", - "type": "datasource" - }, - { - "auto": false, - "auto_count": 30, - "auto_min": "10s", - "current": { - "text": "5m", - "value": "5m" - }, - "label": "Time Range", - "name": "timerange", - "options": [ - { - "selected": true, - "text": "5m", - "value": "5m" - }, - { - "selected": false, - "text": "10m", - "value": "10m" - }, - { - "selected": false, - "text": "20m", - "value": "20m" - }, - { - "selected": false, - "text": "1h", - "value": "1h" - }, - { - "selected": false, - "text": "3h", - "value": "3h" - }, - { - "selected": false, - "text": "6h", - "value": "6h" - }, - { - "selected": false, - "text": "12h", - "value": "12h" - } - ], - "query": "5m,10m,20m,1h,3h,6h,12h", - "refresh": 2, - "type": "interval" - }, - { - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", - "includeAll": true, - "label": "Network ID", - "multi": true, - "name": "network_id", - "options": [], - "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = network_id", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - }, - { - "current": { - "selected": true, - "text": "All", - "value": "$__all" - }, - "datasource": { - "type": "influxdb", - "uid": "${DS_INFLUXDB}" - }, - "definition": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", - "includeAll": true, - "label": "Link ID", - "multi": true, - "name": "link_id", - "options": [], - "query": "SHOW TAG VALUES FROM link_telemetry WITH KEY = link_id WHERE network_id =~ /$network_id/", - "refresh": 1, - "regex": "", - "skipUrlSync": false, - "sort": 0, - "type": "query" - } - ] - }, - "time": { - "from": "now-15m", - "to": "now" - }, - "timepicker": { - "refresh_intervals": [ - "5s", - "10s", - "30s", - "1m", - "5m" - ] - }, - "timezone": "", - "title": "SIMAP V3 Service-Level SLA Monitoring", - "uid": "simap-sla-monitoring-v3", - "version": 7 -} \ No newline at end of file -- GitLab