From f822d8213db0f7808afb47b9e832bceac066cf6c Mon Sep 17 00:00:00 2001
From: "Asthana, Shivanshi" <shivanshi.asthana@studium.uni-hamburg.de>
Date: Fri, 26 Jan 2024 16:42:56 +0000
Subject: [PATCH] Delete Code for Method II
---
Code for Method II | 86 ----------------------------------------------
1 file changed, 86 deletions(-)
delete mode 100644 Code for Method II
diff --git a/Code for Method II b/Code for Method II
deleted file mode 100644
index afb9620..0000000
--- a/Code for Method II
+++ /dev/null
@@ -1,86 +0,0 @@
-import xarray as xr
-import numpy as np
-
-# Load percentile file
-percentile_data = xr.open_dataset('output_percentile_95.nc')
-percentile_thresholds = percentile_data['sst'] # Adjust variable name
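-# ('output_percentile_95.nc' is assumed to hold one 95th-percentile SST value
-#  per grid cell; as an editor's sketch, not part of this script, it could be
-#  produced with, e.g.,
-#  xr.open_mfdataset('output_file_TWCPO_*_dailymean.nc')['sst']
-#      .quantile(0.95, dim='time').to_netcdf('output_percentile_95.nc'))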
-
-# Years to process (1991-2022 inclusive; note that the analysis script below
-# reads count files from 1981 onward, so the two ranges should be reconciled)
-years = range(1991, 2023)
-
-# Loop through each year
-for year in years:
- # Load daily mean file for the current year
- daily_mean_data = xr.open_dataset(f'output_file_TWCPO_{year}_dailymean.nc')
- daily_mean_temperatures = daily_mean_data['sst'] # Adjust variable name
-
- # Create a mask indicating where daily mean temperatures exceed the threshold
- above_threshold = daily_mean_temperatures > percentile_thresholds
-
-    # Initialize a per-grid-cell count array (sized from lat/lon so it stays
-    # 2-D even if the threshold file carries an extra time dimension)
-    cluster_count = np.zeros(
-        (daily_mean_temperatures.sizes['lat'], daily_mean_temperatures.sizes['lon']),
-        dtype=int,
-    )
-
-    # Count, for each grid cell, the number of 3-day windows in which all
-    # three days exceed the threshold (note: a run of N >= 3 consecutive
-    # days contributes N - 2 overlapping windows to the count)
-    for i in range(len(daily_mean_temperatures.lat)):
-        for j in range(len(daily_mean_temperatures.lon)):
-            cluster_count[i, j] = np.sum(
-                above_threshold.isel(lat=i, lon=j).rolling(time=3).sum() == 3
-            )
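-    # A vectorized alternative with the same result (editor's sketch; avoids
-    # the slow per-cell Python loop):
-    #   cluster_count = (
-    #       (above_threshold.rolling(time=3).sum() == 3).sum(dim='time').values
-    #   )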
-
-    # Save the per-cell window counts as a netCDF file, keeping the lat/lon
-    # coordinates so the output stays georeferenced
-    output_path = f'/home/u/u301871/counts/count_{year}.nc'
-    count_dataset = xr.Dataset(
-        {'cluster_count': (['lat', 'lon'], cluster_count)},
-        coords={'lat': daily_mean_temperatures.lat, 'lon': daily_mean_temperatures.lon},
-    )
-    count_dataset.to_netcdf(output_path)
-
- # Close the daily mean dataset to free up resources
- daily_mean_data.close()
-
-
-import xarray as xr
-import numpy as np
-import matplotlib.pyplot as plt
-from statsmodels.tsa.arima.model import ARIMA
-
-# Years with count files to average (range(1981, 2023) covers 1981-2022;
-# this must match the years actually produced by the counting script above)
-years = range(1981, 2023)
-
-# Initialize a list to store spatial averages
-spatial_averages = []
-
-# Loop through each year
-for year in years:
- # Load the count file for the current year
- count_data = xr.open_dataset(f'/home/u/u301871/counts/count_{year}.nc')
- cluster_count = count_data['cluster_count'].values # Adjust variable name
-
- # Calculate the spatial average
- spatial_average = np.mean(cluster_count)
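-    # (Editor's note: np.mean weights all grid cells equally; an
-    #  area-weighted average would be, e.g.,
-    #  count_data['cluster_count']
-    #      .weighted(np.cos(np.deg2rad(count_data.lat))).mean(('lat', 'lon')),
-    #  assuming 'lat' is in degrees)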
-
- # Append the result to the list
- spatial_averages.append(spatial_average)
-
- # Close the count dataset to free up resources
- count_data.close()
-
-# Build the time series; np.floor truncates each yearly average to a whole
-# number, so drop it to model the raw averages instead
-time_series = np.floor(spatial_averages)
-
-# Fit ARIMA model
-model = ARIMA(time_series, order=(5, 1, 0)) # Adjust order as needed
-model_fit = model.fit()
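-# The (5, 1, 0) order is a placeholder; a small AIC grid search is one way to
-# pick it (editor's sketch, not part of the original method):
-#   candidate_orders = [(p, d, q) for p in range(4) for d in (0, 1) for q in range(4)]
-#   best_order = min(candidate_orders,
-#                    key=lambda o: ARIMA(time_series, order=o).fit().aic)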
-
-# Forecast future values (2023-2033)
-future_years = range(2023, 2034)
-forecast = model_fit.forecast(steps=len(future_years))
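-# get_forecast() also exposes the uncertainty around the point forecast
-# (editor's sketch using the statsmodels API):
-#   pred = model_fit.get_forecast(steps=len(future_years))
-#   conf = pred.conf_int(alpha=0.05)  # (steps, 2) array of lower/upper bounds
-#   plt.fill_between(future_years, conf[:, 0], conf[:, 1], alpha=0.2)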
-
-# Plot the original time series and the forecasted values
-plt.plot(years, time_series, label='Original Data', marker='o')
-plt.plot(future_years, forecast, label='ARIMA Forecast', linestyle='dashed', marker='o', color='red')
-plt.xlabel('Year')
-plt.ylabel('Spatial Average of Counts')
-plt.title('Time Series and ARIMA Forecast of Spatial Averages (1981-2033)')
-plt.legend()
-plt.grid(True)
-plt.show()
-
--
GitLab