Commit f822d821 authored by Asthana, Shivanshi

Delete Code for Method II

parent 7cbf870f
import xarray as xr
import numpy as np

# Load the 95th-percentile threshold file
percentile_data = xr.open_dataset('output_percentile_95.nc')
percentile_thresholds = percentile_data['sst']  # Adjust variable name if needed

# Specify the range of years
years = range(1991, 2023)

# Loop through each year
for year in years:
    # Load the daily mean file for the current year
    daily_mean_data = xr.open_dataset(f'output_file_TWCPO_{year}_dailymean.nc')
    daily_mean_temperatures = daily_mean_data['sst']  # Adjust variable name if needed

    # Mask indicating where daily mean temperatures exceed the threshold
    above_threshold = daily_mean_temperatures > percentile_thresholds

    # Initialize a (lat, lon) count array
    cluster_count = np.zeros(
        (daily_mean_temperatures.sizes['lat'], daily_mean_temperatures.sizes['lon']),
        dtype=int,
    )

    # Loop over lat and lon to count, per grid cell, the 3-day windows that lie entirely above the threshold
    for i in range(daily_mean_temperatures.sizes['lat']):
        for j in range(daily_mean_temperatures.sizes['lon']):
            cluster_count[i, j] = np.sum(
                above_threshold.isel(lat=i, lon=j).rolling(time=3).sum() == 3
            )

    # Save the cluster count data as a netCDF file
    output_path = f'/home/u/u301871/counts/count_{year}.nc'
    count_dataset = xr.Dataset(
        {'cluster_count': (['lat', 'lon'], cluster_count)},
        coords={'lat': daily_mean_temperatures['lat'], 'lon': daily_mean_temperatures['lon']},
    )
    count_dataset.to_netcdf(output_path)

    # Close the daily mean dataset to free up resources
    daily_mean_data.close()
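The nested lat/lon loop above can be slow on large grids. Below is a minimal vectorized sketch of the same window count, assuming above_threshold has dims (time, lat, lon); it is not part of the original script, but the body could replace the double loop inside the per-year iteration and produce the same cluster_count values.

# Hypothetical vectorized variant of the per-cell count above:
# for every grid cell, count the 3-day windows in which all three days exceed the threshold.
above = daily_mean_temperatures > percentile_thresholds       # bool, dims (time, lat, lon)
windows_full = above.rolling(time=3).sum() == 3               # True where a full 3-day window is above threshold
cluster_count_fast = windows_full.sum(dim='time').astype(int) # same quantity as the nested loop

# Write out in the same layout as the original count files
cluster_count_fast.to_dataset(name='cluster_count').to_netcdf(
    f'/home/u/u301871/counts/count_{year}.nc'
)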
import xarray as xr
import numpy as np
import matplotlib.pyplot as plt
from statsmodels.tsa.arima.model import ARIMA
from sklearn.metrics import mean_squared_error

# Specify the range of years
years = range(1981, 2023)

# Initialize a list to store spatial averages
spatial_averages = []

# Loop through each year
for year in years:
    # Load the count file for the current year
    count_data = xr.open_dataset(f'/home/u/u301871/counts/count_{year}.nc')
    cluster_count = count_data['cluster_count'].values  # Adjust variable name if needed

    # Calculate the spatial average
    spatial_average = np.mean(cluster_count)

    # Append the result to the list
    spatial_averages.append(spatial_average)

    # Close the count dataset to free up resources
    count_data.close()

# Create a time series (rounded down to whole counts)
time_series = np.floor(spatial_averages)

# Fit ARIMA model
model = ARIMA(time_series, order=(5, 1, 0))  # Adjust order as needed
model_fit = model.fit()

# Forecast future values (2023-2033)
future_years = range(2023, 2034)
forecast = model_fit.forecast(steps=len(future_years))

# Plot the original time series and the forecasted values
plt.plot(years, time_series, label='Original Data', marker='o')
plt.plot(future_years, forecast, label='ARIMA Forecast', linestyle='dashed', marker='o', color='red')
plt.xlabel('Year')
plt.ylabel('Spatial Average of Counts')
plt.title('Time Series and ARIMA Forecast of Spatial Averages (1981-2033)')
plt.legend()
plt.grid(True)
plt.show()
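mean_squared_error is imported above but never called. A minimal sketch of how the ARIMA fit could be checked against a hold-out of the observed series; the 5-year split is an assumption for illustration, not part of the original script.

# Hypothetical hold-out check: fit on all but the last 5 years,
# forecast them, and compare against the observed spatial averages.
train, test = time_series[:-5], time_series[-5:]
holdout_fit = ARIMA(train, order=(5, 1, 0)).fit()
holdout_forecast = holdout_fit.forecast(steps=len(test))
rmse = mean_squared_error(test, holdout_forecast) ** 0.5
print(f'Hold-out RMSE over the last {len(test)} years: {rmse:.3f}')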