Fluxnet Hainich

Load the Hainich (DE-Hai) dataset from FLUXNET.

Additional variables present in the dataset but not in the default mapping: ['PA', 'P', 'WS', 'WD', 'LW_IN', 'NETRAD']
_def_meteo_vars = {
    "TA_F": "TA",
    "SW_IN_F": "SW_IN",
    # "LW_IN_F": "LW_IN",
    "VPD_F": "VPD",
    #"PA": "PA"
}


# Extended FLUXNET -> short-name mapping: all gap-filled ("_F") meteo columns,
# plus the MDS-gap-filled layer-1 soil water content and soil temperature.
_gap_filled_vars = ['TA', 'SW_IN', 'LW_IN', 'VPD', 'WS', 'PA', 'P']
meteo_vars_big = {f"{name}_F": name for name in _gap_filled_vars}
meteo_vars_big['SWC_F_MDS_1'] = 'SWC'
meteo_vars_big['TS_F_MDS_1'] = 'TS'


# Measurement units for the default variable set (matches _def_meteo_vars).
units = dict(
    TA='°C',
    SW_IN='W m-2',
    VPD='hPa',
)

# Measurement units for the extended variable set (values of meteo_vars_big).
# Kept as an ordered pairs list; key order matters because units_big.keys()
# is used below to build the plotting dataframe.
_units_big_pairs = [
    ('TA', '°C'),
    ('SW_IN', 'W m-2'),
    ('VPD', 'hPa'),
    ('PA', 'hPa'),
    ('P', 'mm'),
    ('WS', 'm s-1'),
    ('LW_IN', 'W m-2'),
    ('SWC', '%'),
    ('TS', '°C'),
]
units_big = dict(_units_big_pairs)

# Locations of the raw FLUXNET2015 half-hourly Hainich (DE-Hai) CSV and its
# parquet conversions (float32, float64, and the extended "big" variable set).
_hai_dir = here("data/Hainich")
_hai_stem = "FLX_DE-Hai_FLUXNET2015_FULLSET_HH_2000-2012_1-4"
hai_path_raw = _hai_dir / f"{_hai_stem}.csv"
hai_path = _hai_dir / f"{_hai_stem}_float32.parquet"
hai_path64 = _hai_dir / f"{_hai_stem}_float64.parquet"
hai_big_path = _hai_dir / f"{_hai_stem}_float64_big.parquet"
# Inspect the raw CSV header — the FULLSET file has 238 columns (see output).
read_col_names(hai_path_raw)
Index(['TIMESTAMP_START', 'TIMESTAMP_END', 'TA_F_MDS', 'TA_F_MDS_QC', 'TA_ERA',
       'TA_F', 'TA_F_QC', 'SW_IN_POT', 'SW_IN_F_MDS', 'SW_IN_F_MDS_QC',
       ...
       'GPP_DT_CUT_MEAN', 'GPP_DT_CUT_SE', 'GPP_DT_CUT_05', 'GPP_DT_CUT_16',
       'GPP_DT_CUT_25', 'GPP_DT_CUT_50', 'GPP_DT_CUT_75', 'GPP_DT_CUT_84',
       'GPP_DT_CUT_95', 'RECO_SR'],
      dtype='object', length=238)
# Infer per-column dtypes: timestamps map to 'str', QC-flag columns to None
# (skipped), numeric data to float32 (see output below).
col_types(read_col_names(hai_path_raw)[:10]) # only for 10 cols for testing
{'TIMESTAMP_START': 'str',
 'TIMESTAMP_END': 'str',
 'TA_F_MDS': numpy.float32,
 'TA_F_MDS_QC': None,
 'TA_ERA': numpy.float32,
 'TA_F': numpy.float32,
 'TA_F_QC': None,
 'SW_IN_POT': numpy.float32,
 'SW_IN_F_MDS': numpy.float32,
 'SW_IN_F_MDS_QC': None}

source

read_fluxnet_csv

 read_fluxnet_csv (path, nrows:int, meteo_vars:dict[str,str]={'TA_F':
                   'TA', 'SW_IN_F': 'SW_IN', 'VPD_F': 'VPD'},
                   num_dtype=<class 'numpy.float32'>)

Read fluxnet csv in Pandas with correct parsing of csv

Type Default Details
path
nrows int
meteo_vars dict {‘TA_F’: ‘TA’, ‘SW_IN_F’: ‘SW_IN’, ‘VPD_F’: ‘VPD’}
num_dtype type float32 type for numerical columns

hainich default df

hai_path_raw
PosixPath('/home/simone/Documents/uni/Thesis/GPFA_imputation/data/FLX_DE-Hai_FLUXNET2015_FULLSET_HH_2000-2012_1-4.csv')
CPU times: user 25.2 s, sys: 1min 48s, total: 2min 13s
Wall time: 2min 14s
# Cache the parsed float32 dataframe as parquet for fast re-loading.
hai.to_parquet(hai_path)
CPU times: user 23.5 s, sys: 1.44 s, total: 25 s
Wall time: 25.1 s
# Cache the float64 variant as well.
hai64.to_parquet(hai_path64)
CPU times: user 16.2 ms, sys: 6.29 ms, total: 22.5 ms
Wall time: 14.6 ms
TA SW_IN VPD
time
2000-01-01 00:30:00 -0.60 0.0 0.222
2000-01-01 01:00:00 -0.65 0.0 0.122
2000-01-01 01:30:00 -0.58 0.0 0.090
2000-01-01 02:00:00 -0.51 0.0 0.110
2000-01-01 02:30:00 -0.49 0.0 0.102
... ... ... ...
2012-12-31 22:00:00 4.75 0.0 2.249
2012-12-31 22:30:00 4.48 0.0 2.154
2012-12-31 23:00:00 4.32 0.0 2.108
2012-12-31 23:30:00 4.02 0.0 1.996
2013-01-01 00:00:00 3.99 0.0 2.000

227952 rows × 3 columns

meteo_vars_big
{'TA_F': 'TA',
 'SW_IN_F': 'SW_IN',
 'LW_IN_F': 'LW_IN',
 'VPD_F': 'VPD',
 'WS_F': 'WS',
 'PA_F': 'PA',
 'P_F': 'P',
 'SWC_F_MDS_1': 'SWC',
 'TS_F_MDS_1': 'TS'}
# Convert the raw CSV with the extended variable set to a float64 parquet,
# then re-load it for inspection.
read_fluxnet_csv(hai_path_raw, None, meteo_vars = meteo_vars_big, num_dtype=np.float64).to_parquet(hai_big_path)
hai_big = pd.read_parquet(hai_big_path)
# Fraction of half-hours with zero precipitation (~91% of the record).
# Vectorized Series.mean() instead of builtin sum(), which iterates the
# Series element-by-element in Python.
(hai_big.P == 0.0).mean()
0.9071997613532674
# Confirm all columns were parsed as float32.
hai.dtypes
TA       float32
SW_IN    float32
VPD      float32
dtype: object
# Preview the default dataframe: 227,952 half-hourly rows, 2000–2012.
hai
TA SW_IN VPD
time
2000-01-01 00:30:00 -0.60 0.0 0.222
2000-01-01 01:00:00 -0.65 0.0 0.122
2000-01-01 01:30:00 -0.58 0.0 0.090
2000-01-01 02:00:00 -0.51 0.0 0.110
2000-01-01 02:30:00 -0.49 0.0 0.102
... ... ... ...
2012-12-31 22:00:00 4.75 0.0 2.249
2012-12-31 22:30:00 4.48 0.0 2.154
2012-12-31 23:00:00 4.32 0.0 2.108
2012-12-31 23:30:00 4.02 0.0 1.996
2013-01-01 00:00:00 3.99 0.0 2.000

227952 rows × 3 columns

ERA

# Show the default variable mapping used for the ERA files below.
_def_meteo_vars
{'TA_F': 'TA', 'SW_IN_F': 'SW_IN', 'VPD_F': 'VPD'}
CPU times: user 41 s, sys: 32.4 ms, total: 41.1 s
Wall time: 41.3 s
# Cache the ERA dataframe (hai_era, built above) as parquet.
hai_era.to_parquet(hai_era_path)
CPU times: user 21.2 ms, sys: 10.3 ms, total: 31.5 ms
Wall time: 18.4 ms
# Re-parse the raw ERA CSV as float64 and cache it alongside the float32 one.
hai_era64 = read_fluxnet_csv(hai_era_path_raw, None, meteo_vars = era_vars, num_dtype=np.float64)
hai_era64.to_parquet(hai_era_path64)
CPU times: user 39.8 s, sys: 65.6 ms, total: 39.9 s
Wall time: 40.1 s

Control map

Plotting

Scales for consistent colors for plotting variables

# Variable names that need a color assignment in the plots below.
units_big.keys()
dict_keys(['TA', 'SW_IN', 'VPD', 'PA', 'P', 'WS', 'LW_IN', 'SWC', 'TS'])
# One row per meteorological variable, used for the color-swatch charts below.
df = pd.DataFrame({'vars': list(units_big)})
# Inspect the shared color scale: the domain is fixed explicitly so each
# variable keeps the same color across all plots.
scale_meteo
Scale({
  domain: ['TA', 'SW_IN', 'LW_IN', 'VPD', 'WS', 'PA', 'SWC', 'TS', 'P'],
  range: ['#1B9E77', '#D95F02', '#7570B3', '#E7298A', '#66A61E', '#E6AB02', '#A6761D', '#666666']
})
# Color swatch per variable using the shared scale.
alt.Chart(df).mark_rect().encode(x = 'vars', color = alt.Color('vars', scale= scale_meteo))

if we remove one variable the order doesn’t change

# Same chart with SW_IN dropped: remaining variables keep their colors
# because the scale domain is explicit, not data-derived.
alt.Chart(df[df.vars != 'SW_IN']).mark_rect().encode(x = 'vars', color = alt.Color('vars', scale= scale_meteo))

Export