@@ -161,8 +161,6 @@ def __init__(
         self._max_threads = max_threads
         self.missing = None
         self.ds = None
-        self.netCDF4Dataset = None
-        self.metric_data = {}
         self.data_read = 0
 
     def __load_nc_file(self):
@@ -179,10 +177,6 @@ def __load_nc_file(self):
         self.ds = nc[ncvar]
         return self.ds
 
-    def _netCDF4Dataset(self):
-        if not self.netCDF4Dataset:
-            return self.__load_nc_file()
-
     def __get_missing_attributes(self):
         if self.ds is None:
             self.__load_nc_file()
@@ -193,7 +187,6 @@ def __getitem__(self, index):
         Provides support for a standard get item.
         #FIXME-BNL: Why is the argument index?
         """
-        self.metric_data = {}
         if self.ds is None:
             self.__load_nc_file()
 
@@ -307,10 +300,6 @@ def _get_selection(self, *args):
         # hopefully fix pyfive to get a dtype directly
         array = pyfive.indexing.ZarrArrayStub(self.ds.shape, self.ds.chunks)
         ds = self.ds.id
-
-        self.metric_data['args'] = args
-        self.metric_data['dataset shape'] = self.ds.shape
-        self.metric_data['dataset chunks'] = self.ds.chunks
         if ds.filter_pipeline is None:
             compressor, filters = None, None
         else:
@@ -359,13 +348,6 @@ def _from_storage(self, ds, indexer, chunks, out_shape, out_dtype, compressor, f
         # Because we do this, we need to read the dataset b-tree now, not as we go, so
         # it is already in cache. If we remove the thread pool from here, we probably
         # wouldn't need to do it before the first one.
-
-        if ds.chunks is not None:
-            t1 = time.time()
-            # ds._get_chunk_addresses()
-            t2 = time.time() - t1
-            self.metric_data['indexing time (s)'] = t2
-            # self.metric_data['chunk number'] = len(ds._zchunk_index)
         chunk_count = 0
         t1 = time.time()
         with concurrent.futures.ThreadPoolExecutor(max_workers=self._max_threads) as executor:
@@ -430,10 +412,6 @@ def _from_storage(self, ds, indexer, chunks, out_shape, out_dtype, compressor, f
             # size.
             out = out / np.sum(counts).reshape(shape1)
 
-        t2 = time.time()
-        self.metric_data['reduction time (s)'] = t2 - t1
-        self.metric_data['chunks processed'] = chunk_count
-        self.metric_data['storage read (B)'] = self.data_read
         return out
 
     def _get_endpoint_url(self):
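
For context, the instrumentation deleted above followed one pattern throughout: reset self.metric_data at the start of each read, record wall-clock timings and byte counts as the reduction ran, and leave the dict on the instance for callers to inspect afterwards. Below is a minimal self-contained sketch of that pattern; the attribute names (metric_data, data_read) and dict keys mirror the deleted lines, but InstrumentedReader and its read/_reduce methods are hypothetical stand-ins, not the Active API.

# Minimal sketch of the per-read metrics pattern removed in this commit.
import time


class InstrumentedReader:
    def __init__(self):
        self.metric_data = {}
        self.data_read = 0

    def read(self, args):
        self.metric_data = {}  # reset per read, as __getitem__ did
        self.metric_data['args'] = args
        t1 = time.time()
        result = self._reduce(args)  # stand-in for the storage-side reduction
        self.metric_data['reduction time (s)'] = time.time() - t1
        self.metric_data['storage read (B)'] = self.data_read
        return result

    def _reduce(self, args):
        self.data_read += 1024  # pretend each read pulls a 1 KiB chunk
        return sum(args)


reader = InstrumentedReader()
print(reader.read((1, 2, 3)))  # 6
print(reader.metric_data)      # timings and bytes for the last read

Removing this per-instance state (along with the unused _netCDF4Dataset wrapper) leaves performance measurement to external tooling rather than the reader object itself.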