Commit 25668bd

clean up
1 parent 28d97e5

File tree

1 file changed: 0 additions, 22 deletions


activestorage/active.py (-22 lines)
@@ -161,8 +161,6 @@ def __init__(
         self._max_threads = max_threads
         self.missing = None
         self.ds = None
-        self.netCDF4Dataset = None
-        self.metric_data = {}
         self.data_read = 0
 
     def __load_nc_file(self):
@@ -179,10 +177,6 @@ def __load_nc_file(self):
         self.ds = nc[ncvar]
         return self.ds
 
-    def _netCDF4Dataset(self):
-        if not self.netCDF4Dataset:
-            return self.__load_nc_file()
-
     def __get_missing_attributes(self):
         if self.ds is None:
             self.__load_nc_file()
@@ -193,7 +187,6 @@ def __getitem__(self, index):
         Provides support for a standard get item.
         #FIXME-BNL: Why is the argument index?
         """
-        self.metric_data = {}
         if self.ds is None:
             self.__load_nc_file()
 
@@ -307,10 +300,6 @@ def _get_selection(self, *args):
         # hopefully fix pyfive to get a dtype directly
         array = pyfive.indexing.ZarrArrayStub(self.ds.shape, self.ds.chunks)
         ds = self.ds.id
-
-        self.metric_data['args'] = args
-        self.metric_data['dataset shape'] = self.ds.shape
-        self.metric_data['dataset chunks'] = self.ds.chunks
         if ds.filter_pipeline is None:
             compressor, filters = None, None
         else:
@@ -359,13 +348,6 @@ def _from_storage(self, ds, indexer, chunks, out_shape, out_dtype, compressor, f
         # Because we do this, we need to read the dataset b-tree now, not as we go, so
         # it is already in cache. If we remove the thread pool from here, we probably
         # wouldn't need to do it before the first one.
-
-        if ds.chunks is not None:
-            t1 = time.time()
-            # ds._get_chunk_addresses()
-            t2 = time.time() - t1
-            self.metric_data['indexing time (s)'] = t2
-            # self.metric_data['chunk number'] = len(ds._zchunk_index)
         chunk_count = 0
         t1 = time.time()
         with concurrent.futures.ThreadPoolExecutor(max_workers=self._max_threads) as executor:
@@ -430,10 +412,6 @@ def _from_storage(self, ds, indexer, chunks, out_shape, out_dtype, compressor, f
         # size.
         out = out / np.sum(counts).reshape(shape1)
 
-        t2 = time.time()
-        self.metric_data['reduction time (s)'] = t2-t1
-        self.metric_data['chunks processed'] = chunk_count
-        self.metric_data['storage read (B)'] = self.data_read
         return out
 
     def _get_endpoint_url(self):
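
Note: the deleted lines all served one ad-hoc instrumentation pattern: wall-clock timestamps captured around the indexing and reduction steps and recorded in a metric_data dict, together with counters for chunks processed and bytes read from storage. A minimal sketch of that pattern, assuming a hypothetical reduce_chunks helper and a process_chunk callable that returns the number of bytes it read (neither exists in active.py):

import time
import concurrent.futures

def reduce_chunks(chunks, process_chunk, max_threads=4):
    # Hypothetical stand-in for the timed section of _from_storage:
    # run process_chunk over every chunk in a thread pool and collect
    # the same metrics this commit deletes.
    metric_data = {}
    chunk_count = 0
    data_read = 0
    t1 = time.time()
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as executor:
        for nbytes in executor.map(process_chunk, chunks):
            chunk_count += 1
            data_read += nbytes
    metric_data['reduction time (s)'] = time.time() - t1
    metric_data['chunks processed'] = chunk_count
    metric_data['storage read (B)'] = data_read
    return metric_data

Judging from this diff alone, metric_data was write-only state: initialised in __init__, reset in __getitem__, and populated in _get_selection and _from_storage but never consumed, which is consistent with removing it wholesale as clean up.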
