Coverage for src/hdmf/monitor.py: 0%

38 statements  

« prev     ^ index     » next       coverage.py v7.2.5, created at 2023-08-18 20:49 +0000

1from abc import ABCMeta, abstractmethod 

2 

3from .data_utils import AbstractDataChunkIterator, DataChunkIterator, DataChunk 

4from .utils import docval, getargs 

5 

6 

class NotYetExhausted(Exception):
    """Raised when a final result is requested before the data stream has been fully consumed."""

9 

10 

class DataChunkProcessor(AbstractDataChunkIterator, metaclass=ABCMeta):
    """Wrap a DataChunkIterator and process each chunk as it streams through.

    Subclasses implement ``process_data_chunk`` (invoked once per chunk as the
    stream is consumed) and ``compute_final_result`` (invoked via
    ``get_final_result`` once the stream is exhausted).
    """

    @docval({'name': 'data', 'type': DataChunkIterator, 'doc': 'the DataChunkIterator to analyze'})
    def __init__(self, **kwargs):
        """Initialize the DataChunkIterator"""
        # Get the user parameters
        self.__dci = getargs('data', kwargs)
        # Track whether the wrapped iterator has been fully consumed.
        # BUGFIX: without this initialization, calling get_final_result()
        # before exhaustion raised AttributeError instead of NotYetExhausted.
        self.__done = False

    def __next__(self):
        """Return the next DataChunk, feeding it through process_data_chunk first."""
        try:
            dc = self.__dci.__next__()
        except StopIteration:
            # The wrapped stream is exhausted; record it so get_final_result
            # may now return, then propagate the StopIteration.
            self.__done = True
            raise
        self.process_data_chunk(dc)
        return dc

    def __iter__(self):
        # BUGFIX: return self (not iter(self.__dci)) so that for-loop
        # iteration goes through __next__ and every chunk is passed to
        # process_data_chunk; delegating to the wrapped iterator silently
        # skipped processing and left __done permanently False.
        return self

    def recommended_chunk_shape(self):
        """Delegate to the wrapped DataChunkIterator."""
        return self.__dci.recommended_chunk_shape()

    def recommended_data_shape(self):
        """Delegate to the wrapped DataChunkIterator."""
        return self.__dci.recommended_data_shape()

    def get_final_result(self, **kwargs):
        """Return the result of processing data fed by this DataChunkIterator.

        Raises NotYetExhausted if the wrapped iterator has not been fully consumed.
        """
        if not self.__done:
            raise NotYetExhausted()
        return self.compute_final_result()

    @abstractmethod
    @docval({'name': 'data_chunk', 'type': DataChunk, 'doc': 'a chunk to process'})
    def process_data_chunk(self, **kwargs):
        """Take in a DataChunk and process it; called once per chunk by __next__."""
        pass

    @abstractmethod
    @docval(returns='the result of processing this stream')
    def compute_final_result(self, **kwargs):
        """Return the result of processing this stream.

        Only called once the stream is exhausted (see get_final_result).
        """
        pass

58 

59 

class NumSampleCounter(DataChunkProcessor):
    """A DataChunkProcessor that tallies the total number of samples streamed."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Running total of samples across every chunk processed so far
        self.__sample_count = 0

    @docval({'name': 'data_chunk', 'type': DataChunk, 'doc': 'a chunk to process'})
    def process_data_chunk(self, **kwargs):
        """Add the length of the given chunk to the running sample total."""
        chunk = getargs('data_chunk', kwargs)
        self.__sample_count = self.__sample_count + len(chunk)

    @docval(returns='the result of processing this stream')
    def compute_final_result(self, **kwargs):
        """Return the total number of samples counted."""
        return self.__sample_count