Module pyinotify

Source Code for Module pyinotify

#!/usr/bin/env python

# pyinotify.py - python interface to inotify
# Copyright (c) 2010 Sebastien Martini <seb@dbzteam.org>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
pyinotify

@author: Sebastien Martini
@license: MIT License
@contact: seb@dbzteam.org
"""


class PyinotifyError(Exception):
    """Indicates exceptions raised by a Pyinotify class."""
    pass


class UnsupportedPythonVersionError(PyinotifyError):
    """
    Raised on unsupported Python versions.
    """
    def __init__(self, version):
        """
        @param version: Current Python version
        @type version: string
        """
        PyinotifyError.__init__(self,
                                ('Python %s is unsupported, requires '
                                 'at least Python 2.4') % version)


class UnsupportedLibcVersionError(PyinotifyError):
    """
    Raised on unsupported libc versions.
    """
    def __init__(self, version):
        """
        @param version: Current Libc version
        @type version: string
        """
        PyinotifyError.__init__(self,
                                ('Libc %s is not supported, requires '
                                 'at least Libc 2.4') % version)


# Check Python version
import sys
if sys.version < '2.4':
    raise UnsupportedPythonVersionError(sys.version)


# Import directives
import threading
import os
import select
import struct
import fcntl
import errno
import termios
import array
import logging
import atexit
from collections import deque
from datetime import datetime, timedelta
import time
import fnmatch
import re
import ctypes
import ctypes.util
import asyncore
import glob

try:
    from functools import reduce
except ImportError:
    pass  # Will fail on Python 2.4 which has reduce() builtin anyway.

__author__ = "seb@dbzteam.org (Sebastien Martini)"

__version__ = "0.9.0"

__metaclass__ = type  # Use new-style classes by default


# Compatibility mode: set to True to improve compatibility with
# Pyinotify 0.7.1. Do not set this variable yourself, call the
# function compatibility_mode() instead.
COMPATIBILITY_MODE = False


# Load libc
LIBC = None
STRERRNO = None
if sys.version_info[0] >= 2 and sys.version_info[1] >= 6:
    LIBC = ctypes.CDLL(ctypes.util.find_library('c'), use_errno=True)

    def _strerrno():
        code = ctypes.get_errno()
        return ' Errno=%s (%s)' % (os.strerror(code), errno.errorcode[code])
    STRERRNO = _strerrno
else:
    LIBC = ctypes.CDLL(ctypes.util.find_library('c'))
    STRERRNO = lambda: ''

# The libc version > 2.4 check.
# XXX: Maybe it is better to check if the libc has the needed functions inside?
#      Because there are inotify patches for libc 2.3.6.
LIBC.gnu_get_libc_version.restype = ctypes.c_char_p
LIBC_VERSION = LIBC.gnu_get_libc_version()
if (int(LIBC_VERSION.split('.')[0]) < 2 or
    (int(LIBC_VERSION.split('.')[0]) == 2 and
     int(LIBC_VERSION.split('.')[1]) < 4)):
    raise UnsupportedLibcVersionError(LIBC_VERSION)


class PyinotifyLogger(logging.Logger):
    """
    Pyinotify logger used for logging unicode strings.
    """
    def makeRecord(self, name, level, fn, lno, msg, args, exc_info, func=None,
                   extra=None):
        rv = UnicodeLogRecord(name, level, fn, lno, msg, args, exc_info, func)
        if extra is not None:
            for key in extra:
                if (key in ["message", "asctime"]) or (key in rv.__dict__):
                    raise KeyError("Attempt to overwrite %r in LogRecord" % key)
                rv.__dict__[key] = extra[key]
        return rv


class UnicodeLogRecord(logging.LogRecord):
    def __init__(self, name, level, pathname, lineno,
                 msg, args, exc_info, func=None):
        py_version = sys.version_info
        # func argument was added in Python 2.5, just ignore it otherwise.
        if py_version[0] >= 2 and py_version[1] >= 5:
            logging.LogRecord.__init__(self, name, level, pathname, lineno,
                                       msg, args, exc_info, func)
        else:
            logging.LogRecord.__init__(self, name, level, pathname, lineno,
                                       msg, args, exc_info)

    def getMessage(self):
        msg = self.msg
        if not isinstance(msg, (unicode, str)):
            try:
                msg = str(self.msg)
            except UnicodeError:
                pass
        if self.args:
            if isinstance(self.args, tuple):
                def str_to_unicode(s):
                    """Return unicode string."""
                    if not isinstance(s, str):
                        return s
                    return unicode(s, sys.getfilesystemencoding())
                args = tuple([str_to_unicode(m) for m in self.args])
            else:
                args = self.args
            msg = msg % args
        if not isinstance(msg, unicode):
            msg = unicode(msg, sys.getfilesystemencoding())
        return msg


# Logging
def logger_init():
    """Initialize logger instance."""
    logging.setLoggerClass(PyinotifyLogger)
    log = logging.getLogger("pyinotify")
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(
        logging.Formatter("[Pyinotify %(levelname)s] %(message)s"))
    log.addHandler(console_handler)
    log.setLevel(20)
    return log

log = logger_init()


# inotify's variables
class SysCtlINotify:
    """
    Access (read, write) inotify's variables through sysctl. Usually it
    requires administrator rights to update them.

    Examples:
      - Read max_queued_events attribute: myvar = max_queued_events.value
      - Update max_queued_events attribute: max_queued_events.value = 42
    """

    inotify_attrs = {'max_user_instances': 1,
                     'max_user_watches': 2,
                     'max_queued_events': 3}

    def __init__(self, attrname):
        sino = ctypes.c_int * 3
        self._attrname = attrname
        self._attr = sino(5, 20, SysCtlINotify.inotify_attrs[attrname])

    def get_val(self):
        """
        Gets attribute's value.

        @return: stored value.
        @rtype: int
        """
        oldv = ctypes.c_int(0)
        size = ctypes.c_int(ctypes.sizeof(oldv))
        LIBC.sysctl(self._attr, 3,
                    ctypes.c_voidp(ctypes.addressof(oldv)),
                    ctypes.addressof(size),
                    None, 0)
        return oldv.value

    def set_val(self, nval):
        """
        Sets new attribute's value.

        @param nval: replaces current value by nval.
        @type nval: int
        """
        oldv = ctypes.c_int(0)
        sizeo = ctypes.c_int(ctypes.sizeof(oldv))
        newv = ctypes.c_int(nval)
        sizen = ctypes.c_int(ctypes.sizeof(newv))
        LIBC.sysctl(self._attr, 3,
                    ctypes.c_voidp(ctypes.addressof(oldv)),
                    ctypes.addressof(sizeo),
                    ctypes.c_voidp(ctypes.addressof(newv)),
                    ctypes.addressof(sizen))

    value = property(get_val, set_val)

    def __repr__(self):
        return '<%s=%d>' % (self._attrname, self.get_val())


# Singleton instances
#
# read: myvar = max_queued_events.value
# update: max_queued_events.value = 42
#
for attrname in ('max_queued_events', 'max_user_instances', 'max_user_watches'):
    globals()[attrname] = SysCtlINotify(attrname)
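
# Illustrative usage sketch (not part of the original module source): reading
# and raising inotify's sysctl limits through the singletons created above.
# Updating them usually requires root privileges; the attribute names are real
# module-level singletons, the 'pyinotify' import is assumed.
#
#     import pyinotify
#     print pyinotify.max_queued_events.value     # e.g. 16384
#     print pyinotify.max_user_watches.value
#     pyinotify.max_user_watches.value = 524288   # needs administrator rights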


class EventsCodes:
    """
    Set of codes corresponding to each kind of events.
    Some of these flags are used to communicate with inotify, whereas
    the others are sent to userspace by inotify notifying some events.

    @cvar IN_ACCESS: File was accessed.
    @type IN_ACCESS: int
    @cvar IN_MODIFY: File was modified.
    @type IN_MODIFY: int
    @cvar IN_ATTRIB: Metadata changed.
    @type IN_ATTRIB: int
    @cvar IN_CLOSE_WRITE: Writable file was closed.
    @type IN_CLOSE_WRITE: int
    @cvar IN_CLOSE_NOWRITE: Unwritable file was closed.
    @type IN_CLOSE_NOWRITE: int
    @cvar IN_OPEN: File was opened.
    @type IN_OPEN: int
    @cvar IN_MOVED_FROM: File was moved from X.
    @type IN_MOVED_FROM: int
    @cvar IN_MOVED_TO: File was moved to Y.
    @type IN_MOVED_TO: int
    @cvar IN_CREATE: Subfile was created.
    @type IN_CREATE: int
    @cvar IN_DELETE: Subfile was deleted.
    @type IN_DELETE: int
    @cvar IN_DELETE_SELF: Self (watched item itself) was deleted.
    @type IN_DELETE_SELF: int
    @cvar IN_MOVE_SELF: Self (watched item itself) was moved.
    @type IN_MOVE_SELF: int
    @cvar IN_UNMOUNT: Backing fs was unmounted.
    @type IN_UNMOUNT: int
    @cvar IN_Q_OVERFLOW: Event queue overflowed.
    @type IN_Q_OVERFLOW: int
    @cvar IN_IGNORED: File was ignored.
    @type IN_IGNORED: int
    @cvar IN_ONLYDIR: Only watch the path if it is a directory (new
                      in kernel 2.6.15).
    @type IN_ONLYDIR: int
    @cvar IN_DONT_FOLLOW: Don't follow a symlink (new in kernel 2.6.15).
                          With this flag we can make sure that we don't
                          watch the target of symlinks.
    @type IN_DONT_FOLLOW: int
    @cvar IN_MASK_ADD: Add to the mask of an already existing watch (new
                       in kernel 2.6.14).
    @type IN_MASK_ADD: int
    @cvar IN_ISDIR: Event occurred against a directory.
    @type IN_ISDIR: int
    @cvar IN_ONESHOT: Only send the event once.
    @type IN_ONESHOT: int
    @cvar ALL_EVENTS: Alias for considering all of the events.
    @type ALL_EVENTS: int
    """

    # The idea here is 'configuration-as-code' - this way, we get our nice class
    # constants, but we also get nice human-friendly text mappings to do lookups
    # against as well, for free:
    FLAG_COLLECTIONS = {'OP_FLAGS': {
                            'IN_ACCESS'        : 0x00000001,  # File was accessed
                            'IN_MODIFY'        : 0x00000002,  # File was modified
                            'IN_ATTRIB'        : 0x00000004,  # Metadata changed
                            'IN_CLOSE_WRITE'   : 0x00000008,  # Writable file was closed
                            'IN_CLOSE_NOWRITE' : 0x00000010,  # Unwritable file closed
                            'IN_OPEN'          : 0x00000020,  # File was opened
                            'IN_MOVED_FROM'    : 0x00000040,  # File was moved from X
                            'IN_MOVED_TO'      : 0x00000080,  # File was moved to Y
                            'IN_CREATE'        : 0x00000100,  # Subfile was created
                            'IN_DELETE'        : 0x00000200,  # Subfile was deleted
                            'IN_DELETE_SELF'   : 0x00000400,  # Self (watched item itself)
                                                              # was deleted
                            'IN_MOVE_SELF'     : 0x00000800,  # Self (watched item itself) was moved
                            },
                        'EVENT_FLAGS': {
                            'IN_UNMOUNT'       : 0x00002000,  # Backing fs was unmounted
                            'IN_Q_OVERFLOW'    : 0x00004000,  # Event queue overflowed
                            'IN_IGNORED'       : 0x00008000,  # File was ignored
                            },
                        'SPECIAL_FLAGS': {
                            'IN_ONLYDIR'       : 0x01000000,  # only watch the path if it is a
                                                              # directory
                            'IN_DONT_FOLLOW'   : 0x02000000,  # don't follow a symlink
                            'IN_MASK_ADD'      : 0x20000000,  # add to the mask of an already
                                                              # existing watch
                            'IN_ISDIR'         : 0x40000000,  # event occurred against dir
                            'IN_ONESHOT'       : 0x80000000,  # only send event once
                            },
                        }

    def maskname(mask):
        """
        Returns the event name associated to mask. IN_ISDIR is appended to
        the result when appropriate. Note: only one event is returned, because
        only one event can be raised at a given time.

        @param mask: mask.
        @type mask: int
        @return: event name.
        @rtype: str
        """
        ms = mask
        name = '%s'
        if mask & IN_ISDIR:
            ms = mask - IN_ISDIR
            name = '%s|IN_ISDIR'
        return name % EventsCodes.ALL_VALUES[ms]

    maskname = staticmethod(maskname)


# So let's now turn the configuration into code
EventsCodes.ALL_FLAGS = {}
EventsCodes.ALL_VALUES = {}
for flagc, valc in EventsCodes.FLAG_COLLECTIONS.items():
    # Make the collections' members directly accessible through the
    # class dictionary
    setattr(EventsCodes, flagc, valc)

    # Collect all the flags under a common umbrella
    EventsCodes.ALL_FLAGS.update(valc)

    # Make the individual masks accessible as 'constants' at globals() scope
    # and masknames accessible by values.
    for name, val in valc.items():
        globals()[name] = val
        EventsCodes.ALL_VALUES[val] = name


# all 'normal' events
ALL_EVENTS = reduce(lambda x, y: x | y, EventsCodes.OP_FLAGS.values())
EventsCodes.ALL_FLAGS['ALL_EVENTS'] = ALL_EVENTS
EventsCodes.ALL_VALUES[ALL_EVENTS] = 'ALL_EVENTS'
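
# Illustrative usage sketch (not part of the original module source): event
# masks are plain integers, so they are combined with bitwise OR and decoded
# back with EventsCodes.maskname(). The names below are real module-level
# constants created by the loop above; the 'pyinotify' import is assumed.
#
#     import pyinotify
#     mask = pyinotify.IN_CREATE | pyinotify.IN_DELETE
#     print pyinotify.EventsCodes.maskname(pyinotify.IN_CREATE |
#                                          pyinotify.IN_ISDIR)
#     # -> 'IN_CREATE|IN_ISDIR'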


class _Event:
    """
    Event structure, represents events raised by the system. This
    is the base class and should be subclassed.

    """
    def __init__(self, dict_):
        """
        Attach attributes (contained in dict_) to self.

        @param dict_: Set of attributes.
        @type dict_: dictionary
        """
        for tpl in dict_.items():
            setattr(self, *tpl)

    def __repr__(self):
        """
        @return: Generic event string representation.
        @rtype: str
        """
        s = ''
        for attr, value in sorted(self.__dict__.items(), key=lambda x: x[0]):
            if attr.startswith('_'):
                continue
            if attr == 'mask':
                value = hex(getattr(self, attr))
            elif isinstance(value, basestring) and not value:
                value = "''"
            s += ' %s%s%s' % (Color.field_name(attr),
                              Color.punctuation('='),
                              Color.field_value(value))

        s = '%s%s%s %s' % (Color.punctuation('<'),
                           Color.class_name(self.__class__.__name__),
                           s,
                           Color.punctuation('>'))
        return s


class _RawEvent(_Event):
    """
    Raw event, it contains only the information provided by the system.
    It doesn't infer anything.
    """
    def __init__(self, wd, mask, cookie, name):
        """
        @param wd: Watch Descriptor.
        @type wd: int
        @param mask: Bitmask of events.
        @type mask: int
        @param cookie: Cookie.
        @type cookie: int
        @param name: Basename of the file or directory against which the
                     event was raised in case where the watched directory
                     is the parent directory. None if the event was raised
                     on the watched item itself.
        @type name: string or None
        """
        # name: remove trailing '\0'
        _Event.__init__(self, {'wd': wd,
                               'mask': mask,
                               'cookie': cookie,
                               'name': name.rstrip('\0')})
        log.debug(repr(self))
        # Use this variable to cache the result of str(self)
        self._str = None

    def __str__(self):
        if self._str is None:
            self._str = '%s %s %s %s' % (str(self.wd), str(self.mask),
                                         str(self.cookie), self.name)
        return self._str


class Event(_Event):
    """
    This class contains all the useful information about the observed
    event. However, the presence of each field is not guaranteed and
    depends on the type of event. In effect, some fields are irrelevant
    for some kinds of events (for example 'cookie' is meaningless for
    IN_CREATE whereas it is mandatory for IN_MOVED_TO).

    The possible fields are:
      - wd (int): Watch Descriptor.
      - mask (int): Mask.
      - maskname (str): Readable event name.
      - path (str): path of the file or directory being watched.
      - name (str): Basename of the file or directory against which the
            event was raised in case where the watched directory
            is the parent directory. None if the event was raised
            on the watched item itself. This field is always provided
            even if the string is ''.
      - pathname (str): Concatenation of 'path' and 'name'.
      - src_pathname (str): Only present for IN_MOVED_TO events and only in
            the case where IN_MOVED_FROM events are watched too. Holds the
            source pathname from where pathname was moved from.
      - cookie (int): Cookie.
      - dir (bool): True if the event was raised against a directory.

    """
    def __init__(self, raw):
        """
        Concretely, this is the raw event plus inferred infos.
        """
        _Event.__init__(self, raw)
        self.maskname = EventsCodes.maskname(self.mask)
        if COMPATIBILITY_MODE:
            self.event_name = self.maskname
        try:
            if self.name:
                self.pathname = os.path.abspath(os.path.join(self.path,
                                                             self.name))
            else:
                self.pathname = os.path.abspath(self.path)
        except AttributeError, err:
            # Usually it is not an error: some events are perfectly valid
            # despite the lack of these attributes.
            log.debug(err)


class ProcessEventError(PyinotifyError):
    """
    ProcessEventError Exception. Raised on ProcessEvent error.
    """
    def __init__(self, err):
        """
        @param err: Exception error description.
        @type err: string
        """
        PyinotifyError.__init__(self, err)


class _ProcessEvent:
    """
    Abstract processing event class.
    """
    def __call__(self, event):
        """
        To behave like a functor the object must be callable.
        This method is a dispatch method. Its lookup order is:
          1. process_MASKNAME method
          2. process_FAMILY_NAME method
          3. otherwise calls process_default

        @param event: Event to be processed.
        @type event: Event object
        @return: By convention when used from the ProcessEvent class:
                 - Returning False or None (default value) means keep on
                   executing next chained functors (see chain.py example).
                 - Returning True instead means do not execute next
                   processing functions.
        @rtype: bool
        @raise ProcessEventError: Event object undispatchable,
                                  unknown event.
        """
        stripped_mask = event.mask - (event.mask & IN_ISDIR)
        maskname = EventsCodes.ALL_VALUES.get(stripped_mask)
        if maskname is None:
            raise ProcessEventError("Unknown mask 0x%08x" % stripped_mask)

        # 1- look for process_MASKNAME
        meth = getattr(self, 'process_' + maskname, None)
        if meth is not None:
            return meth(event)
        # 2- look for process_FAMILY_NAME
        meth = getattr(self, 'process_IN_' + maskname.split('_')[1], None)
        if meth is not None:
            return meth(event)
        # 3- default call method process_default
        return self.process_default(event)

    def __repr__(self):
        return '<%s>' % self.__class__.__name__


class _SysProcessEvent(_ProcessEvent):
    """
    There are three kinds of processing according to each event:

      1. special handling (deletion from internal container, bug, ...).
      2. default treatment: which is applied to the majority of events.
      3. IN_ISDIR is never sent alone, it is piggybacked with a standard
         event, it is not processed like the other events, instead, its
         value is captured and appropriately aggregated to the dst event.
    """
    def __init__(self, wm, notifier):
        """

        @param wm: Watch Manager.
        @type wm: WatchManager instance
        @param notifier: Notifier.
        @type notifier: Notifier instance
        """
        self._watch_manager = wm  # watch manager
        self._notifier = notifier  # notifier
        self._mv_cookie = {}  # {cookie(int): (src_path(str), date), ...}
        self._mv = {}  # {src_path(str): (dst_path(str), date), ...}

    def cleanup(self):
        """
        Cleanup (delete) old (> 1 minute) records contained in
        self._mv_cookie and self._mv.
        """
        date_cur_ = datetime.now()
        for seq in [self._mv_cookie, self._mv]:
            for k in seq.keys():
                if (date_cur_ - seq[k][1]) > timedelta(minutes=1):
                    log.debug('Cleanup: deleting entry %s', seq[k][0])
                    del seq[k]

    def process_IN_CREATE(self, raw_event):
        """
        If the event affects a directory and the auto_add flag of the
        targeted watch is set to True, a new watch is added on this
        new directory, with the same attribute values as those of
        this watch.
        """
        if raw_event.mask & IN_ISDIR:
            watch_ = self._watch_manager.get_watch(raw_event.wd)
            created_dir = os.path.join(watch_.path, raw_event.name)
            if watch_.auto_add and not watch_.exclude_filter(created_dir):
                addw = self._watch_manager.add_watch
                # The newly monitored directory inherits attributes from its
                # parent directory.
                addw_ret = addw(created_dir, watch_.mask,
                                proc_fun=watch_.proc_fun,
                                rec=False, auto_add=watch_.auto_add,
                                exclude_filter=watch_.exclude_filter)

                # Trick to handle mkdir -p /t1/t2/t3 where t1 is watched and
                # t2 and t3 are created.
                # Since the directory is new, then everything inside it
                # must also be new.
                created_dir_wd = addw_ret.get(created_dir)
                if (created_dir_wd is not None) and created_dir_wd > 0:
                    for name in os.listdir(created_dir):
                        inner = os.path.join(created_dir, name)
                        if (os.path.isdir(inner) and
                            self._watch_manager.get_wd(inner) is None):
                            # Generate (simulate) creation events for sub
                            # directories.
                            rawevent = _RawEvent(created_dir_wd,
                                                 IN_CREATE | IN_ISDIR,
                                                 0, name)
                            self._notifier.append_event(rawevent)
        return self.process_default(raw_event)

    def process_IN_MOVED_FROM(self, raw_event):
        """
        Map the cookie with the source path (+ date for cleaning).
        """
        watch_ = self._watch_manager.get_watch(raw_event.wd)
        path_ = watch_.path
        src_path = os.path.normpath(os.path.join(path_, raw_event.name))
        self._mv_cookie[raw_event.cookie] = (src_path, datetime.now())
        return self.process_default(raw_event, {'cookie': raw_event.cookie})

    def process_IN_MOVED_TO(self, raw_event):
        """
        Map the source path with the destination path (+ date for
        cleaning).
        """
        watch_ = self._watch_manager.get_watch(raw_event.wd)
        path_ = watch_.path
        dst_path = os.path.normpath(os.path.join(path_, raw_event.name))
        mv_ = self._mv_cookie.get(raw_event.cookie)
        to_append = {'cookie': raw_event.cookie}
        if mv_ is not None:
            self._mv[mv_[0]] = (dst_path, datetime.now())
            # Let's assume that the IN_MOVED_FROM event is always queued
            # before its associated IN_MOVED_TO event (they share a common
            # cookie) is queued itself. It is then possible in that scenario
            # to provide as additional information to the IN_MOVED_TO event
            # the original pathname of the moved file/directory.
            to_append['src_pathname'] = mv_[0]
        elif (raw_event.mask & IN_ISDIR and watch_.auto_add and
              not watch_.exclude_filter(dst_path)):
            # We got a directory that's "moved in" from an unknown source and
            # auto_add is enabled. Manually add watches to the inner subtrees.
            # The newly monitored directory inherits attributes from its
            # parent directory.
            self._watch_manager.add_watch(dst_path, watch_.mask,
                                          proc_fun=watch_.proc_fun,
                                          rec=True, auto_add=True,
                                          exclude_filter=watch_.exclude_filter)
        return self.process_default(raw_event, to_append)

    def process_IN_MOVE_SELF(self, raw_event):
        """
        STATUS: the following bug has been fixed in recent kernels (FIXME:
        which version?). Now it raises IN_DELETE_SELF instead.

        Old kernels were buggy: this event was raised when the watched item
        was moved, so we had to update its path, but under some circumstances
        it was impossible: if its parent directory and its destination
        directory weren't watched. The kernel (see include/linux/fsnotify.h)
        doesn't bring us enough information like the destination path of
        moved items.
        """
        watch_ = self._watch_manager.get_watch(raw_event.wd)
        src_path = watch_.path
        mv_ = self._mv.get(src_path)
        if mv_:
            dest_path = mv_[0]
            watch_.path = dest_path
            # add the separator to the source path to avoid overlapping
            # path issues when testing with startswith()
            src_path += os.path.sep
            src_path_len = len(src_path)
            # The next loop renames all watches with src_path as base path.
            # It seems that IN_MOVE_SELF does not provide IN_ISDIR information
            # therefore the next loop is iterated even if raw_event is a file.
            for w in self._watch_manager.watches.values():
                if w.path.startswith(src_path):
                    # Note that dest_path is a normalized path.
                    w.path = os.path.join(dest_path, w.path[src_path_len:])
        else:
            log.error("The pathname '%s' of this watch %s has probably changed "
                      "and couldn't be updated, so it cannot be trusted "
                      "anymore. To fix this error move directories/files only "
                      "between watched parent directories, in this case e.g. "
                      "put a watch on '%s'.",
                      watch_.path, watch_,
                      os.path.normpath(os.path.join(watch_.path,
                                                    os.path.pardir)))
            if not watch_.path.endswith('-unknown-path'):
                watch_.path += '-unknown-path'
        return self.process_default(raw_event)

    def process_IN_Q_OVERFLOW(self, raw_event):
        """
        Only signal an overflow, most of the common flags are irrelevant
        for this event (path, wd, name).
        """
        return Event({'mask': raw_event.mask})

    def process_IN_IGNORED(self, raw_event):
        """
        The watch descriptor raised by this event is now ignored (forever),
        it can be safely deleted from the watch manager dictionary.
        After this event we can be sure that neither the event queue nor
        the system will raise an event associated to this wd again.
        """
        event_ = self.process_default(raw_event)
        self._watch_manager.del_watch(raw_event.wd)
        return event_

    def process_default(self, raw_event, to_append=None):
        """
        Common handling for the following events:

        IN_ACCESS, IN_MODIFY, IN_ATTRIB, IN_CLOSE_WRITE, IN_CLOSE_NOWRITE,
        IN_OPEN, IN_DELETE, IN_DELETE_SELF, IN_UNMOUNT.
        """
        watch_ = self._watch_manager.get_watch(raw_event.wd)
        if raw_event.mask & (IN_DELETE_SELF | IN_MOVE_SELF):
            # Unfortunately this information is not provided by the kernel
            dir_ = watch_.dir
        else:
            dir_ = bool(raw_event.mask & IN_ISDIR)
        dict_ = {'wd': raw_event.wd,
                 'mask': raw_event.mask,
                 'path': watch_.path,
                 'name': raw_event.name,
                 'dir': dir_}
        if COMPATIBILITY_MODE:
            dict_['is_dir'] = dir_
        if to_append is not None:
            dict_.update(to_append)
        return Event(dict_)


class ProcessEvent(_ProcessEvent):
    """
    Process events objects, can be specialized via subclassing, thus its
    behavior can be overridden:

    Note: you should not override __init__ in your subclass, instead define
    a my_init() method: this method will be called automatically from the
    constructor of this class with its optional parameters.

      1. Provide specialized individual methods, e.g. process_IN_DELETE for
         processing a precise type of event (e.g. IN_DELETE in this case).
      2. Or/and provide methods for processing events by 'family', e.g.
         process_IN_CLOSE method will process both IN_CLOSE_WRITE and
         IN_CLOSE_NOWRITE events (if process_IN_CLOSE_WRITE and
         process_IN_CLOSE_NOWRITE aren't defined though).
      3. Or/and override process_default for catching and processing all
         the remaining types of events.
    """
    pevent = None

    def __init__(self, pevent=None, **kargs):
        """
        Enable chaining of ProcessEvent instances.

        @param pevent: Optional callable object, will be called on event
                       processing (before self).
        @type pevent: callable
        @param kargs: This constructor is implemented as a template method
                      delegating its optional keyword arguments to the
                      method my_init().
        @type kargs: dict
        """
        self.pevent = pevent
        self.my_init(**kargs)

    def my_init(self, **kargs):
        """
        This method is called from ProcessEvent.__init__(). This method is
        empty here and must be redefined to be useful. In effect, if you
        need to specifically initialize your subclass' instance then you
        just have to override this method in your subclass. Then all the
        keyword arguments passed to ProcessEvent.__init__() will be
        transmitted as parameters to this method. Beware you MUST pass
        keyword arguments though.

        @param kargs: optional delegated arguments from __init__().
        @type kargs: dict
        """
        pass

    def __call__(self, event):
        stop_chaining = False
        if self.pevent is not None:
            # By default methods return None so we set as a guideline
            # that methods asking to stop chaining must explicitly
            # return non-None or non-False values, otherwise the default
            # behavior will be to accept the chained call to the
            # corresponding local method.
            stop_chaining = self.pevent(event)
        if not stop_chaining:
            return _ProcessEvent.__call__(self, event)

    def nested_pevent(self):
        return self.pevent

    def process_IN_Q_OVERFLOW(self, event):
        """
        By default this method only reports warning messages, you can
        override it by subclassing ProcessEvent and implementing your own
        process_IN_Q_OVERFLOW method. The actions you can take on receiving
        this event are either to update the variable max_queued_events in
        order to handle more simultaneous events or to modify your code in
        order to accomplish a better filtering, diminishing the number of
        raised events. Because this method is defined, IN_Q_OVERFLOW will
        never get transmitted as an argument to process_default calls.

        @param event: IN_Q_OVERFLOW event.
        @type event: dict
        """
        log.warning('Event queue overflowed.')

    def process_default(self, event):
        """
        Default processing event method. By default does nothing. Subclass
        ProcessEvent and redefine this method in order to modify its behavior.

        @param event: Event to be processed. Can be of any type of events but
                      IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW).
        @type event: Event instance
        """
        pass
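
# Illustrative usage sketch (not part of the original module source): a
# minimal ProcessEvent subclass. process_IN_CREATE/process_IN_DELETE are
# dispatched by name as described in _ProcessEvent.__call__ above; the class
# name 'MyHandler' and the log_prefix keyword are hypothetical.
#
#     class MyHandler(pyinotify.ProcessEvent):
#         def my_init(self, log_prefix=''):
#             # Receives the keyword arguments given to __init__().
#             self._prefix = log_prefix
#
#         def process_IN_CREATE(self, event):
#             print '%screated: %s' % (self._prefix, event.pathname)
#
#         def process_IN_DELETE(self, event):
#             print '%sdeleted: %s' % (self._prefix, event.pathname)
#
#         def process_default(self, event):
#             print '%sother event: %s' % (self._prefix, event.maskname)
#
#     handler = MyHandler(log_prefix='[watch] ')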


class PrintAllEvents(ProcessEvent):
    """
    Dummy class used to print events' string representations. For instance
    this class is used from the command line to print all received events
    to stdout.
    """
    def my_init(self, out=None):
        """
        @param out: Where events will be written.
        @type out: Object providing a valid file object interface.
        """
        if out is None:
            out = sys.stdout
        self._out = out

    def process_default(self, event):
        """
        Writes event string representation to file object provided to
        my_init().

        @param event: Event to be processed. Can be of any type of events but
                      IN_Q_OVERFLOW events (see method process_IN_Q_OVERFLOW).
        @type event: Event instance
        """
        self._out.write(repr(event))
        self._out.write('\n')


class ChainIfTrue(ProcessEvent):
    """
    Makes conditional chaining depending on the result of the nested
    processing instance.
    """
    def my_init(self, func):
        """
        Method automatically called from base class constructor.
        """
        self._func = func

    def process_default(self, event):
        return not self._func(event)


class Stats(ProcessEvent):
    """
    Compute and display trivial statistics about processed events.
    """
    def my_init(self):
        """
        Method automatically called from base class constructor.
        """
        self._start_time = time.time()
        self._stats = {}
        self._stats_lock = threading.Lock()

    def process_default(self, event):
        """
        Processes |event|.
        """
        self._stats_lock.acquire()
        try:
            events = event.maskname.split('|')
            for event_name in events:
                count = self._stats.get(event_name, 0)
                self._stats[event_name] = count + 1
        finally:
            self._stats_lock.release()

    def _stats_copy(self):
        self._stats_lock.acquire()
        try:
            return self._stats.copy()
        finally:
            self._stats_lock.release()

    def __repr__(self):
        stats = self._stats_copy()

        elapsed = int(time.time() - self._start_time)
        elapsed_str = ''
        if elapsed < 60:
            elapsed_str = str(elapsed) + 'sec'
        elif 60 <= elapsed < 3600:
            elapsed_str = '%dmn%dsec' % (elapsed / 60, elapsed % 60)
        elif 3600 <= elapsed < 86400:
            elapsed_str = '%dh%dmn' % (elapsed / 3600, (elapsed % 3600) / 60)
        elif elapsed >= 86400:
            elapsed_str = '%dd%dh' % (elapsed / 86400, (elapsed % 86400) / 3600)
        stats['ElapsedTime'] = elapsed_str

        l = []
        for ev, value in sorted(stats.items(), key=lambda x: x[0]):
            l.append(' %s=%s' % (Color.field_name(ev),
                                 Color.field_value(value)))
        s = '<%s%s >' % (Color.class_name(self.__class__.__name__),
                         ''.join(l))
        return s

    def dump(self, filename):
        """
        Dumps statistics to file |filename|.

        @param filename: pathname.
        @type filename: string
        """
        file_obj = file(filename, 'wb')
        try:
            file_obj.write(str(self))
        finally:
            file_obj.close()

    def __str__(self, scale=45):
        stats = self._stats_copy()
        if not stats:
            return ''

        m = max(stats.values())
        unity = float(scale) / m
        fmt = '%%-26s%%-%ds%%s' % (len(Color.field_value('@' * scale))
                                   + 1)
        def func(x):
            return fmt % (Color.field_name(x[0]),
                          Color.field_value('@' * int(x[1] * unity)),
                          Color.simple('%d' % x[1], 'yellow'))
        s = '\n'.join(map(func, sorted(stats.items(), key=lambda x: x[0])))
        return s


class NotifierError(PyinotifyError):
    """
    Notifier Exception. Raised on Notifier error.

    """
    def __init__(self, err):
        """
        @param err: Exception string's description.
        @type err: string
        """
        PyinotifyError.__init__(self, err)


class Notifier:
    """
    Read notifications, process events.

    """
    def __init__(self, watch_manager, default_proc_fun=None, read_freq=0,
                 threshold=0, timeout=None):
        """
        Initialization. read_freq, threshold and timeout parameters are used
        when looping.

        @param watch_manager: Watch Manager.
        @type watch_manager: WatchManager instance
        @param default_proc_fun: Default processing method. If None, a new
                                 instance of PrintAllEvents will be assigned.
        @type default_proc_fun: instance of ProcessEvent
        @param read_freq: if read_freq == 0, events are read asap,
                          if read_freq is > 0, this thread sleeps
                          max(0, read_freq - timeout) seconds. But if
                          timeout is None it may be different because
                          poll is blocking waiting for something to read.
        @type read_freq: int
        @param threshold: File descriptor will be read only if the
                          accumulated size to read becomes >= threshold.
                          If != 0, you likely want to use it in combination
                          with an appropriate value for read_freq because
                          without that you would keep looping without really
                          reading anything and that until the amount of
                          events to read is >= threshold. At least with
                          read_freq set you might sleep.
        @type threshold: int
        @param timeout:
            http://docs.python.org/lib/poll-objects.html#poll-objects
        @type timeout: int
        """
        # Watch Manager instance
        self._watch_manager = watch_manager
        # File descriptor
        self._fd = self._watch_manager.get_fd()
        # Poll object and registration
        self._pollobj = select.poll()
        self._pollobj.register(self._fd, select.POLLIN)
        # This pipe is correctly initialized and used by ThreadedNotifier
        self._pipe = (-1, -1)
        # Event queue
        self._eventq = deque()
        # System processing functor, common to all events
        self._sys_proc_fun = _SysProcessEvent(self._watch_manager, self)
        # Default processing method
        self._default_proc_fun = default_proc_fun
        if default_proc_fun is None:
            self._default_proc_fun = PrintAllEvents()
        # Loop parameters
        self._read_freq = read_freq
        self._threshold = threshold
        self._timeout = timeout
        # Coalesce events option
        self._coalesce = False
        # set of str(raw_event), only used when coalesce option is True
        self._eventset = set()

    def append_event(self, event):
        """
        Append a raw event to the event queue.

        @param event: An event.
        @type event: _RawEvent instance.
        """
        self._eventq.append(event)

    def proc_fun(self):
        return self._default_proc_fun

    def coalesce_events(self, coalesce=True):
        """
        Coalescing events. Events are usually processed by batches, their
        size depending on various factors. Thus, before processing them,
        events received from inotify are aggregated in a fifo queue. If this
        coalescing option is enabled events are filtered based on their
        uniqueness: only unique events are enqueued, duplicates are
        discarded. An event is unique when the combination of its fields
        (wd, mask, cookie, name) is unique among events of a same batch.
        After a batch of events is processed any event is accepted again.
        By default this option is disabled, you have to explicitly call this
        function to turn it on.

        @param coalesce: Optional new coalescing value. True by default.
        @type coalesce: Bool
        """
        self._coalesce = coalesce
        if not coalesce:
            self._eventset.clear()

    def check_events(self, timeout=None):
        """
        Check for new events available to read, blocks up to timeout
        milliseconds.

        @param timeout: If specified it overrides the corresponding instance
                        attribute _timeout.
        @type timeout: int

        @return: New events to read.
        @rtype: bool
        """
        while True:
            try:
                # blocks up to 'timeout' milliseconds
                if timeout is None:
                    timeout = self._timeout
                ret = self._pollobj.poll(timeout)
            except select.error, err:
                if err[0] == errno.EINTR:
                    continue  # interrupted, retry
                else:
                    raise
            else:
                break

        if not ret or (self._pipe[0] == ret[0][0]):
            return False
        # only one fd is polled
        return ret[0][1] & select.POLLIN

    def read_events(self):
        """
        Read events from device, build _RawEvents, and enqueue them.
        """
        buf_ = array.array('i', [0])
        # get event queue size
        if fcntl.ioctl(self._fd, termios.FIONREAD, buf_, 1) == -1:
            return
        queue_size = buf_[0]
        if queue_size < self._threshold:
            log.debug('(fd: %d) %d bytes available to read but threshold is '
                      'fixed to %d bytes', self._fd, queue_size,
                      self._threshold)
            return

        try:
            # Read content from file
            r = os.read(self._fd, queue_size)
        except Exception, msg:
            raise NotifierError(msg)
        log.debug('Event queue size: %d', queue_size)
        rsum = 0  # counter
        while rsum < queue_size:
            s_size = 16
            # Retrieve wd, mask, cookie and fname_len
            wd, mask, cookie, fname_len = struct.unpack('iIII',
                                                        r[rsum:rsum+s_size])
            # Retrieve name
            fname, = struct.unpack('%ds' % fname_len,
                                   r[rsum + s_size:rsum + s_size + fname_len])
            rawevent = _RawEvent(wd, mask, cookie, fname)
            if self._coalesce:
                # Only enqueue new (unique) events.
                raweventstr = str(rawevent)
                if raweventstr not in self._eventset:
                    self._eventset.add(raweventstr)
                    self._eventq.append(rawevent)
            else:
                self._eventq.append(rawevent)
            rsum += s_size + fname_len

    def process_events(self):
        """
        Routine for processing events from queue by calling their
        associated processing method (an instance of ProcessEvent).
        It also does internal processing, to keep the system updated.
        """
        while self._eventq:
            raw_event = self._eventq.popleft()  # pop next event
            watch_ = self._watch_manager.get_watch(raw_event.wd)
            if watch_ is None:
                # Not really sure how we ended up here, nor how we should
                # handle these types of events and if it is appropriate to
                # completely skip them (like we are doing here).
                log.warning("Unable to retrieve Watch object associated to %s",
                            repr(raw_event))
                continue
            revent = self._sys_proc_fun(raw_event)  # system processing
            if watch_ and watch_.proc_fun:
                watch_.proc_fun(revent)  # user processing
            else:
                self._default_proc_fun(revent)
        self._sys_proc_fun.cleanup()  # remove old MOVED_* event records
        if self._coalesce:
            self._eventset.clear()

    def __daemonize(self, pid_file=None, force_kill=False, stdin=os.devnull,
                    stdout=os.devnull, stderr=os.devnull):
        """
        pid_file: file to which the pid will be written.
        force_kill: if True kill the process associated to pid_file.
        stdin, stdout, stderr: files associated to common streams.
        """
        if pid_file is None:
            dirname = '/var/run/'
            basename = os.path.basename(sys.argv[0]) or 'pyinotify'
            pid_file = os.path.join(dirname, basename + '.pid')

        if os.path.exists(pid_file):
            fo = file(pid_file, 'rb')
            try:
                try:
                    pid = int(fo.read())
                except ValueError:
                    pid = None
                if pid is not None:
                    try:
                        os.kill(pid, 0)
                    except OSError, err:
                        if err.errno == errno.ESRCH:
                            log.debug(err)
                        else:
                            log.error(err)
                    else:
                        if not force_kill:
                            s = 'There is already a pid file %s with pid %d'
                            raise NotifierError(s % (pid_file, pid))
                        else:
                            os.kill(pid, 9)
            finally:
                fo.close()


        def fork_daemon():
            # Adapted from Chad J. Schroeder's recipe
            # @see http://code.activestate.com/recipes/278731/
            pid = os.fork()
            if (pid == 0):
                # parent 2
                os.setsid()
                pid = os.fork()
                if (pid == 0):
                    # child
                    os.chdir('/')
                    os.umask(0)
                else:
                    # parent 2
                    os._exit(0)
            else:
                # parent 1
                os._exit(0)

            fd_inp = os.open(stdin, os.O_RDONLY)
            os.dup2(fd_inp, 0)
            fd_out = os.open(stdout, os.O_WRONLY|os.O_CREAT)
            os.dup2(fd_out, 1)
            fd_err = os.open(stderr, os.O_WRONLY|os.O_CREAT)
            os.dup2(fd_err, 2)

        # Detach task
        fork_daemon()

        # Write pid
        file_obj = file(pid_file, 'wb')
        try:
            file_obj.write(str(os.getpid()) + '\n')
        finally:
            file_obj.close()

        atexit.register(lambda: os.unlink(pid_file))


    def _sleep(self, ref_time):
        # Only consider sleeping if read_freq is > 0
        if self._read_freq > 0:
            cur_time = time.time()
            sleep_amount = self._read_freq - (cur_time - ref_time)
            if sleep_amount > 0:
                log.debug('Now sleeping %d seconds', sleep_amount)
                time.sleep(sleep_amount)


    def loop(self, callback=None, daemonize=False, **args):
        """
        Events are read only once every min(read_freq, timeout)
        seconds at best and only if the size to read is >= threshold.
        After this method returns it must not be called again for the same
        instance.

        @param callback: Functor called after each event processing
                         iteration. Expects to receive the notifier object
                         (self) as first parameter. If this function returns
                         True the loop is immediately terminated otherwise
                         the loop method keeps looping.
        @type callback: callable object or function
        @param daemonize: This thread is daemonized if set to True.
        @type daemonize: boolean
        @param args: Optional and relevant only if daemonize is True.
                     Remaining keyword arguments are directly passed to the
                     daemonization method, see __daemonize().
        @type args: various
        """
        if daemonize:
            self.__daemonize(**args)

        # Read and process events forever
        while 1:
            try:
                self.process_events()
                if (callback is not None) and (callback(self) is True):
                    break
                ref_time = time.time()
                # check_events is blocking
                if self.check_events():
                    self._sleep(ref_time)
                    self.read_events()
            except KeyboardInterrupt:
                # Stop monitoring if sigint is caught (Control-C).
                log.debug('Pyinotify stops monitoring.')
                break
        # Close internals
        self.stop()


    def stop(self):
        """
        Close inotify's instance (close its file descriptor).
        It destroys all existing watches, pending events,...
        This method is automatically called at the end of loop().
        """
        self._pollobj.unregister(self._fd)
        os.close(self._fd)
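
# Illustrative usage sketch (not part of the original module source): the
# typical blocking main loop. WatchManager, Notifier, add_watch and the mask
# constants are real module names; '/tmp/watched' is a hypothetical path.
#
#     import pyinotify
#     wm = pyinotify.WatchManager()
#     mask = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY
#     wm.add_watch('/tmp/watched', mask, rec=True, auto_add=True)
#     notifier = pyinotify.Notifier(wm,
#                                   default_proc_fun=pyinotify.PrintAllEvents())
#     notifier.loop()   # blocks until KeyboardInterrupt, then calls stop()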


class ThreadedNotifier(threading.Thread, Notifier):
    """
    This notifier inherits from threading.Thread for instantiating a separate
    thread, and also inherits from Notifier, because it is a threaded
    notifier.

    Note that every functionality provided by this class is also provided
    through the Notifier class. Moreover the Notifier class should be
    considered first because it is not threaded and could be easily
    daemonized.
    """
    def __init__(self, watch_manager, default_proc_fun=None, read_freq=0,
                 threshold=0, timeout=None):
        """
        Initialization, initialize base classes. read_freq, threshold and
        timeout parameters are used when looping.

        @param watch_manager: Watch Manager.
        @type watch_manager: WatchManager instance
        @param default_proc_fun: Default processing method. See base class.
        @type default_proc_fun: instance of ProcessEvent
        @param read_freq: if read_freq == 0, events are read asap,
                          if read_freq is > 0, this thread sleeps
                          max(0, read_freq - timeout) seconds.
        @type read_freq: int
        @param threshold: File descriptor will be read only if the
                          accumulated size to read becomes >= threshold.
                          If != 0, you likely want to use it in combination
                          with an appropriate value set for read_freq because
                          without that you would keep looping without really
                          reading anything and that until the amount of
                          events to read is >= threshold. At least with
                          read_freq you might sleep.
        @type threshold: int
        @param timeout:
            see http://docs.python.org/lib/poll-objects.html#poll-objects
        @type timeout: int
        """
        # Init threading base class
        threading.Thread.__init__(self)
        # Stop condition
        self._stop_event = threading.Event()
        # Init Notifier base class
        Notifier.__init__(self, watch_manager, default_proc_fun, read_freq,
                          threshold, timeout)
        # Create a new pipe used for thread termination
        self._pipe = os.pipe()
        self._pollobj.register(self._pipe[0], select.POLLIN)

    def stop(self):
        """
        Stop notifier's loop. Stop notification. Join the thread.
        """
        self._stop_event.set()
        os.write(self._pipe[1], 'stop')
        threading.Thread.join(self)
        Notifier.stop(self)
        self._pollobj.unregister(self._pipe[0])
        os.close(self._pipe[0])
        os.close(self._pipe[1])

    def loop(self):
        """
        Thread's main loop. Not meant to be called by the user directly.
        Call the inherited start() method instead.

        Events are read only once every min(read_freq, timeout)
        seconds at best and only if the size of events to read is >= threshold.
        """
        # When the loop must be terminated .stop() is called, 'stop'
        # is written to the pipe fd so poll() returns and .check_events()
        # returns False, which makes the while loop's stop condition
        # ._stop_event.isSet() evaluate to True and puts an end to the
        # thread's execution.
        while not self._stop_event.isSet():
            self.process_events()
            ref_time = time.time()
            if self.check_events():
                self._sleep(ref_time)
                self.read_events()

    def run(self):
        """
        Start the thread's loop: read and process events until the method
        stop() is called.
        Never call this method directly, instead call the start() method
        inherited from threading.Thread, which then will call run() in
        its turn.
        """
        self.loop()
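
# Illustrative usage sketch (not part of the original module source): running
# the notifier in a background thread so the main program keeps control.
# ThreadedNotifier, start() and stop() are real APIs; the watched path and the
# 30-second sleep are hypothetical.
#
#     import time
#     import pyinotify
#     wm = pyinotify.WatchManager()
#     notifier = pyinotify.ThreadedNotifier(wm)
#     notifier.start()                      # spawns the thread, runs loop()
#     wm.add_watch('/tmp/watched', pyinotify.ALL_EVENTS, rec=False)
#     time.sleep(30)                        # main program does its own work
#     notifier.stop()                       # unblocks poll(), joins the thread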


class AsyncNotifier(asyncore.file_dispatcher, Notifier):
    """
    This notifier inherits from asyncore.file_dispatcher in order to be able
    to use pyinotify along with the asyncore framework.

    """
    def __init__(self, watch_manager, default_proc_fun=None, read_freq=0,
                 threshold=0, timeout=None, channel_map=None):
        """
        Initializes the async notifier. The only additional parameter is
        'channel_map' which is the optional asyncore private map. See
        the Notifier class for the meaning of the other parameters.

        """
        Notifier.__init__(self, watch_manager, default_proc_fun, read_freq,
                          threshold, timeout)
        asyncore.file_dispatcher.__init__(self, self._fd, channel_map)

    def handle_read(self):
        """
        When asyncore tells us we can read from the fd, we proceed processing
        events. This method can be overridden for handling a notification
        differently.

        """
        self.read_events()
        self.process_events()


class Watch:
    """
    Represent a watch, i.e. a file or directory being watched.

    """
    def __init__(self, wd, path, mask, proc_fun, auto_add, exclude_filter):
        """
        Initializations.

        @param wd: Watch descriptor.
        @type wd: int
        @param path: Path of the file or directory being watched.
        @type path: str
        @param mask: Mask.
        @type mask: int
        @param proc_fun: Processing callable object.
        @type proc_fun:
        @param auto_add: Automatically add watches on new directories.
        @type auto_add: bool
        @param exclude_filter: Boolean function, used to exclude new
                               directories from being automatically watched.
                               See WatchManager.__init__
        @type exclude_filter: callable object
        """
        self.wd = wd
        self.path = path
        self.mask = mask
        self.proc_fun = proc_fun
        self.auto_add = auto_add
        self.exclude_filter = exclude_filter
        self.dir = os.path.isdir(self.path)

    def __repr__(self):
        """
        @return: String representation.
        @rtype: str
        """
        s = ' '.join(['%s%s%s' % (Color.field_name(attr),
                                  Color.punctuation('='),
                                  Color.field_value(getattr(self, attr))) \
                      for attr in self.__dict__ if not attr.startswith('_')])

        s = '%s%s %s %s' % (Color.punctuation('<'),
                            Color.class_name(self.__class__.__name__),
                            s,
                            Color.punctuation('>'))
        return s


class ExcludeFilter:
    """
    ExcludeFilter is an exclusion filter.
    """
    def __init__(self, arg_lst):
        """
        Examples:
          ef1 = ExcludeFilter(["^/etc/rc.*", "^/etc/hostname"])
          ef2 = ExcludeFilter("/my/path/exclude.lst")
          Where exclude.lst contains:
            ^/etc/rc.*
            ^/etc/hostname

        @param arg_lst: is either a list of patterns or a filename from which
                        patterns will be loaded.
        @type arg_lst: list of str or str
        """
        if isinstance(arg_lst, str):
            lst = self._load_patterns_from_file(arg_lst)
        elif isinstance(arg_lst, list):
            lst = arg_lst
        else:
            raise TypeError

        self._lregex = []
        for regex in lst:
            self._lregex.append(re.compile(regex, re.UNICODE))

    def _load_patterns_from_file(self, filename):
        lst = []
        file_obj = file(filename, 'r')
        try:
            for line in file_obj.readlines():
                # Trim leading and trailing whitespace
                pattern = line.strip()
                if not pattern or pattern.startswith('#'):
                    continue
                lst.append(pattern)
        finally:
            file_obj.close()
        return lst

    def _match(self, regex, path):
        return regex.match(path) is not None

    def __call__(self, path):
        """
        @param path: Path to match against provided regexps.
        @type path: str
        @return: Return True if path has been matched and should
                 be excluded, False otherwise.
        @rtype: bool
        """
        for regex in self._lregex:
            if self._match(regex, path):
                return True
        return False
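
# Illustrative usage sketch (not part of the original module source): plugging
# an ExcludeFilter into add_watch() so matching paths are never watched.
# ExcludeFilter, WatchManager and the exclude_filter parameter are real names;
# the patterns and the watched path are hypothetical.
#
#     import pyinotify
#     excl = pyinotify.ExcludeFilter(['^/var/log/.*\.gz$', '^/var/log/apt'])
#     wm = pyinotify.WatchManager()
#     wm.add_watch('/var/log', pyinotify.ALL_EVENTS, rec=True,
#                  exclude_filter=excl)   # excluded paths get wd == -2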


class WatchManagerError(Exception):
    """
    WatchManager Exception. Raised on error encountered on watches
    operations.

    """
    def __init__(self, msg, wmd):
        """
        @param msg: Exception string's description.
        @type msg: string
        @param wmd: This dictionary contains the wd assigned to paths of the
                    same call for which watches were successfully added.
        @type wmd: dict
        """
        self.wmd = wmd
        Exception.__init__(self, msg)


class WatchManager:
    """
    Provide operations for watching files and directories. Its internal
    dictionary is used to reference watched items. When used inside
    threaded code, one must instantiate as many WatchManager instances as
    there are ThreadedNotifier instances.

    """
    def __init__(self, exclude_filter=lambda path: False):
        """
        Initialization: init inotify, init watch manager dictionary.
        Raise OSError if initialization fails.

        @param exclude_filter: boolean function, returns True if current
                               path must be excluded from being watched.
                               Convenient for providing a common exclusion
                               filter for every call to add_watch.
        @type exclude_filter: callable object
        """
        self._exclude_filter = exclude_filter
        self._wmd = {}  # watch dict key: watch descriptor, value: watch
        self._fd = LIBC.inotify_init()  # inotify's init, file descriptor
        if self._fd < 0:
            err = 'Cannot initialize new instance of inotify%s' % STRERRNO()
            raise OSError(err)

    def get_fd(self):
        """
        Return assigned inotify's file descriptor.

        @return: File descriptor.
        @rtype: int
        """
        return self._fd

    def get_watch(self, wd):
        """
        Get watch from provided watch descriptor wd.

        @param wd: Watch descriptor.
        @type wd: int
        """
        return self._wmd.get(wd)

    def del_watch(self, wd):
        """
        Remove watch entry associated to watch descriptor wd.

        @param wd: Watch descriptor.
        @type wd: int
        """
        try:
            del self._wmd[wd]
        except KeyError, err:
            log.error(str(err))

    @property
    def watches(self):
        """
        Get a reference on the internal watch manager dictionary.

        @return: Internal watch manager dictionary.
        @rtype: dict
        """
        return self._wmd

    def __format_path(self, path):
        """
        Format path to its internal (stored in watch manager) representation.
        """
        # Unicode strings are converted to byte strings, it seems to be
        # required because LIBC.inotify_add_watch does not work well when
        # it receives a ctypes.create_unicode_buffer instance as argument.
        # Therefore even wds are indexed with byte strings and not with
        # unicode paths.
        if isinstance(path, unicode):
            path = path.encode(sys.getfilesystemencoding())
        return os.path.normpath(path)

    def __add_watch(self, path, mask, proc_fun, auto_add, exclude_filter):
        """
        Add a watch on path, build a Watch object and insert it in the
        watch manager dictionary. Return the wd value.
        """
        byte_path = self.__format_path(path)
        wd_ = LIBC.inotify_add_watch(self._fd,
                                     ctypes.create_string_buffer(byte_path),
                                     mask)
        if wd_ < 0:
            return wd_
        watch_ = Watch(wd=wd_, path=byte_path, mask=mask, proc_fun=proc_fun,
                       auto_add=auto_add, exclude_filter=exclude_filter)
        self._wmd[wd_] = watch_
        log.debug('New %s', watch_)
        return wd_

    def __glob(self, path, do_glob):
        if do_glob:
            return glob.iglob(path)
        else:
            return [path]

    def add_watch(self, path, mask, proc_fun=None, rec=False,
                  auto_add=False, do_glob=False, quiet=True,
                  exclude_filter=None):
        """
        Add watch(s) on the provided |path|(s) with associated |mask| flag
        value and optionally with a processing |proc_fun| function and
        recursive flag |rec| set to True.
        Ideally |path| components should not be unicode objects. Note that
        although unicode paths are accepted they are converted to byte
        strings before a watch is put on that path. The encoding used for
        converting the unicode object is given by sys.getfilesystemencoding().
        If |path| is already watched it is ignored, but if it is called with
        option rec=True a watch is put on each one of its not-watched
        subdirectories.

        @param path: Path to watch, the path can either be a file or a
                     directory. Also accepts a sequence (list) of paths.
        @type path: string or list of strings
        @param mask: Bitmask of events.
        @type mask: int
        @param proc_fun: Processing object.
        @type proc_fun: function or ProcessEvent instance or instance of
                        one of its subclasses or callable object.
        @param rec: Recursively add watches from path on all its
                    subdirectories, set to False by default (doesn't
                    follow symlinks in any case).
        @type rec: bool
        @param auto_add: Automatically add watches on newly created
                         directories in watched parent |path| directory.
        @type auto_add: bool
        @param do_glob: Do globbing on pathname (see standard globbing
                        module for more information).
        @type do_glob: bool
        @param quiet: if False raises a WatchManagerError exception on
                      error. See example not_quiet.py.
        @type quiet: bool
        @param exclude_filter: predicate (boolean function), which returns
                               True if the current path must be excluded
                               from being watched. This argument has
                               precedence over exclude_filter passed to
                               the class' constructor.
        @type exclude_filter: callable object
        @return: dict of paths associated to watch descriptors. A wd value
                 is positive if the watch was added successfully,
                 otherwise the value is negative. If the path was invalid
                 or was already watched it is not included into this returned
                 dictionary.
        @rtype: dict of {str: int}
        """
        ret_ = {}  # return {path: wd, ...}

        if exclude_filter is None:
            exclude_filter = self._exclude_filter

        # normalize args as list elements
        for npath in self.__format_param(path):
            # unix pathname pattern expansion
            for apath in self.__glob(npath, do_glob):
                # recursively list subdirs according to rec param
                for rpath in self.__walk_rec(apath, rec):
                    if self.get_wd(rpath) is not None:
                        # We decide to ignore paths already inserted into
                        # the watch manager. Need to be removed with rm_watch()
                        # first. Or simply call update_watch() to update it.
                        continue
                    if not exclude_filter(rpath):
                        wd = ret_[rpath] = self.__add_watch(rpath, mask,
                                                            proc_fun,
                                                            auto_add,
                                                            exclude_filter)
                        if wd < 0:
                            err = 'add_watch: cannot watch %s WD=%d%s'
                            err = err % (rpath, wd, STRERRNO())
                            if quiet:
                                log.error(err)
                            else:
                                raise WatchManagerError(err, ret_)
                    else:
                        # Let's say -2 means 'explicitly excluded
                        # from watching'.
                        ret_[rpath] = -2
        return ret_
1758 - def __get_sub_rec(self, lpath):
1759 """ 1760 Get every wd from self._wmd if its path is under the path of 1761 one (at least) of those in lpath. Doesn't follow symlinks. 1762 1763 @param lpath: list of watch descriptor 1764 @type lpath: list of int 1765 @return: list of watch descriptor 1766 @rtype: list of int 1767 """ 1768 for d in lpath: 1769 root = self.get_path(d) 1770 if root is not None: 1771 # always keep root 1772 yield d 1773 else: 1774 # if invalid 1775 continue 1776 1777 # nothing else to expect 1778 if not os.path.isdir(root): 1779 continue 1780 1781 # normalization 1782 root = os.path.normpath(root) 1783 # recursion 1784 lend = len(root) 1785 for iwd in self._wmd.items(): 1786 cur = iwd[1].path 1787 pref = os.path.commonprefix([root, cur]) 1788 if root == os.sep or (len(pref) == lend and \ 1789 len(cur) > lend and \ 1790 cur[lend] == os.sep): 1791 yield iwd[1].wd
1792
1793 - def update_watch(self, wd, mask=None, proc_fun=None, rec=False, 1794 auto_add=False, quiet=True):
1795 """ 1796 Update existing watch descriptors |wd|. The |mask| value, the 1797 processing object |proc_fun|, the recursive param |rec| and the 1798 |auto_add| and |quiet| flags can all be updated. 1799 1800 @param wd: Watch Descriptor to update. Also accepts a list of 1801 watch descriptors. 1802 @type wd: int or list of int 1803 @param mask: Optional new bitmask of events. 1804 @type mask: int 1805 @param proc_fun: Optional new processing function. 1806 @type proc_fun: function or ProcessEvent instance or instance of 1807 one of its subclasses or callable object. 1808 @param rec: Optionally adds watches recursively on all 1809 subdirectories contained into |wd| directory. 1810 @type rec: bool 1811 @param auto_add: Automatically adds watches on newly created 1812 directories in the watch's path corresponding to 1813 |wd|. 1814 @type auto_add: bool 1815 @param quiet: If False raises a WatchManagerError exception on 1816 error. See example not_quiet.py 1817 @type quiet: bool 1818 @return: dict of watch descriptors associated to booleans values. 1819 True if the corresponding wd has been successfully 1820 updated, False otherwise. 1821 @rtype: dict of {int: bool} 1822 """ 1823 lwd = self.__format_param(wd) 1824 if rec: 1825 lwd = self.__get_sub_rec(lwd) 1826 1827 ret_ = {} # return {wd: bool, ...} 1828 for awd in lwd: 1829 apath = self.get_path(awd) 1830 if not apath or awd < 0: 1831 err = 'update_watch: invalid WD=%d' % awd 1832 if quiet: 1833 log.error(err) 1834 continue 1835 raise WatchManagerError(err, ret_) 1836 1837 if mask: 1838 addw = LIBC.inotify_add_watch 1839 wd_ = addw(self._fd, ctypes.create_string_buffer(apath), mask) 1840 if wd_ < 0: 1841 ret_[awd] = False 1842 err = 'update_watch: cannot update %s WD=%d%s' 1843 err = err % (apath, wd_, STRERRNO()) 1844 if quiet: 1845 log.error(err) 1846 continue 1847 raise WatchManagerError(err, ret_) 1848 1849 assert(awd == wd_) 1850 1851 if proc_fun or auto_add: 1852 watch_ = self._wmd[awd] 1853 1854 if proc_fun: 1855 watch_.proc_fun = proc_fun 1856 1857 if auto_add: 1858 watch_.auto_add = auto_add 1859 1860 ret_[awd] = True 1861 log.debug('Updated watch - %s', self._wmd[awd]) 1862 return ret_
1863
1864 - def __format_param(self, param):
1865 """ 1866 @param param: Parameter. 1867 @type param: string or int 1868 @return: wrap param. 1869 @rtype: list of type(param) 1870 """ 1871 if isinstance(param, list): 1872 for p_ in param: 1873 yield p_ 1874 else: 1875 yield param
1876
1877 - def get_wd(self, path):
1878 """ 1879 Returns the watch descriptor associated to path. This method 1880 presents a prohibitive cost, always prefer to keep the WD 1881 returned by add_watch(). If the path is unknown it returns None. 1882 1883 @param path: Path. 1884 @type path: str 1885 @return: WD or None. 1886 @rtype: int or None 1887 """ 1888 path = self.__format_path(path) 1889 for iwd in self._wmd.items(): 1890 if iwd[1].path == path: 1891 return iwd[0]
1892
1893 - def get_path(self, wd):
1894 """ 1895 Returns the path associated to WD, if WD is unknown it returns None. 1896 1897 @param wd: Watch descriptor. 1898 @type wd: int 1899 @return: Path or None. 1900 @rtype: string or None 1901 """ 1902 watch_ = self._wmd.get(wd) 1903 if watch_ is not None: 1904 return watch_.path
1905
1906 - def __walk_rec(self, top, rec):
1907 """ 1908 Yields each subdirectories of top, doesn't follow symlinks. 1909 If rec is false, only yield top. 1910 1911 @param top: root directory. 1912 @type top: string 1913 @param rec: recursive flag. 1914 @type rec: bool 1915 @return: path of one subdirectory. 1916 @rtype: string 1917 """ 1918 if not rec or os.path.islink(top) or not os.path.isdir(top): 1919 yield top 1920 else: 1921 for root, dirs, files in os.walk(top): 1922 yield root
1923
1924 - def rm_watch(self, wd, rec=False, quiet=True):
1925 """ 1926 Removes watch(s). 1927 1928 @param wd: Watch Descriptor of the file or directory to unwatch. 1929 Also accepts a list of WDs. 1930 @type wd: int or list of int. 1931 @param rec: Recursively removes watches on every already watched 1932 subdirectories and subfiles. 1933 @type rec: bool 1934 @param quiet: If False raises a WatchManagerError exception on 1935 error. See example not_quiet.py 1936 @type quiet: bool 1937 @return: dict of watch descriptors associated to booleans values. 1938 True if the corresponding wd has been successfully 1939 removed, False otherwise. 1940 @rtype: dict of {int: bool} 1941 """ 1942 lwd = self.__format_param(wd) 1943 if rec: 1944 lwd = self.__get_sub_rec(lwd) 1945 1946 ret_ = {} # return {wd: bool, ...} 1947 for awd in lwd: 1948 # remove watch 1949 wd_ = LIBC.inotify_rm_watch(self._fd, awd) 1950 if wd_ < 0: 1951 ret_[awd] = False 1952 err = 'rm_watch: cannot remove WD=%d%s' % (awd, STRERRNO()) 1953 if quiet: 1954 log.error(err) 1955 continue 1956 raise WatchManagerError(err, ret_) 1957 1958 ret_[awd] = True 1959 log.debug('Watch WD=%d (%s) removed', awd, self.get_path(awd)) 1960 return ret_
1961 1962
1963 - def watch_transient_file(self, filename, mask, proc_class):
1964 """ 1965 Watch a transient file, which will be created and deleted frequently 1966 over time (e.g. pid file). 1967 1968 @attention: Currently under the call to this function it is not 1969 possible to correctly watch the events triggered into the same 1970 base directory than the directory where is located this watched 1971 transient file. For instance it would be wrong to make these 1972 two successive calls: wm.watch_transient_file('/var/run/foo.pid', ...) 1973 and wm.add_watch('/var/run/', ...) 1974 1975 @param filename: Filename. 1976 @type filename: string 1977 @param mask: Bitmask of events, should contain IN_CREATE and IN_DELETE. 1978 @type mask: int 1979 @param proc_class: ProcessEvent (or of one of its subclass), beware of 1980 accepting a ProcessEvent's instance as argument into 1981 __init__, see transient_file.py example for more 1982 details. 1983 @type proc_class: ProcessEvent's instance or of one of its subclasses. 1984 @return: Same as add_watch(). 1985 @rtype: Same as add_watch(). 1986 """ 1987 dirname = os.path.dirname(filename) 1988 if dirname == '': 1989 return {} # Maintains coherence with add_watch() 1990 basename = os.path.basename(filename) 1991 # Assuming we are watching at least for IN_CREATE and IN_DELETE 1992 mask |= IN_CREATE | IN_DELETE 1993 1994 def cmp_name(event): 1995 if getattr(event, 'name') is None: 1996 return False 1997 return basename == event.name
1998 return self.add_watch(dirname, mask, 1999 proc_fun=proc_class(ChainIfTrue(func=cmp_name)), 2000 rec=False, 2001 auto_add=False, do_glob=False, 2002 exclude_filter=lambda path: False)
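A hedged sketch of watching a transient pid file, not part of the module source; the handler class is illustrative and the reference version lives in the transient_file.py example mentioned above:

    class ProcessPidFile(pyinotify.ProcessEvent):  # illustrative handler
        def process_IN_CREATE(self, event):
            print('%s appeared' % event.pathname)
        def process_IN_DELETE(self, event):
            print('%s vanished' % event.pathname)

    wm = pyinotify.WatchManager()
    # Note: the class itself is passed, not an instance; IN_CREATE and
    # IN_DELETE are OR'ed into the mask by watch_transient_file() anyway.
    wm.watch_transient_file('/var/run/foo.pid', pyinotify.IN_CREATE,
                            ProcessPidFile)
    notifier = pyinotify.Notifier(wm)
    notifier.loop()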
2003
2004 2005 -class Color:
2006 """ 2007 Internal class. Provide fancy colors used by string representations. 2008 """ 2009 normal = "\033[0m" 2010 black = "\033[30m" 2011 red = "\033[31m" 2012 green = "\033[32m" 2013 yellow = "\033[33m" 2014 blue = "\033[34m" 2015 purple = "\033[35m" 2016 cyan = "\033[36m" 2017 bold = "\033[1m" 2018 uline = "\033[4m" 2019 blink = "\033[5m" 2020 invert = "\033[7m" 2021 2022 @staticmethod
2023 - def punctuation(s):
2024 """Punctuation color.""" 2025 return Color.normal + s + Color.normal
2026 2027 @staticmethod
2028 - def field_value(s):
2029 """Field value color.""" 2030 if not isinstance(s, basestring): 2031 s = str(s) 2032 return Color.purple + s + Color.normal
2033 2034 @staticmethod
2035 - def field_name(s):
2036 """Field name color.""" 2037 return Color.blue + s + Color.normal
2038 2039 @staticmethod
2040 - def class_name(s):
2041 """Class name color.""" 2042 return Color.red + Color.bold + s + Color.normal
2043 2044 @staticmethod
2045 - def simple(s, color):
2046 if not isinstance(s, basestring): 2047 s = str(s) 2048 try: 2049 color_attr = getattr(Color, color) 2050 except AttributeError: 2051 return s 2052 return color_attr + s + Color.normal
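For illustration only (not part of the module source), this is roughly how the module's own string representations combine these helpers; output requires an ANSI-capable terminal, and unknown color names passed to simple() leave the string unchanged:

    print(Color.class_name('Watch') + Color.punctuation('(') +
          Color.field_name('mask') + Color.punctuation('=') +
          Color.field_value(4095) + Color.punctuation(')'))
    print(Color.simple('warning', 'yellow'))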
2053
2054 2055 -def compatibility_mode():
2056 """ 2057 Use this function to turn on the compatibility mode. The compatibility 2058 mode is used to improve compatibility with Pyinotify 0.7.1 (or older) 2059 programs. The compatibility mode provides additional variables 'is_dir', 2060 'event_name', 'EventsCodes.IN_*' and 'EventsCodes.ALL_EVENTS' as 2061 Pyinotify 0.7.1 provided. Do not call this function from new programs!! 2062 Especially if there are developped for Pyinotify >= 0.8.x. 2063 """ 2064 setattr(EventsCodes, 'ALL_EVENTS', ALL_EVENTS) 2065 for evname in globals(): 2066 if evname.startswith('IN_'): 2067 setattr(EventsCodes, evname, globals()[evname]) 2068 global COMPATIBILITY_MODE 2069 COMPATIBILITY_MODE = True
2070
2071 2072 -def command_line():
2073 """ 2074 By default the watched path is '/tmp' and all types of events are 2075 monitored. Events monitoring serves forever, type c^c to stop it. 2076 """ 2077 from optparse import OptionParser 2078 2079 usage = "usage: %prog [options] [path1] [path2] [pathn]" 2080 2081 parser = OptionParser(usage=usage) 2082 parser.add_option("-v", "--verbose", action="store_true", 2083 dest="verbose", help="Verbose mode") 2084 parser.add_option("-r", "--recursive", action="store_true", 2085 dest="recursive", 2086 help="Add watches recursively on paths") 2087 parser.add_option("-a", "--auto_add", action="store_true", 2088 dest="auto_add", 2089 help="Automatically add watches on new directories") 2090 parser.add_option("-e", "--events-list", metavar="EVENT[,...]", 2091 dest="events_list", 2092 help=("A comma-separated list of events to watch for - " 2093 "see the documentation for valid options (defaults" 2094 " to everything)")) 2095 parser.add_option("-s", "--stats", action="store_true", 2096 dest="stats", 2097 help="Display dummy statistics") 2098 2099 (options, args) = parser.parse_args() 2100 2101 if options.verbose: 2102 log.setLevel(10) 2103 2104 if len(args) < 1: 2105 path = '/tmp' # default watched path 2106 else: 2107 path = args 2108 2109 # watch manager instance 2110 wm = WatchManager() 2111 # notifier instance and init 2112 if options.stats: 2113 notifier = Notifier(wm, default_proc_fun=Stats(), read_freq=5) 2114 else: 2115 notifier = Notifier(wm, default_proc_fun=PrintAllEvents()) 2116 2117 # What mask to apply 2118 mask = 0 2119 if options.events_list: 2120 events_list = options.events_list.split(',') 2121 for ev in events_list: 2122 evcode = EventsCodes.ALL_FLAGS.get(ev, 0) 2123 if evcode: 2124 mask |= evcode 2125 else: 2126 parser.error("The event '%s' specified with option -e" 2127 " is not valid" % ev) 2128 else: 2129 mask = ALL_EVENTS 2130 2131 # stats 2132 cb_fun = None 2133 if options.stats: 2134 def cb(s): 2135 print('%s\n%s\n' % (repr(s.proc_fun()), 2136 s.proc_fun()))
2137          cb_fun = cb
2138
2139      log.debug('Start monitoring %s, (press ^C to halt pyinotify)' % path)
2140
2141      wm.add_watch(path, mask, rec=options.recursive, auto_add=options.auto_add)
2142      # Loop forever (until sigint signal gets caught)
2143      notifier.loop(callback=cb_fun)
2144
2145
2146  if __name__ == '__main__':
2147      command_line()
2148
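When run as a script the module monitors the given paths with the options above; an illustrative invocation and its rough programmatic equivalent (paths and events chosen arbitrarily, not part of the module source):

    # Shell (illustrative):
    #   python pyinotify.py -v -r -e IN_CREATE,IN_DELETE /tmp
    # Rough programmatic equivalent of that setup:
    import pyinotify
    wm = pyinotify.WatchManager()
    notifier = pyinotify.Notifier(wm, default_proc_fun=pyinotify.PrintAllEvents())
    wm.add_watch('/tmp', pyinotify.IN_CREATE | pyinotify.IN_DELETE, rec=True)
    notifier.loop()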