# Copyright 2001-2013 by Vinay Sajip. All Rights Reserved.
#
# Permission to use, copy, modify, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation, and that the name of Vinay Sajip
# not be used in advertising or publicity pertaining to distribution
# of the software without specific, written prior permission.
# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

"""
Additional handlers for the logging package for Python. The core package is
based on PEP 282 and comments thereto in comp.lang.python.

Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved.

To use, simply 'import logging.handlers' and log away!
"""

import errno, logging, socket, os, cPickle, struct, time, re
from stat import ST_DEV, ST_INO, ST_MTIME

try:
    import codecs
except ImportError:
    codecs = None
try:
    unicode
    _unicode = True
except NameError:
    _unicode = False

#
# Some constants...
#

DEFAULT_TCP_LOGGING_PORT    = 9020
DEFAULT_UDP_LOGGING_PORT    = 9021
DEFAULT_HTTP_LOGGING_PORT   = 9022
DEFAULT_SOAP_LOGGING_PORT   = 9023
SYSLOG_UDP_PORT             = 514
SYSLOG_TCP_PORT             = 514

_MIDNIGHT = 24 * 60 * 60  # number of seconds in a day

class BaseRotatingHandler(logging.FileHandler):
    """
    Base class for handlers that rotate log files at a certain point.
    Not meant to be instantiated directly.  Instead, use RotatingFileHandler
    or TimedRotatingFileHandler.
    """
    def __init__(self, filename, mode, encoding=None, delay=0):
        """
        Use the specified filename for streamed logging
        """
        if codecs is None:
            encoding = None
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.mode = mode
        self.encoding = encoding

    def emit(self, record):
        """
        Emit a record.

        Output the record to the file, catering for rollover as described
        in doRollover().
        """
        try:
            if self.shouldRollover(record):
                self.doRollover()
            logging.FileHandler.emit(self, record)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class RotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size.
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
        """
        Open the specified file and use it as the stream for logging.

        By default, the file grows indefinitely. You can specify particular
        values of maxBytes and backupCount to allow the file to rollover at
        a predetermined size.

        Rollover occurs whenever the current log file is nearly maxBytes in
        length. If backupCount is >= 1, the system will successively create
        new files with the same pathname as the base file, but with extensions
        ".1", ".2" etc. appended to it. For example, with a backupCount of 5
        and a base file name of "app.log", you would get "app.log",
        "app.log.1", "app.log.2", ... through to "app.log.5". The file being
        written to is always "app.log" - when it gets filled up, it is closed
        and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
        exist, then they are renamed to "app.log.2", "app.log.3" etc.
        respectively.

        If maxBytes is zero, rollover never occurs.
        """
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
        self.maxBytes = maxBytes
        self.backupCount = backupCount

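    # Illustrative sketch (file names invented for the example): with
    # maxBytes=2000 and backupCount=5, the handler always writes to "app.log"
    # and, on each rollover, renames it so that "app.log.1" is the newest
    # backup and "app.log.5" the oldest; anything older is discarded.
    #
    #   handler = RotatingFileHandler('app.log', maxBytes=2000, backupCount=5)
    #   logger.addHandler(handler)
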
    def doRollover(self):
        """
        Do a rollover, as described in __init__().
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        if self.backupCount > 0:
            for i in range(self.backupCount - 1, 0, -1):
                sfn = "%s.%d" % (self.baseFilename, i)
                dfn = "%s.%d" % (self.baseFilename, i + 1)
                if os.path.exists(sfn):
                    #print "%s -> %s" % (sfn, dfn)
                    if os.path.exists(dfn):
                        os.remove(dfn)
                    os.rename(sfn, dfn)
            dfn = self.baseFilename + ".1"
            if os.path.exists(dfn):
                os.remove(dfn)
            os.rename(self.baseFilename, dfn)
            #print "%s -> %s" % (self.baseFilename, dfn)
        self.stream = self._open()

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:                 # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:                   # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  #due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        return 0

class TimedRotatingFileHandler(BaseRotatingHandler):
    """
    Handler for logging to a file, rotating the log file at certain timed
    intervals.

    If backupCount is > 0, when rollover is done, no more than backupCount
    files are kept - the oldest ones are deleted.
    """
    def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False):
        BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
        self.when = when.upper()
        self.backupCount = backupCount
        self.utc = utc
        # Calculate the real rollover interval, which is just the number of
        # seconds between rollovers.  Also set the filename suffix used when
        # a rollover occurs.  Current 'when' events supported:
        # S - Seconds
        # M - Minutes
        # H - Hours
        # D - Days
        # midnight - roll over at midnight
        # W{0-6} - roll over on a certain day; 0 - Monday
        #
        # Case of the 'when' specifier is not important; lower or upper case
        # will work.
        if self.when == 'S':
            self.interval = 1 # one second
            self.suffix = "%Y-%m-%d_%H-%M-%S"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
        elif self.when == 'M':
            self.interval = 60 # one minute
            self.suffix = "%Y-%m-%d_%H-%M"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
        elif self.when == 'H':
            self.interval = 60 * 60 # one hour
            self.suffix = "%Y-%m-%d_%H"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
        elif self.when == 'D' or self.when == 'MIDNIGHT':
            self.interval = 60 * 60 * 24 # one day
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        elif self.when.startswith('W'):
            self.interval = 60 * 60 * 24 * 7 # one week
            if len(self.when) != 2:
                raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
            if self.when[1] < '0' or self.when[1] > '6':
                raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
            self.dayOfWeek = int(self.when[1])
            self.suffix = "%Y-%m-%d"
            self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
        else:
            raise ValueError("Invalid rollover interval specified: %s" % self.when)

        self.extMatch = re.compile(self.extMatch)
        self.interval = self.interval * interval # multiply by units requested
        if os.path.exists(filename):
            t = os.stat(filename)[ST_MTIME]
        else:
            t = int(time.time())
        self.rolloverAt = self.computeRollover(t)

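    # Illustrative sketch (file name invented for the example): rotate at
    # every midnight and keep a week of files; rotated files get a date
    # suffix such as "timed.log.2013-01-31".  'when' may also be 'S', 'M',
    # 'H', 'D' or 'W0'-'W6' (0 is Monday), with 'interval' multiplying that
    # unit.
    #
    #   handler = TimedRotatingFileHandler('timed.log', when='midnight',
    #                                      interval=1, backupCount=7)
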
    def computeRollover(self, currentTime):
        """
        Work out the rollover time based on the specified time.
        """
        result = currentTime + self.interval
        # If we are rolling over at midnight or weekly, then the interval is already known.
        # What we need to figure out is WHEN the next interval is.  In other words,
        # if you are rolling over at midnight, then your base interval is 1 day,
        # but you want to start that one day clock at midnight, not now.  So, we
        # have to fudge the rolloverAt value in order to trigger the first rollover
        # at the right time.  After that, the regular interval will take care of
        # the rest.  Note that this code doesn't care about leap seconds. :)
        if self.when == 'MIDNIGHT' or self.when.startswith('W'):
            # This could be done with less code, but I wanted it to be clear
            if self.utc:
                t = time.gmtime(currentTime)
            else:
                t = time.localtime(currentTime)
            currentHour = t[3]
            currentMinute = t[4]
            currentSecond = t[5]
            # r is the number of seconds left between now and midnight
            r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
                    currentSecond)
            result = currentTime + r
            # If we are rolling over on a certain day, add in the number of days until
            # the next rollover, but offset by 1 since we just calculated the time
            # until the next day starts.  There are three cases:
            # Case 1) The day to rollover is today; in this case, do nothing
            # Case 2) The day to rollover is further in the interval (i.e., today is
            #         day 2 (Wednesday) and rollover is on day 6 (Sunday).  Days to
            #         next rollover is simply 6 - 2 - 1, or 3.
            # Case 3) The day to rollover is behind us in the interval (i.e., today
            #         is day 5 (Saturday) and rollover is on day 3 (Thursday).
            #         Days to rollover is 6 - 5 + 3, or 4.  In this case, it's the
            #         number of days left in the current week (1) plus the number
            #         of days in the next week until the rollover day (3).
            # The calculations described in 2) and 3) above need to have a day added.
            # This is because the above time calculation takes us to midnight on this
            # day, i.e. the start of the next day.
            if self.when.startswith('W'):
                day = t[6] # 0 is Monday
                if day != self.dayOfWeek:
                    if day < self.dayOfWeek:
                        daysToWait = self.dayOfWeek - day
                    else:
                        daysToWait = 6 - day + self.dayOfWeek + 1
                    newRolloverAt = result + (daysToWait * (60 * 60 * 24))
                    if not self.utc:
                        dstNow = t[-1]
                        dstAtRollover = time.localtime(newRolloverAt)[-1]
                        if dstNow != dstAtRollover:
                            if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                                addend = -3600
                            else:           # DST bows out before next rollover, so we need to add an hour
                                addend = 3600
                            newRolloverAt += addend
                    result = newRolloverAt
        return result

    def shouldRollover(self, record):
        """
        Determine if rollover should occur.

        record is not used, as we are just comparing times, but it is needed so
        the method signatures are the same
        """
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
        return 0

    def getFilesToDelete(self):
        """
        Determine the files to delete when rolling over.

        More specific than the earlier method, which just used glob.glob().
        """
        dirName, baseName = os.path.split(self.baseFilename)
        fileNames = os.listdir(dirName)
        result = []
        prefix = baseName + "."
        plen = len(prefix)
        for fileName in fileNames:
            if fileName[:plen] == prefix:
                suffix = fileName[plen:]
                if self.extMatch.match(suffix):
                    result.append(os.path.join(dirName, fileName))
        result.sort()
        if len(result) < self.backupCount:
            result = []
        else:
            result = result[:len(result) - self.backupCount]
        return result

    def doRollover(self):
        """
        do a rollover; in this case, a date/time stamp is appended to the filename
        when the rollover happens.  However, you want the file to be named for the
        start of the interval, not the current time.  If there is a backup count,
        then we have to get a list of matching filenames, sort them and remove
        the one with the oldest suffix.
        """
        if self.stream:
            self.stream.close()
            self.stream = None
        # get the time that this sequence started at and make it a TimeTuple
        currentTime = int(time.time())
        dstNow = time.localtime(currentTime)[-1]
        t = self.rolloverAt - self.interval
        if self.utc:
            timeTuple = time.gmtime(t)
        else:
            timeTuple = time.localtime(t)
            dstThen = timeTuple[-1]
            if dstNow != dstThen:
                if dstNow:
                    addend = 3600
                else:
                    addend = -3600
                timeTuple = time.localtime(t + addend)
        dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
        if os.path.exists(dfn):
            os.remove(dfn)
        os.rename(self.baseFilename, dfn)
        if self.backupCount > 0:
            # find the oldest log file and delete it
            #s = glob.glob(self.baseFilename + ".20*")
            #if len(s) > self.backupCount:
            #    s.sort()
            #    os.remove(s[0])
            for s in self.getFilesToDelete():
                os.remove(s)
        #print "%s -> %s" % (self.baseFilename, dfn)
        self.stream = self._open()
        newRolloverAt = self.computeRollover(currentTime)
        while newRolloverAt <= currentTime:
            newRolloverAt = newRolloverAt + self.interval
        #If DST changes and midnight or weekly rollover, adjust for this.
        if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
            dstAtRollover = time.localtime(newRolloverAt)[-1]
            if dstNow != dstAtRollover:
                if not dstNow:  # DST kicks in before next rollover, so we need to deduct an hour
                    addend = -3600
                else:           # DST bows out before next rollover, so we need to add an hour
                    addend = 3600
                newRolloverAt += addend
        self.rolloverAt = newRolloverAt

class WatchedFileHandler(logging.FileHandler):
    """
    A handler for logging to a file, which watches the file
    to see if it has changed while in use. This can happen because of
    usage of programs such as newsyslog and logrotate which perform
    log file rotation. This handler, intended for use under Unix,
    watches the file to see if it has changed since the last emit.
    (A file has changed if its device or inode have changed.)
    If it has changed, the old file stream is closed, and the file
    opened to get a new stream.

    This handler is not appropriate for use under Windows, because
    under Windows open files cannot be moved or renamed - logging
    opens the files with exclusive locks - and so there is no need
    for such a handler. Furthermore, ST_INO is not supported under
    Windows; stat always returns zero for this value.

    This handler is based on a suggestion and patch by Chad J.
    Schroeder.
    """
    def __init__(self, filename, mode='a', encoding=None, delay=0):
        logging.FileHandler.__init__(self, filename, mode, encoding, delay)
        self.dev, self.ino = -1, -1
        self._statstream()
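
    # Illustrative sketch (path invented for the example): used alongside an
    # external rotator such as logrotate, the handler notices when
    # "/var/log/myapp.log" has been renamed or removed and reopens it, so
    # logging continues into the newly created file.
    #
    #   handler = WatchedFileHandler('/var/log/myapp.log')
    #   logger.addHandler(handler)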

    def _statstream(self):
        if self.stream:
            sres = os.fstat(self.stream.fileno())
            self.dev, self.ino = sres[ST_DEV], sres[ST_INO]

    def emit(self, record):
        """
        Emit a record.

        First check if the underlying file has changed, and if it
        has, close the old stream and reopen the file to get the
        current stream.
        """
        # Reduce the chance of race conditions by stat'ing by path only
        # once and then fstat'ing our new fd if we opened a new log stream.
        # See issue #14632: Thanks to John Mulligan for the problem report
        # and patch.
        try:
            # stat the file by path, checking for existence
            sres = os.stat(self.baseFilename)
        except OSError as err:
            if err.errno == errno.ENOENT:
                sres = None
            else:
                raise
        # compare file system stat with that of our stream file handle
        if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino:
            if self.stream is not None:
                # we have an open file handle, clean it up
                self.stream.flush()
                self.stream.close()
                # open a new file handle and get new stat info from that fd
                self.stream = self._open()
                self._statstream()
        logging.FileHandler.emit(self, record)

class SocketHandler(logging.Handler):
    """
    A handler class which writes logging records, in pickle format, to
    a streaming socket. The socket is kept open across logging calls.
    If the peer resets it, an attempt is made to reconnect on the next call.
    The pickle which is sent is that of the LogRecord's attribute dictionary
    (__dict__), so that the receiver does not need to have the logging module
    installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.
    """

    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.

        The attribute 'closeOnError' is set to 1 - which means that if
        a socket error occurs, the socket is silently closed and then
        reopened on the next logging call.
        """
        logging.Handler.__init__(self)
        self.host = host
        self.port = port
        self.sock = None
        self.closeOnError = 0
        self.retryTime = None
        #
        # Exponential backoff parameters.
        #
        self.retryStart = 1.0
        self.retryMax = 30.0
        self.retryFactor = 2.0

    def makeSocket(self, timeout=1):
        """
        A factory method which allows subclasses to define the precise
        type of socket they want.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        if hasattr(s, 'settimeout'):
            s.settimeout(timeout)
        s.connect((self.host, self.port))
        return s

    def createSocket(self):
        """
        Try to create a socket, using an exponential backoff with
        a max retry time. Thanks to Robert Olson for the original patch
        (SF #815911) which has been slightly refactored.
        """
        now = time.time()
        # Either retryTime is None, in which case this
        # is the first time back after a disconnect, or
        # we've waited long enough.
        if self.retryTime is None:
            attempt = 1
        else:
            attempt = (now >= self.retryTime)
        if attempt:
            try:
                self.sock = self.makeSocket()
                self.retryTime = None # next time, no delay before trying
            except socket.error:
                #Creation failed, so set the retry time and return.
                if self.retryTime is None:
                    self.retryPeriod = self.retryStart
                else:
                    self.retryPeriod = self.retryPeriod * self.retryFactor
                    if self.retryPeriod > self.retryMax:
                        self.retryPeriod = self.retryMax
                self.retryTime = now + self.retryPeriod

    def send(self, s):
        """
        Send a pickled string to the socket.

        This function allows for partial sends which can happen when the
        network is busy.
        """
        if self.sock is None:
            self.createSocket()
        #self.sock can be None either because we haven't reached the retry
        #time yet, or because we have reached the retry time and retried,
        #but are still unable to connect.
        if self.sock:
            try:
                if hasattr(self.sock, "sendall"):
                    self.sock.sendall(s)
                else:
                    sentsofar = 0
                    left = len(s)
                    while left > 0:
                        sent = self.sock.send(s[sentsofar:])
                        sentsofar = sentsofar + sent
                        left = left - sent
            except socket.error:
                self.sock.close()
                self.sock = None  # so we can call createSocket next time

    def makePickle(self, record):
        """
        Pickles the record in binary format with a length prefix, and
        returns it ready for transmission across the socket.
        """
        ei = record.exc_info
        if ei:
            # just to get traceback text into record.exc_text ...
            dummy = self.format(record)
            record.exc_info = None  # to avoid Unpickleable error
        # See issue #14436: If msg or args are objects, they may not be
        # available on the receiving end. So we convert the msg % args
        # to a string, save it as msg and zap the args.
        d = dict(record.__dict__)
        d['msg'] = record.getMessage()
        d['args'] = None
        s = cPickle.dumps(d, 1)
        if ei:
            record.exc_info = ei  # for next handler
        slen = struct.pack(">L", len(s))
        return slen + s
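
    # Receiving-end sketch (illustrative only; 'connection' is an assumed,
    # already-accepted stream socket and error/short-read handling is
    # omitted).  Each message is a 4-byte big-endian length followed by a
    # pickled dict, which logging.makeLogRecord() turns back into a LogRecord:
    #
    #   import cPickle, logging, struct
    #   chunk = connection.recv(4)
    #   slen = struct.unpack('>L', chunk)[0]
    #   data = connection.recv(slen)
    #   record = logging.makeLogRecord(cPickle.loads(data))
    #   logging.getLogger(record.name).handle(record)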

    def handleError(self, record):
        """
        Handle an error during logging.

        An error has occurred during logging. Most likely cause -
        connection lost. Close the socket so that we can retry on the
        next event.
        """
        if self.closeOnError and self.sock:
            self.sock.close()
            self.sock = None        #try to reconnect next time
        else:
            logging.Handler.handleError(self, record)

    def emit(self, record):
        """
        Emit a record.

        Pickles the record and writes it to the socket in binary format.
        If there is an error with the socket, silently drop the packet.
        If there was a problem with the socket, re-establishes the
        socket.
        """
        try:
            s = self.makePickle(record)
            self.send(s)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            if self.sock:
                self.sock.close()
                self.sock = None
        finally:
            self.release()
        logging.Handler.close(self)

class DatagramHandler(SocketHandler):
    """
    A handler class which writes logging records, in pickle format, to
    a datagram socket.  The pickle which is sent is that of the LogRecord's
    attribute dictionary (__dict__), so that the receiver does not need to
    have the logging module installed in order to process the logging event.

    To unpickle the record at the receiving end into a LogRecord, use the
    makeLogRecord function.

    """
    def __init__(self, host, port):
        """
        Initializes the handler with a specific host address and port.
        """
        SocketHandler.__init__(self, host, port)
        self.closeOnError = 0

    def makeSocket(self):
        """
        The factory method of SocketHandler is here overridden to create
        a UDP socket (SOCK_DGRAM).
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        return s

    def send(self, s):
        """
        Send a pickled string to a socket.

        This function no longer allows for partial sends which can happen
        when the network is busy - UDP does not guarantee delivery and
        can deliver packets out of sequence.
        """
        if self.sock is None:
            self.createSocket()
        self.sock.sendto(s, (self.host, self.port))
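
    # Illustrative sketch (host invented for the example): each record is sent
    # as a single UDP datagram, so delivery and ordering are not guaranteed.
    #
    #   handler = DatagramHandler('localhost', DEFAULT_UDP_LOGGING_PORT)
    #   logger.addHandler(handler)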

class SysLogHandler(logging.Handler):
    """
    A handler class which sends formatted logging records to a syslog
    server. Based on Sam Rushing's syslog module:
    http://www.nightmare.com/squirl/python-ext/misc/syslog.py
    Contributed by Nicolas Untz (after which minor refactoring changes
    have been made).
    """

    # from <linux/sys/syslog.h>:
    # ======================================================================
    # priorities/facilities are encoded into a single 32-bit quantity, where
    # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
    # facility (0-big number). Both the priorities and the facilities map
    # roughly one-to-one to strings in the syslogd(8) source code.  This
    # mapping is included in this file.
    #
    # priorities (these are ordered)

    LOG_EMERG     = 0       #  system is unusable
    LOG_ALERT     = 1       #  action must be taken immediately
    LOG_CRIT      = 2       #  critical conditions
    LOG_ERR       = 3       #  error conditions
    LOG_WARNING   = 4       #  warning conditions
    LOG_NOTICE    = 5       #  normal but significant condition
    LOG_INFO      = 6       #  informational
    LOG_DEBUG     = 7       #  debug-level messages

    #  facility codes
    LOG_KERN      = 0       #  kernel messages
    LOG_USER      = 1       #  random user-level messages
    LOG_MAIL      = 2       #  mail system
    LOG_DAEMON    = 3       #  system daemons
    LOG_AUTH      = 4       #  security/authorization messages
    LOG_SYSLOG    = 5       #  messages generated internally by syslogd
    LOG_LPR       = 6       #  line printer subsystem
    LOG_NEWS      = 7       #  network news subsystem
    LOG_UUCP      = 8       #  UUCP subsystem
    LOG_CRON      = 9       #  clock daemon
    LOG_AUTHPRIV  = 10      #  security/authorization messages (private)
    LOG_FTP       = 11      #  FTP daemon

    #  other codes through 15 reserved for system use
    LOG_LOCAL0    = 16      #  reserved for local use
    LOG_LOCAL1    = 17      #  reserved for local use
    LOG_LOCAL2    = 18      #  reserved for local use
    LOG_LOCAL3    = 19      #  reserved for local use
    LOG_LOCAL4    = 20      #  reserved for local use
    LOG_LOCAL5    = 21      #  reserved for local use
    LOG_LOCAL6    = 22      #  reserved for local use
    LOG_LOCAL7    = 23      #  reserved for local use

    priority_names = {
        "alert":    LOG_ALERT,
        "crit":     LOG_CRIT,
        "critical": LOG_CRIT,
        "debug":    LOG_DEBUG,
        "emerg":    LOG_EMERG,
        "err":      LOG_ERR,
        "error":    LOG_ERR,        #  DEPRECATED
        "info":     LOG_INFO,
        "notice":   LOG_NOTICE,
        "panic":    LOG_EMERG,      #  DEPRECATED
        "warn":     LOG_WARNING,    #  DEPRECATED
        "warning":  LOG_WARNING,
        }

    facility_names = {
        "auth":     LOG_AUTH,
        "authpriv": LOG_AUTHPRIV,
        "cron":     LOG_CRON,
        "daemon":   LOG_DAEMON,
        "ftp":      LOG_FTP,
        "kern":     LOG_KERN,
        "lpr":      LOG_LPR,
        "mail":     LOG_MAIL,
        "news":     LOG_NEWS,
        "security": LOG_AUTH,       #  DEPRECATED
        "syslog":   LOG_SYSLOG,
        "user":     LOG_USER,
        "uucp":     LOG_UUCP,
        "local0":   LOG_LOCAL0,
        "local1":   LOG_LOCAL1,
        "local2":   LOG_LOCAL2,
        "local3":   LOG_LOCAL3,
        "local4":   LOG_LOCAL4,
        "local5":   LOG_LOCAL5,
        "local6":   LOG_LOCAL6,
        "local7":   LOG_LOCAL7,
        }

    #The map below appears to be trivially lowercasing the key. However,
    #there's more to it than meets the eye - in some locales, lowercasing
    #gives unexpected results. See SF #1524081: in the Turkish locale,
    #"INFO".lower() != "info"
    priority_map = {
        "DEBUG" : "debug",
        "INFO" : "info",
        "WARNING" : "warning",
        "ERROR" : "error",
        "CRITICAL" : "critical"
    }

    def __init__(self, address=('localhost', SYSLOG_UDP_PORT),
                 facility=LOG_USER, socktype=None):
        """
        Initialize a handler.

        If address is specified as a string, a UNIX socket is used. To log to a
        local syslogd, "SysLogHandler(address="/dev/log")" can be used.
        If facility is not specified, LOG_USER is used. If socktype is
        specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific
        socket type will be used. For Unix sockets, you can also specify a
        socktype of None, in which case socket.SOCK_DGRAM will be used, falling
        back to socket.SOCK_STREAM.
        """
        logging.Handler.__init__(self)

        self.address = address
        self.facility = facility
        self.socktype = socktype

        if isinstance(address, basestring):
            self.unixsocket = 1
            self._connect_unixsocket(address)
        else:
            self.unixsocket = 0
            if socktype is None:
                socktype = socket.SOCK_DGRAM
            self.socket = socket.socket(socket.AF_INET, socktype)
            if socktype == socket.SOCK_STREAM:
                self.socket.connect(address)
            self.socktype = socktype
        self.formatter = None
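
    # Illustrative sketches (the remote host name is invented for the example):
    #
    #   # log to a local syslog daemon via its Unix domain socket
    #   handler = SysLogHandler(address='/dev/log')
    #
    #   # log to a remote syslog server over UDP, using the local0 facility
    #   handler = SysLogHandler(address=('syslog.example.com', SYSLOG_UDP_PORT),
    #                           facility=SysLogHandler.LOG_LOCAL0)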

    def _connect_unixsocket(self, address):
        use_socktype = self.socktype
        if use_socktype is None:
            use_socktype = socket.SOCK_DGRAM
        self.socket = socket.socket(socket.AF_UNIX, use_socktype)
        try:
            self.socket.connect(address)
            # it worked, so set self.socktype to the used type
            self.socktype = use_socktype
        except socket.error:
            self.socket.close()
            if self.socktype is not None:
                # user didn't specify falling back, so fail
                raise
            use_socktype = socket.SOCK_STREAM
            self.socket = socket.socket(socket.AF_UNIX, use_socktype)
            try:
                self.socket.connect(address)
                # it worked, so set self.socktype to the used type
                self.socktype = use_socktype
            except socket.error:
                self.socket.close()
                raise

    # curious: when talking to the unix-domain '/dev/log' socket, a
    #   zero-terminator seems to be required.  this string is placed
    #   into a class variable so that it can be overridden if
    #   necessary.
    log_format_string = '<%d>%s\000'

    def encodePriority(self, facility, priority):
        """
        Encode the facility and priority. You can pass in strings or
        integers - if strings are passed, the facility_names and
        priority_names mapping dictionaries are used to convert them to
        integers.
        """
        if isinstance(facility, basestring):
            facility = self.facility_names[facility]
        if isinstance(priority, basestring):
            priority = self.priority_names[priority]
        return (facility << 3) | priority
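
    # Worked example: for facility LOG_USER (1) and priority LOG_WARNING (4),
    # (1 << 3) | 4 == 12, so the message goes out with the prefix "<12>".
    # encodePriority('user', 'warning') yields the same value via the
    # facility_names and priority_names mappings.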

    def close(self):
        """
        Closes the socket.
        """
        self.acquire()
        try:
            if self.unixsocket:
                self.socket.close()
        finally:
            self.release()
        logging.Handler.close(self)

    def mapPriority(self, levelName):
        """
        Map a logging level name to a key in the priority_names map.
        This is useful in two scenarios: when custom levels are being
        used, and in the case where you can't do a straightforward
        mapping by lowercasing the logging level name because of locale-
        specific issues (see SF #1524081).
        """
        return self.priority_map.get(levelName, "warning")

    def emit(self, record):
        """
        Emit a record.

        The record is formatted, and then sent to the syslog server. If
        exception information is present, it is NOT sent to the server.
        """
        msg = self.format(record) + '\000'
        # We need to convert record level to lowercase, maybe this will
        # change in the future.
        prio = '<%d>' % self.encodePriority(self.facility,
                                            self.mapPriority(record.levelname))
        # Message is a string. Convert to bytes as required by RFC 5424
        if type(msg) is unicode:
            msg = msg.encode('utf-8')
        msg = prio + msg
        try:
            if self.unixsocket:
                try:
                    self.socket.send(msg)
                except socket.error:
                    self._connect_unixsocket(self.address)
                    self.socket.send(msg)
            elif self.socktype == socket.SOCK_DGRAM:
                self.socket.sendto(msg, self.address)
            else:
                self.socket.sendall(msg)
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class SMTPHandler(logging.Handler):
    """
    A handler class which sends an SMTP email for each logging event.
    """
    def __init__(self, mailhost, fromaddr, toaddrs, subject,
                 credentials=None, secure=None):
        """
        Initialize the handler.

        Initialize the instance with the from and to addresses and subject
        line of the email. To specify a non-standard SMTP port, use the
        (host, port) tuple format for the mailhost argument. To specify
        authentication credentials, supply a (username, password) tuple
        for the credentials argument. To specify the use of a secure
        protocol (TLS), pass in a tuple for the secure argument. This will
        only be used when authentication credentials are supplied. The tuple
        will be either an empty tuple, or a single-value tuple with the name
        of a keyfile, or a 2-value tuple with the names of the keyfile and
        certificate file. (This tuple is passed to the `starttls` method).
        """
        logging.Handler.__init__(self)
        if isinstance(mailhost, tuple):
            self.mailhost, self.mailport = mailhost
        else:
            self.mailhost, self.mailport = mailhost, None
        if isinstance(credentials, tuple):
            self.username, self.password = credentials
        else:
            self.username = None
        self.fromaddr = fromaddr
        if isinstance(toaddrs, basestring):
            toaddrs = [toaddrs]
        self.toaddrs = toaddrs
        self.subject = subject
        self.secure = secure
        self._timeout = 5.0
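
    # Illustrative sketch (hosts, addresses and credentials invented for the
    # example): mail errors to the admins over an authenticated TLS session.
    #
    #   handler = SMTPHandler(mailhost=('smtp.example.com', 587),
    #                         fromaddr='app@example.com',
    #                         toaddrs=['admin@example.com'],
    #                         subject='Application error',
    #                         credentials=('user', 'secret'),
    #                         secure=())
    #   handler.setLevel(logging.ERROR)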

    def getSubject(self, record):
        """
        Determine the subject for the email.

        If you want to specify a subject line which is record-dependent,
        override this method.
        """
        return self.subject

    def emit(self, record):
        """
        Emit a record.

        Format the record and send it to the specified addressees.
        """
        try:
            import smtplib
            from email.utils import formatdate
            port = self.mailport
            if not port:
                port = smtplib.SMTP_PORT
            smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout)
            msg = self.format(record)
            msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
                            self.fromaddr,
                            ",".join(self.toaddrs),
                            self.getSubject(record),
                            formatdate(), msg)
            if self.username:
                if self.secure is not None:
                    smtp.ehlo()
                    smtp.starttls(*self.secure)
                    smtp.ehlo()
                smtp.login(self.username, self.password)
            smtp.sendmail(self.fromaddr, self.toaddrs, msg)
            smtp.quit()
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class NTEventLogHandler(logging.Handler):
    """
    A handler class which sends events to the NT Event Log. Adds a
    registry entry for the specified application name. If no dllname is
    provided, win32service.pyd (which contains some basic message
    placeholders) is used. Note that use of these placeholders will make
    your event logs big, as the entire message source is held in the log.
    If you want slimmer logs, you have to pass in the name of your own DLL
    which contains the message definitions you want to use in the event log.
    """
    def __init__(self, appname, dllname=None, logtype="Application"):
        logging.Handler.__init__(self)
        try:
            import win32evtlogutil, win32evtlog
            self.appname = appname
            self._welu = win32evtlogutil
            if not dllname:
                dllname = os.path.split(self._welu.__file__)
                dllname = os.path.split(dllname[0])
                dllname = os.path.join(dllname[0], r'win32service.pyd')
            self.dllname = dllname
            self.logtype = logtype
            self._welu.AddSourceToRegistry(appname, dllname, logtype)
            self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
            self.typemap = {
                logging.DEBUG   : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.INFO    : win32evtlog.EVENTLOG_INFORMATION_TYPE,
                logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
                logging.ERROR   : win32evtlog.EVENTLOG_ERROR_TYPE,
                logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
            }
        except ImportError:
            print("The Python Win32 extensions for NT (service, event "\
                        "logging) appear not to be available.")
            self._welu = None

    def getMessageID(self, record):
        """
        Return the message ID for the event record. If you are using your
        own messages, you could do this by having the msg passed to the
        logger being an ID rather than a formatting string. Then, in here,
        you could use a dictionary lookup to get the message ID. This
        version returns 1, which is the base message ID in win32service.pyd.
        """
        return 1

    def getEventCategory(self, record):
        """
        Return the event category for the record.

        Override this if you want to specify your own categories. This version
        returns 0.
        """
        return 0

    def getEventType(self, record):
        """
        Return the event type for the record.

        Override this if you want to specify your own types. This version does
        a mapping using the handler's typemap attribute, which is set up in
        __init__() to a dictionary which contains mappings for DEBUG, INFO,
        WARNING, ERROR and CRITICAL. If you are using your own levels you will
        either need to override this method or place a suitable dictionary in
        the handler's typemap attribute.
        """
        return self.typemap.get(record.levelno, self.deftype)

    def emit(self, record):
        """
        Emit a record.

        Determine the message ID, event category and event type. Then
        log the message in the NT event log.
        """
        if self._welu:
            try:
                id = self.getMessageID(record)
                cat = self.getEventCategory(record)
                type = self.getEventType(record)
                msg = self.format(record)
                self._welu.ReportEvent(self.appname, id, cat, type, [msg])
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                self.handleError(record)

    def close(self):
        """
        Clean up this handler.

        You can remove the application name from the registry as a
        source of event log entries. However, if you do this, you will
        not be able to see the events as you intended in the Event Log
        Viewer - it needs to be able to access the registry to get the
        DLL name.
        """
        #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
        logging.Handler.close(self)

class HTTPHandler(logging.Handler):
    """
    A class which sends records to a Web server, using either GET or
    POST semantics.
    """
    def __init__(self, host, url, method="GET"):
        """
        Initialize the instance with the host, the request URL, and the method
        ("GET" or "POST")
        """
        logging.Handler.__init__(self)
        method = method.upper()
        if method not in ["GET", "POST"]:
            raise ValueError("method must be GET or POST")
        self.host = host
        self.url = url
        self.method = method
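
    # Illustrative sketch (host and path invented for the example): each
    # record is sent as form-encoded data to http://logs.example.com/log
    # using POST.
    #
    #   handler = HTTPHandler('logs.example.com', '/log', method='POST')
    #   logger.addHandler(handler)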

    def mapLogRecord(self, record):
        """
        Default implementation of mapping the log record into a dict
        that is sent as the CGI data. Override in your class.
        Contributed by Franz Glasner.
        """
        return record.__dict__

    def emit(self, record):
        """
        Emit a record.

        Send the record to the Web server as a percent-encoded dictionary
        """
        try:
            import httplib, urllib
            host = self.host
            h = httplib.HTTP(host)
            url = self.url
            data = urllib.urlencode(self.mapLogRecord(record))
            if self.method == "GET":
                if (url.find('?') >= 0):
                    sep = '&'
                else:
                    sep = '?'
                url = url + "%c%s" % (sep, data)
            h.putrequest(self.method, url)
            # support multiple hosts on one IP address...
            # need to strip optional :port from host, if present
            i = host.find(":")
            if i >= 0:
                host = host[:i]
            h.putheader("Host", host)
            if self.method == "POST":
                h.putheader("Content-type",
                            "application/x-www-form-urlencoded")
                h.putheader("Content-length", str(len(data)))
            h.endheaders(data if self.method == "POST" else None)
            h.getreply()    #can't do anything with the result
        except (KeyboardInterrupt, SystemExit):
            raise
        except:
            self.handleError(record)

class BufferingHandler(logging.Handler):
    """
    A handler class which buffers logging records in memory. Whenever a
    record is added to the buffer, a check is made to see if the buffer
    should be flushed. If it should, then flush() is expected to do what's
    needed.
    """
    def __init__(self, capacity):
        """
        Initialize the handler with the buffer size.
        """
        logging.Handler.__init__(self)
        self.capacity = capacity
        self.buffer = []

    def shouldFlush(self, record):
        """
        Should the handler flush its buffer?

        Returns true if the buffer is up to capacity. This method can be
        overridden to implement custom flushing strategies.
        """
        return (len(self.buffer) >= self.capacity)

    def emit(self, record):
        """
        Emit a record.

        Append the record. If shouldFlush() tells us to, call flush() to process
        the buffer.
        """
        self.buffer.append(record)
        if self.shouldFlush(record):
            self.flush()

    def flush(self):
        """
        Override to implement custom flushing behaviour.

        This version just zaps the buffer to empty.
        """
        self.acquire()
        try:
            self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Close the handler.

        This version just flushes and chains to the parent class' close().
        """
        self.flush()
        logging.Handler.close(self)

class MemoryHandler(BufferingHandler):
    """
    A handler class which buffers logging records in memory, periodically
    flushing them to a target handler. Flushing occurs whenever the buffer
    is full, or when an event of a certain severity or greater is seen.
    """
    def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
        """
        Initialize the handler with the buffer size, the level at which
        flushing should occur and an optional target.

        Note that without a target being set either here or via setTarget(),
        a MemoryHandler is no use to anyone!
        """
        BufferingHandler.__init__(self, capacity)
        self.flushLevel = flushLevel
        self.target = target
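
    # Illustrative sketch (file name invented for the example): buffer up to
    # 100 records in memory and only pass them to the wrapped handler when an
    # ERROR (or worse) arrives, or when the buffer fills up.  The target can
    # be any other handler.
    #
    #   target = logging.FileHandler('buffered.log')
    #   handler = MemoryHandler(capacity=100, flushLevel=logging.ERROR,
    #                           target=target)
    #   logger.addHandler(handler)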

    def shouldFlush(self, record):
        """
        Check for buffer full or a record at the flushLevel or higher.
        """
        return (len(self.buffer) >= self.capacity) or \
                (record.levelno >= self.flushLevel)

    def setTarget(self, target):
        """
        Set the target handler for this handler.
        """
        self.target = target

    def flush(self):
        """
        For a MemoryHandler, flushing means just sending the buffered
        records to the target, if there is one. Override if you want
        different behaviour.
        """
        self.acquire()
        try:
            if self.target:
                for record in self.buffer:
                    self.target.handle(record)
                self.buffer = []
        finally:
            self.release()

    def close(self):
        """
        Flush, set the target to None and lose the buffer.
        """
        self.flush()
        self.acquire()
        try:
            self.target = None
            BufferingHandler.close(self)
        finally:
            self.release()