Diffstat (limited to 'mini-dinstall')
| -rwxr-xr-x | mini-dinstall | 230 |
1 file changed, 115 insertions(+), 115 deletions(-)
diff --git a/mini-dinstall b/mini-dinstall
index 82f0d4c..da881ab 100755
--- a/mini-dinstall
+++ b/mini-dinstall
@@ -1,4 +1,4 @@
-#!/usr/bin/python
+#!/usr/bin/python3
 # -*- mode: python; coding: utf-8 -*-
 # Miniature version of "dinstall", for installing .changes into an
 # archive
@@ -19,12 +19,12 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 
 import os, sys, re, glob, getopt, time, traceback, lzma, getpass, socket
-import shutil, signal, threading, select, Queue, SocketServer, datetime
+import shutil, signal, threading, select, queue, socketserver, datetime
 import logging, logging.handlers
 #logging.basicConfig()
 import apt_pkg
 apt_pkg.init()
-from ConfigParser import *
+from configparser import *
 
 from minidinstall.ChangeFile import *
 from minidinstall.Dnotify import *
@@ -64,7 +64,7 @@ mail_log_flush_level = logging.ERROR
 mail_log_flush_count = 10
 mail_to = getpass.getuser()
 mail_server = 'localhost'
-incoming_permissions = 0750
+incoming_permissions = 0o750
 tweet_server = 'identica'
 tweet_user = None
 tweet_password = None
@@ -86,32 +86,32 @@ Changes:
 tweet_template = "Installed %(source)s %(version)s to %(distribution)s"
 
 def usage(ecode, ver_only=None):
-    print "mini-dinstall", pkg_version
+    print("mini-dinstall", pkg_version)
     if ver_only:
         sys.exit(ecode)
-    print "Copyright (C) 2002 Colin Walters <walters@gnu.org>"
-    print "Licensed under the GNU GPL."
-    print "Usage: mini-dinstall [OPTIONS...] [DIRECTORY]"
-    print "Options:"
-    print " -v, --verbose\t\tDisplay extra information"
-    print " -q, --quiet\t\tDisplay less information"
-    print " -c, --config=FILE\tParse configuration info from FILE"
-    print " -d, --debug\t\tOutput information to stdout as well as log"
-    print " --no-log\t\tDon't write information to log file"
-    print " -n, --no-act\t\tDon't actually perform changes"
-    print " -b, --batch\t\tDon't daemonize; run once, then exit"
-    print " -r, --run\t\tProcess queue immediately"
-    print " -k, --kill\t\tKill the running mini-dinstall"
-    print " --no-db\t\tDisable lookups on package database"
-    print " --help\t\tWhat you're looking at"
-    print " --version\t\tPrint the software version and exit"
+    print("Copyright (C) 2002 Colin Walters <walters@gnu.org>")
+    print("Licensed under the GNU GPL.")
+    print("Usage: mini-dinstall [OPTIONS...] [DIRECTORY]")
+    print("Options:")
+    print(" -v, --verbose\t\tDisplay extra information")
+    print(" -q, --quiet\t\tDisplay less information")
+    print(" -c, --config=FILE\tParse configuration info from FILE")
+    print(" -d, --debug\t\tOutput information to stdout as well as log")
+    print(" --no-log\t\tDon't write information to log file")
+    print(" -n, --no-act\t\tDon't actually perform changes")
+    print(" -b, --batch\t\tDon't daemonize; run once, then exit")
+    print(" -r, --run\t\tProcess queue immediately")
+    print(" -k, --kill\t\tKill the running mini-dinstall")
+    print(" --no-db\t\tDisable lookups on package database")
+    print(" --help\t\tWhat you're looking at")
+    print(" --version\t\tPrint the software version and exit")
     sys.exit(ecode)
 
 try:
     opts, args = getopt.getopt(sys.argv[1:], 'vqc:dnbrk',
                                ['verbose', 'quiet', 'config=', 'debug', 'no-log',
                                 'no-act', 'batch', 'run', 'kill', 'no-db', 'help', 'version', ])
-except getopt.GetoptError, e:
+except getopt.GetoptError as e:
    sys.stderr.write("Error reading arguments: %s\n" % e)
    usage(1)
 for (key, val) in opts:
@@ -181,8 +181,8 @@ def do_mkdir(name):
         return
     try:
         do_and_log('Creating directory "%s"' % (name), os.mkdir, name)
-    except OSError, e:
-        print e
+    except OSError as e:
+        print(e)
         exit(1)
 
 def do_rename(source, target):
@@ -201,13 +201,13 @@ stderr_handler.setLevel(loglevel)
 stderr_handler.setFormatter(logging.Formatter(fmt="%(name)s [%(thread)d] %(levelname)s: %(message)s"))
 
 configp = ConfigParser()
-configfile_names = map(lambda x: os.path.abspath(os.path.expanduser(x)), configfile_names)
+configfile_names = [os.path.abspath(os.path.expanduser(x)) for x in configfile_names]
 logger.debug("Reading config files: %s" % (configfile_names,))
 configp.read(configfile_names)
 
 class SubjectSpecifyingLoggingSMTPHandler(logging.handlers.SMTPHandler):
     def __init__(self, *args, **kwargs):
-        apply(logging.handlers.SMTPHandler.__init__, [self] + list(args) + ['dummy'], kwargs)
+        logging.handlers.SMTPHandler.__init__(*[self] + list(args) + ['dummy'], **kwargs)
 
     def setSubject(self, subject):
         self._subject = subject
@@ -255,7 +255,7 @@ lockfilename = os.path.join(dinstall_subdir, 'mini-dinstall.lock')
 def process_exists(pid):
     try:
         os.kill(pid, 0)
-    except OSError, e:
+    except OSError as e:
         return 0
     return 1
 
@@ -279,13 +279,13 @@ if run_mode or kill_mode:
     sock.connect(socket_name)
     if run_mode:
         logger.debug('Sending RUN command')
-        sock.send('RUN\n')
+        sock.send('RUN\n'.encode('utf-8'))
     else:
         logger.debug('Sending DIE command')
-        sock.send('DIE\n')
+        sock.send('DIE\n'.encode('utf-8'))
     logger.debug('Reading response')
-    response = sock.recv(8192)
-    print response
+    response = sock.recv(8192).decode('utf-8')
+    print(response)
     sys.exit(0)
 
 if configp.has_option('DEFAULT', 'logfile'):
@@ -309,7 +309,7 @@ class DinstallException(Exception):
     def __init__(self, value):
         self._value = value
     def __str__(self):
-        return `self._value`
+        return repr(self._value)
 
 if not configp.has_option('DEFAULT', 'archive_style'):
     logger.critical("You must set the default archive_style option (since version 0.4.0)")
@@ -321,7 +321,7 @@ default_extra_keyrings = []
 default_keyrings = None
 
 if configp.has_option('DEFAULT', 'architectures'):
-    default_architectures = string.split(configp.get('DEFAULT', 'architectures'), ', ')
+    default_architectures = configp.get('DEFAULT', 'architectures').split(', ')
 if configp.has_option('DEFAULT', 'verify_sigs'):
     default_verify_sigs = configp.getboolean('DEFAULT', 'verify_sigs')
 if configp.has_option('DEFAULT', 'trigger_reindex'):
@@ -335,7 +335,7 @@ if configp.has_option('DEFAULT', 'expire_release_files'):
 if configp.has_option('DEFAULT', 'extra_keyrings'):
     default_extra_keyrings = re.split(', ?', configp.get('DEFAULT', 'extra_keyrings'))
 if configp.has_option('DEFAULT', 'keyids'):
-    keyids = string.split(configp.get('DEFAULT', 'keyids'), ', ')
+    keyids = configp.get('DEFAULT', 'keyids').split(', ')
 if configp.has_option('DEFAULT', 'keyrings'):
     default_keyrings = re.split(', ?', configp.get('DEFAULT', 'keyrings'))
 if configp.has_option('DEFAULT', 'use_byhash'):
@@ -361,7 +361,7 @@ if not len(sects) == 0:
     for sect in sects:
         distributions[sect] = {}
         if configp.has_option(sect, "architectures"):
-            distributions[sect]["arches"] = string.split(configp.get(sect, "architectures"), ', ')
+            distributions[sect]["arches"] = configp.get(sect, "architectures").split(', ')
         else:
             distributions[sect]["arches"] = default_architectures
 else:
@@ -404,7 +404,7 @@ class DistOptionHandler:
 
     def get_option_map(self, dist):
         ret = self._distributions[dist]
-        for key in self._optionmap.keys():
+        for key in list(self._optionmap.keys()):
            type = self._optionmap[key][0]
            ret[key] = self._optionmap[key][1]
            if self._configp.has_option ('DEFAULT', key):
@@ -427,7 +427,7 @@ class DistOptionHandler:
 
 distoptionhandler = DistOptionHandler(distributions, configp)
 
-for dist in distributions.keys():
+for dist in list(distributions.keys()):
     distributions[dist] = distoptionhandler.get_option_map(dist)
     if not distributions[dist]['archive_style'] in ('simple-subdir', 'flat'):
         raise DinstallException("Unknown archive style \"%s\"" % (distributions[dist]['archive_style'],))
@@ -480,11 +480,11 @@ reprocess_needed = threading.Event()
 reprocess_finished = threading.Event()
 reprocess_lock = threading.Lock()
 
-class IncomingDirRequestHandler(SocketServer.StreamRequestHandler, SocketServer.BaseRequestHandler):
+class IncomingDirRequestHandler(socketserver.StreamRequestHandler, socketserver.BaseRequestHandler):
     def handle(self):
         logger.debug('Got request from %s' % (self.client_address,))
-        req = self.rfile.readline()
-        if req == 'RUN\n':
+        req = self.rfile.readline().strip().decode('utf-8')
+        if req == 'RUN':
             logger.debug('Doing RUN command')
             reprocess_lock.acquire()
             reprocess_needed.set()
@@ -492,16 +492,16 @@ class IncomingDirRequestHandler(SocketServer.StreamRequestHandler, SocketServer.
             reprocess_finished.wait()
             reprocess_finished.clear()
             reprocess_lock.release()
-            self.wfile.write('200 Reprocessing complete\n')
-        elif req == 'DIE\n':
+            self.wfile.write('200 Reprocessing complete'.encode('utf-8'))
+        elif req == 'DIE':
             logger.debug('Doing DIE command')
-            self.wfile.write('200 Beginning shutdown\n')
+            self.wfile.write('200 Beginning shutdown'.encode('utf-8'))
             die_event.set()
         else:
             logger.debug('Got unknown command %s' % (req,))
-            self.wfile.write('500 Unknown request\n')
+            self.wfile.write('500 Unknown request'.encode('utf-8'))
 
-class ExceptionThrowingThreadedUnixStreamServer(SocketServer.ThreadingUnixStreamServer):
+class ExceptionThrowingThreadedUnixStreamServer(socketserver.ThreadingUnixStreamServer):
     def handle_error(self, request, client_address):
         self._logger.exception("Unhandled exception during request processing; shutting down")
         die_event.set()
@@ -517,7 +517,7 @@ class IncomingDir(threading.Thread):
         self._batch_mode = batch_mode
         self._max_retry_time = max_retry_time
         self._last_failed_targets = {}
-        self._eventqueue = Queue.Queue()
+        self._eventqueue = queue.Queue()
         self._done_event = threading.Event()
         # ensure we always have some reprocess queue
         self._reprocess_queue = {}
@@ -542,14 +542,14 @@ class IncomingDir(threading.Thread):
                 self._daemonize(initial_reprocess_queue, initial_fucked_list)
             self._done_event.set()
             self._logger.info('All packages in incoming dir installed; exiting')
-        except Exception, e:
+        except Exception as e:
             self._logger.exception("Unhandled exception; shutting down")
             die_event.set()
             self._done_event.set()
             return 0
 
     def _abspath(self, *args):
-        return os.path.abspath(apply(os.path.join, [self._dir] + list(args)))
+        return os.path.abspath(os.path.join(*[self._dir] + list(args)))
 
     def _get_changefiles(self):
         ret = []
@@ -557,7 +557,7 @@ class IncomingDir(threading.Thread):
         self._logger.debug("glob: " + globpath)
         changefilenames = glob.glob(globpath)
         for changefilename in changefilenames:
-            if not self._reprocess_queue.has_key(changefilename):
+            if changefilename not in self._reprocess_queue:
                 self._logger.info('Examining "%s"' % (changefilename,))
                 changefile = ChangeFile()
                 try:
@@ -573,7 +573,7 @@
     def _changefile_ready(self, changefilename, changefile):
         try:
             dist = changefile['distribution']
-        except KeyError, e:
+        except KeyError as e:
             self._logger.warn("Unable to read distribution field for \"%s\"; data: %s" % (changefilename, changefile,))
             return 0
         try:
@@ -584,14 +584,14 @@
 
     def _install_changefile(self, changefilename, changefile, doing_reprocess):
         changefiledist = changefile['distribution']
-        for dist in distributions.keys():
+        for dist in list(distributions.keys()):
             distributions[dist] = distoptionhandler.get_option_map(dist)
             if distributions[dist]['alias'] != None and changefiledist in distributions[dist]['alias']:
                 logger.info('Distribution "%s" is an alias for "%s"' % (changefiledist, dist))
                 break
         else:
             dist = changefiledist
-        if not dist in self._archivemap.keys():
+        if not dist in list(self._archivemap.keys()):
             raise DinstallException('Unknown distribution "%s" in \"%s\"' % (dist, changefilename,))
         logger.debug('Installing %s in archive %s' % (changefilename, self._archivemap[dist][1].getName()))
         self._archivemap[dist][0].install(changefilename, changefile)
@@ -619,7 +619,7 @@
 
     def _daemon_reprocess_pending(self):
         curtime = time.time()
-        for changefilename in self._reprocess_queue.keys():
+        for changefilename in list(self._reprocess_queue.keys()):
             (starttime, nexttime, delay) = self._reprocess_queue[changefilename]
             if curtime >= nexttime:
                 return 1
@@ -632,7 +632,7 @@
         self._async_dnotify.start()
         try:
             os.unlink(socket_name)
-        except OSError, e:
+        except OSError as e:
             pass
         self._server = ExceptionThrowingThreadedUnixStreamServer(socket_name, IncomingDirRequestHandler)
         self._server.allow_reuse_address = 1
@@ -646,7 +646,7 @@
             self._reprocess_queue[changefilename] = [curtime, curtime, retry_time]
 
         # The main daemon loop
-        while 1:
+        while True:
            # Wait until we have something to do
            while not (self._daemon_event_ispending() or self._daemon_reprocess_pending()):
                time.sleep(0.5)
@@ -663,13 +663,13 @@
             self._logger.debug('Scanning for changes')
 
             # do we have anything to reprocess?
-            for changefilename in self._reprocess_queue.keys():
+            for changefilename in list(self._reprocess_queue.keys()):
                 (starttime, nexttime, delay) = self._reprocess_queue[changefilename]
                 curtime = time.time()
                 try:
                     changefile = ChangeFile()
                     changefile.load_from_file(changefilename)
-                except (ChangeFileException,IOError), e:
+                except (ChangeFileException,IOError) as e:
                     if not os.path.exists(changefilename):
                         self._logger.info('Changefile "%s" got removed' % (changefilename,))
                     else:
@@ -689,7 +689,7 @@
                     self._install_changefile(changefilename, changefile, doing_reprocess)
                     self._logger.debug('Removing "%s" from incoming queue after successful install.' % (changefilename,))
                     del self._reprocess_queue[changefilename]
-                except Exception, e:
+                except Exception as e:
                     logger.exception("Unable to install \"%s\"; adding to screwed list" % (changefilename,))
                     fucked.append(changefilename)
                 else:
@@ -718,12 +718,12 @@
                     self._logger.warn("Skipping screwed changefile \"%s\"" % (changefilename,))
                     continue
                 # Have we tried this changefile before?
-                if not self._reprocess_queue.has_key(changefilename):
+                if changefilename not in self._reprocess_queue:
                     self._logger.debug('New change file "%s"' % (changefilename,))
                     if self._changefile_ready(changefilename, changefile):
                         try:
                             self._install_changefile(changefilename, changefile, doing_reprocess)
-                        except Exception, e:
+                        except Exception as e:
                             logger.exception("Unable to install \"%s\"; adding to screwed list" % (changefilename,))
                             fucked.append(changefilename)
                     else:
@@ -749,12 +749,12 @@ class ArchiveDir:
         self._dir = dir
         self._name = os.path.basename(os.path.abspath(dir))
         self._logger = logger
-        for key in configdict.keys():
+        for key in list(configdict.keys()):
            self._logger.debug("Setting \"%s\" => \"%s\" in archive \"%s\"" % ('_'+key, configdict[key], self._name))
            self.__dict__['_' + key] = configdict[key]
        do_mkdir(dir)
        self._batch_mode = batch_mode
-        if configdict.has_key('verify_sigs'):
+        if 'verify_sigs' in configdict:
            self._verify_sigs = configdict['verify_sigs']
        else:
            self._verify_sigs = verify_sigs
@@ -780,10 +780,10 @@ class ArchiveDir:
 #        self._changefiles = []
 
     def _abspath(self, *args):
-        return os.path.abspath(apply(os.path.join, [self._dir] + list(args)))
+        return os.path.abspath(os.path.join(*[self._dir] + list(args)))
 
     def _relpath(self, *args):
-        return apply(os.path.join, [self._name] + list(args))
+        return os.path.join(*[self._name] + list(args))
 
     def install(self, changefilename, changefile):
         retval = 0
@@ -805,15 +805,15 @@
             self._logger.info('Verifying signature on "%s"' % (changefilename,))
             try:
                 if self._keyrings:
-                    verifier = DebianSigVerifier(keyrings=map(os.path.expanduser, self._keyrings), extra_keyrings=self._extra_keyrings)
+                    verifier = DebianSigVerifier(keyrings=list(map(os.path.expanduser, self._keyrings)), extra_keyrings=self._extra_keyrings)
                 else:
                     verifier = DebianSigVerifier(extra_keyrings=self._extra_keyrings)
                 output = verifier.verify(changefilename)
                 logger.debug(output)
                 logger.info('Good signature on "%s"' % (changefilename,))
-            except GPGSigVerificationFailure, e:
+            except GPGSigVerificationFailure as e:
                 msg = "Failed to verify signature on \"%s\": %s\n" % (changefilename, e)
-                msg += string.join(e.getOutput(), '')
+                msg += ''.join(e.getOutput())
                 logger.error(msg)
                 self._reject_changefile(changefilename, changefile, e)
                 return 0
@@ -829,12 +829,12 @@
             return 0
         try:
             self._install_changefile_internal(changefilename, changefile)
-        except Exception, e:
+        except Exception as e:
             self._logger.exception('Failed to process "%s"' % (changefilename,))
             self._reject_changefile(changefilename, changefile, e)
             return 0
         if self._chown_changes_files:
-            do_chmod(changefilename, 0600)
+            do_chmod(changefilename, 0o600)
         target = os.path.join(self._dir, os.path.basename(changefilename))
         # the final step
         do_rename(changefilename, target)
@@ -842,13 +842,13 @@
         if self._mail_on_success:
             done = False
             missing_fields = []
-            if changefile.has_key('changes'):
+            if 'changes' in changefile:
                 changefile ['changes_without_dot'] = misc.format_changes(changefile['changes'])
             while not done:
                 try:
                     mail_subject = mail_subject_template % changefile
                     mail_body = mail_body_template % changefile
-                except KeyError, exc:
+                except KeyError as exc:
                     key = exc.args[0]
                     changefile[key] = ''
                     missing_fields.append(key)
@@ -861,12 +861,12 @@
         if self._tweet_on_success:
             done = False
             missing_fields = []
-            if changefile.has_key('changes'):
+            if 'changes' in changefile:
                 changefile ['changes_without_dot'] = misc.format_changes(changefile['changes'])
             while not done:
                 try:
                     tweet_body = tweet_template % changefile
-                except KeyError, exc:
+                except KeyError as exc:
                     key = exc.args[0]
                     changefile[key] = ''
                     missing_fields.append(key)
@@ -896,7 +896,7 @@ class ArchiveDir:
         else:
             (newupstreamver, newdebianver) = parse_versions(version)
         is_sourceful = 0
-        for file in map(lambda x: x[2], changefile.getFiles()):
+        for file in [x[2] for x in changefile.getFiles()]:
             match = debpackage_re.search(file)
             if match:
                 arch = match.group(3)
@@ -921,16 +921,16 @@
                 newfiles.append((os.path.join(incomingdir, file), target, match.group(1), 'source'))
 
         all_arches = {}
-        for arch in map(lambda x: x[3], newfiles):
+        for arch in [x[3] for x in newfiles]:
             all_arches[arch] = 1
         completed = []
         oldfiles = []
         if not self._keep_old:
             found_old_bins = 0
-            for (oldversion, oldarch) in map(lambda x: x[1:], self._get_package_versions()):
-                if not all_arches.has_key(oldarch) and apt_pkg.version_compare(oldversion, version) < 0:
+            for (oldversion, oldarch) in [x[1:] for x in self._get_package_versions()]:
+                if oldarch not in all_arches and apt_pkg.version_compare(oldversion, version) < 0:
                     found_old_bins = 1
-            for (pkgname, arch) in map(lambda x: x[2:], newfiles):
+            for (pkgname, arch) in [x[2:] for x in newfiles]:
                 if arch == 'source' and found_old_bins:
                     continue
                 self._logger.debug('Scanning for old files')
@@ -941,14 +941,14 @@
                     oldpkgname = match.group(1)
                     oldarch = match.group(3)
                     file = self._arch_target(arch, file)
-                    if not file in map(lambda x: x[0], oldfiles):
+                    if not file in [x[0] for x in oldfiles]:
                         target = file + tmp_old_suffix
                         if oldpkgname == pkgname and oldarch == arch:
                             oldfiles.append((file, target))
         self._logger.debug('Scanning "%s" for old files' % (self._abspath('source')))
         for file in self._read_source_dir():
             file = self._source_target(file)
-            if not file in map(lambda x: x[0], oldfiles):
+            if not file in [x[0] for x in oldfiles]:
                 target = file + tmp_old_suffix
                 match = debchanges_re.search(file)
                 if not match and is_sourceful:
@@ -977,22 +977,22 @@
                 oldfiles.append((file, target))
                 continue
             match = debsrc_native_re.search(file)
-            if match and match.group(1) in map(lambda x: x[2], newfiles):
+            if match and match.group(1) in [x[2] for x in newfiles]:
                 oldfiles.append((file, target))
                 continue
 
-        self._clean_targets = map(lambda x: x[1], oldfiles)
-        allrenames = oldfiles + map(lambda x: x[:2], newfiles)
+        self._clean_targets = [x[1] for x in oldfiles]
+        allrenames = oldfiles + [x[:2] for x in newfiles]
         try:
             while not allrenames == []:
                 (oldname, newname) = allrenames[0]
                 do_rename(oldname, newname)
                 completed.append(allrenames[0])
                 allrenames = allrenames[1:]
-        except OSError, e:
+        except OSError as e:
             logger.exception("Failed to do rename (%s); attempting rollback" % (e.strerror,))
             try:
-                self._logger.error(traceback.format_tb(sys.exc_traceback))
+                self._logger.error(traceback.format_tb(sys.exc_info()[2]))
             except:
                 pass
             # Unwind to previous state
@@ -1028,12 +1028,12 @@
         incomingdir = os.path.dirname(changefilename)
         try:
             f = open(os.path.join(rejectdir, "%s_%s.reason" % (sourcename, version)), 'w')
-            if type(exception) == type('string'):
+            if isinstance(exception, str):
                 f.write(exception)
             else:
                 traceback.print_exception(Exception, exception, None, None, f)
            f.close()
            for file in [x[2] for x in changefile.getFiles()]:
                if os.access(os.path.join(incomingdir, file), os.R_OK):
                    file = os.path.join(incomingdir, file)
                else:
@@ -1041,7 +1041,7 @@ class ArchiveDir:
                 target = os.path.join(rejectdir, os.path.basename(file))
                 do_rename(file, target)
             do_rename(changefilename, os.path.join(rejectdir, os.path.basename(changefilename)))
-            self._logger.info('Rejecting "%s": %s' % (changefilename, `exception`))
+            self._logger.info('Rejecting "%s": %s' % (changefilename, repr(exception)))
         except Exception:
             self._logger.error("Unhandled exception while rejecting %s; archive may be in inconsistent state" % (changefilename,))
             raise
@@ -1055,7 +1055,7 @@
 
 class SimpleSubdirArchiveDir(ArchiveDir):
     def __init__(self, *args, **kwargs):
-        apply(ArchiveDir.__init__, [self] + list(args), kwargs)
+        ArchiveDir.__init__(*[self] + list(args), **kwargs)
         for arch in list(self._arches) + ['source']:
             target = os.path.join(self._dir, arch)
             do_mkdir(target)
@@ -1109,8 +1109,8 @@ class ArchiveDirIndexer(threading.Thread):
         self._name = os.path.basename(os.path.abspath(dir))
         threading.Thread.__init__(self, name=self._name)
         self._logger = logger
-        self._eventqueue = Queue.Queue()
-        for key in configdict.keys():
+        self._eventqueue = queue.Queue()
+        for key in list(configdict.keys()):
            self._logger.debug("Setting \"%s\" => \"%s\" in archive \"%s\"" % ('_'+key, configdict[key], self._name))
            self.__dict__['_' + key] = configdict[key]
        do_mkdir(dir)
@@ -1119,10 +1119,10 @@ class ArchiveDirIndexer(threading.Thread):
         self._done_event = threading.Event()
 
     def _abspath(self, *args):
-        return os.path.abspath(apply(os.path.join, [self._dir] + list(args)))
+        return os.path.abspath(os.path.join(*[self._dir] + list(args)))
 
     def _relpath(self, *args):
-        return apply(os.path.join, [self._name] + list(args))
+        return os.path.join(*[self._name] + list(args))
 
     def _make_indexfile(self, dir, type, name, arch=None):
         cmdline = ['apt-ftparchive', type, dir,
@@ -1134,7 +1134,7 @@
         if not nodb_mode:
             cmdline += ['--db', '%s.db' %dir]
 
-        self._logger.debug("Running: " + string.join(cmdline, ' '))
+        self._logger.debug("Running: " + ' '.join(cmdline))
         if no_act:
             return
         (infd, outfd) = os.pipe()
@@ -1156,7 +1156,7 @@
         xzpackagesfilename = packagesfilename + '.xz'
         newxzpackagesfilename = newpackagesfilename + '.xz'
         newpackagesfile = open(newpackagesfilename, 'w')
-        newxzpackagesfile = lzma.LZMAFile(newxzpackagesfilename, 'w')
+        newxzpackagesfile = lzma.open(newxzpackagesfilename, 'wt')
         buf = stdout.read(8192)
         while buf != '':
             newpackagesfile.write(buf)
@@ -1262,7 +1262,7 @@
             # never returns
             self._daemonize()
             self._done_event.set()
-        except Exception, e:
+        except Exception as e:
             self._logger.exception("Unhandled exception; shutting down")
             die_event.set()
             self._done_event.set()
@@ -1279,7 +1279,7 @@
         self._async_dnotify.start()
 
         # The main daemon loop
-        while 1:
+        while True:
 
            # Wait until we have a pending event
            while not self._daemon_event_ispending():
@@ -1292,10 +1292,10 @@
             setevent = None
             dir = None
             obj = self._eventqueue.get()
-            if type(obj) == type(''):
+            if isinstance(obj, str):
                 self._logger.debug('got dir change')
                 dir = obj
-            elif type(obj) == type(None):
+            elif obj is None:
                 self._logger.debug('got general event')
                 setevent = None
             elif obj.__class__ == threading.Event().__class__:
@@ -1359,7 +1359,7 @@
 
 class SimpleSubdirArchiveDirIndexer(ArchiveDirIndexer):
     def __init__(self, *args, **kwargs):
-        apply(ArchiveDirIndexer.__init__, [self] + list(args), kwargs)
+        ArchiveDirIndexer.__init__(*[self] + list(args), **kwargs)
         for arch in list(self._arches) + ['source']:
             target = os.path.join(self._dir, arch)
             do_mkdir(target)
@@ -1393,7 +1393,7 @@ class SimpleSubdirArchiveDirIndexer(ArchiveDirIndexer):
             self._logger.info("Release generation disabled, removing existing Release file")
             try:
                 os.unlink(targetname)
-            except OSError, e:
+            except OSError as e:
                 pass
             return
         tmpname = targetname + tmp_new_suffix
@@ -1441,7 +1441,7 @@
             f.write('Valid-Until: ' + (datetime.datetime.utcnow() + datetime.timedelta(days=28)).strftime("%a, %d %b %Y %H:%M:%S UTC") + '\n')
         f.write('Architectures: ' + arch + '\n')
         if self._keyids:
-            f.write('Signed-By: ' + string.join(self._keyids, ',') + '\n')
+            f.write('Signed-By: ' + ','.join(self._keyids) + '\n')
         if self._use_byhash:
             f.write('Acquire-By-Hash: yes\n')
         if self._release_description:
@@ -1454,17 +1454,17 @@
         self._logger.info("Release generation complete")
 
     def _in_archdir(self, *args):
-        return apply(lambda x,self=self: self._abspath(x), args)
+        return (lambda x,self=self: self._abspath(x)) (*args)
 
     def _get_dnotify_dirs(self):
-        return map(lambda x, self=self: self._abspath(x), self._arches + ['source'])
+        return list(map(lambda x, self=self: self._abspath(x), self._arches + ['source']))
 
     def _get_all_indexfiles(self):
-        return map(lambda arch: os.path.join(arch, 'Packages'), self._arches) + ['source/Sources']
+        return [os.path.join(arch, 'Packages') for arch in self._arches] + ['source/Sources']
 
 class FlatArchiveDirIndexer(ArchiveDirIndexer):
     def __init__(self, *args, **kwargs):
-        apply(ArchiveDirIndexer.__init__, [self] + list(args), kwargs)
+        ArchiveDirIndexer.__init__(*[self] + list(args), **kwargs)
 
     def _index_impl(self, arches, force=None):
         pkgsfile = self._abspath('Packages')
@@ -1501,7 +1501,7 @@ class FlatArchiveDirIndexer(ArchiveDirIndexer):
             self._logger.info("Release generation disabled, removing existing Release file")
             try:
                 os.unlink(targetname)
-            except OSError, e:
+            except OSError as e:
                 pass
             return
         tmpname = targetname + tmp_new_suffix
@@ -1549,9 +1549,9 @@
         f.write('Date: ' + time.strftime("%a, %d %b %Y %H:%M:%S UTC", time.gmtime()) + '\n')
         if self._expire_release_files or self._keyids:
             f.write('Valid-Until: ' + (datetime.datetime.utcnow() + datetime.timedelta(days=28)).strftime("%a, %d %b %Y %H:%M:%S UTC") + '\n')
-        f.write('Architectures: ' + string.join(self._arches, ' ') + '\n')
+        f.write('Architectures: ' + ' '.join(self._arches) + '\n')
         if self._keyids:
-            f.write('Signed-By: ' + string.join(self._keyids, ',') + '\n')
+            f.write('Signed-By: ' + ','.join(self._keyids) + '\n')
         if self._use_byhash:
             f.write('Acquire-By-Hash: yes\n')
         if self._release_description:
@@ -1564,7 +1564,7 @@ class FlatArchiveDirIndexer(ArchiveDirIndexer):
         self._logger.info("Release generation complete")
 
     def _in_archdir(self, *args):
-        return apply(lambda x,self=self: self._abspath(x), args[1:])
+        return (lambda x,self=self: self._abspath(x))(*args[1:])
 
     def _get_dnotify_dirs(self):
         return [self._dir]
@@ -1617,7 +1617,7 @@ if not (debug_mode or batch_mode):
 archivemap = {}
 
 # Instantiaate archive classes for installing files
-for dist in distributions.keys():
+for dist in list(distributions.keys()):
     if distributions[dist]['archive_style'] == 'simple-subdir':
         newclass = SimpleSubdirArchiveDir
     else:
@@ -1625,7 +1625,7 @@ for dist in distributions.keys():
     archivemap[dist] = [newclass(dist, logger, distributions[dist], batch_mode=batch_mode, keyrings=default_keyrings, extra_keyrings=default_extra_keyrings, verify_sigs=default_verify_sigs), None]
 
 # Create archive indexing threads, but don't start them yet
-for dist in distributions.keys():
+for dist in list(distributions.keys()):
     targetdir = os.path.join(toplevel_directory, dist)
     logger.info('Initializing archive indexer %s' % (dist,))
     if distributions[dist]['archive_style'] == 'simple-subdir':
@@ -1645,7 +1645,7 @@ if batch_mode:
     incoming.wait()
 
 # Once we've installed everything, start the indexing threads
-for dist in distributions.keys():
+for dist in list(distributions.keys()):
     archive = archivemap[dist][1]
     logger.debug('Starting archive %s' % (archive.getName(),))
     archive.start()
@@ -1653,7 +1653,7 @@ for dist in distributions.keys():
 # Wait for all the indexing threads to finish; none of these ever
 # return if we're in daemon mode
 if batch_mode:
-    for dist in distributions.keys():
+    for dist in list(distributions.keys()):
         archive = archivemap[dist][1]
         logger.debug('Waiting for archive %s to finish' % (archive.getName(),))
         archive.wait()
@@ -1662,7 +1662,7 @@ else:
     die_event.wait()
     logger.info('Die event caught; waiting for incoming processor to finish')
    incoming.wait()
-    for dist in distributions.keys():
+    for dist in list(distributions.keys()):
        archive = archivemap[dist][1]
        logger.info('Die event caught; waiting for archive %s to finish' % (archive.getName(),))
        archive.wait()
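
Notes on the port:

The control-socket hunks replace str payloads with explicit encode/decode
calls, since Python 3 sockets carry bytes rather than text. A minimal sketch
of the client side of the RUN/DIE protocol, assuming a hypothetical socket
path (the real socket_name is derived from the configured dinstall_subdir
and does not appear in this diff):

    import os
    import socket

    # Hypothetical location; adjust to wherever the configured
    # dinstall_subdir places the control socket.
    SOCKET_PATH = os.path.expanduser('~/.mini-dinstall/mini-dinstall.socket')

    def send_command(command):
        """Send RUN or DIE and return the decoded status line."""
        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        try:
            sock.connect(SOCKET_PATH)
            # Bytes on the wire, str in the program -- the same
            # encode/decode pair the patch adds around send()/recv().
            sock.send((command + '\n').encode('utf-8'))
            return sock.recv(8192).decode('utf-8')
        finally:
            sock.close()

    print(send_command('RUN'))  # expect '200 Reprocessing complete'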
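
The indexer hunk swaps lzma.LZMAFile(name, 'w') for lzma.open(name, 'wt'):
under Python 3, LZMAFile accepts only bytes, while lzma.open in text mode
encodes str data transparently. The pattern in isolation, with hypothetical
file names standing in for the Packages/Packages.xz pair handled above:

    import lzma

    data = 'Package: example\nVersion: 1.0-1\n'

    # 'wt' layers text mode over the compressed stream, so str chunks can
    # be written directly; the bytes-only LZMAFile API would require
    # data.encode() first.
    with lzma.open('Packages.xz.new', 'wt') as xzfile:
        xzfile.write(data)

    # Reading back in text mode round-trips the str content.
    with lzma.open('Packages.xz.new', 'rt') as xzfile:
        assert xzfile.read() == data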
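
Several hunks also replace the apply() builtin, which Python 3 removed, with
* and ** argument unpacking. The _abspath() rewrite reduces to this pattern
(base and parts are illustrative names, not from the source):

    import os

    def _abspath(base, *parts):
        # Python 3: unpack the argument list directly...
        return os.path.abspath(os.path.join(*[base] + list(parts)))
        # ...instead of the Python 2 form:
        #   os.path.abspath(apply(os.path.join, [base] + list(parts)))

    print(_abspath('/srv/archive', 'mini-dinstall', 'unstable'))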
