ovs-bugtool.in: Remove unused variables.
diff --git a/utilities/bugtool/ovs-bugtool.in b/utilities/bugtool/ovs-bugtool.in
index 19b3378..cd1924d 100755
@@ -14,7 +14,7 @@
 # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 #
 # Copyright (c) 2005, 2007 XenSource Ltd.
-# Copyright (c) 2010, 2011, 2012 Nicira, Inc.
+# Copyright (c) 2010, 2011, 2012, 2013, 2015 Nicira, Inc.
 
 #
 # To add new entries to the bugtool, you need to:
@@ -33,9 +33,6 @@
 # or func_output().
 #
 
-import warnings
-warnings.filterwarnings(action="ignore", category=DeprecationWarning)
-
 import getopt
 import re
 import os
@@ -44,19 +41,16 @@ import sys
 import tarfile
 import time
 import commands
-import pprint
 from xml.dom.minidom import parse, getDOMImplementation
 import zipfile
 from subprocess import Popen, PIPE
 from select import select
-from signal import SIGTERM, SIGUSR1
+from signal import SIGTERM
 import md5
 import platform
 import fcntl
-import glob
-import urllib
-import socket
-import base64
+import warnings
+warnings.filterwarnings(action="ignore", category=DeprecationWarning)
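A minimal, self-contained sketch (not part of the patch) of what this filter does: once installed, matching warnings raised afterwards are discarded instead of printed. It only affects warnings issued after the call, so warnings triggered by earlier imports are not retroactively suppressed.

import warnings

warnings.filterwarnings(action="ignore", category=DeprecationWarning)

def legacy():
    # Matches the "ignore" filter above, so the warning is discarded.
    warnings.warn("legacy() is deprecated", DeprecationWarning)
    return "ok"

print(legacy())  # prints "ok" with no DeprecationWarning on stderr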
 
 OS_RELEASE = platform.release()
 
@@ -109,10 +103,10 @@ HOSTS_ALLOW = '/etc/hosts.allow'
 HOSTS_DENY = '/etc/hosts.deny'
 DHCP_LEASE_DIR = ['/var/lib/dhclient', '/var/lib/dhcp3']
 OPENVSWITCH_LOG_DIR = '@LOGDIR@/'
-OPENVSWITCH_DEFAULT_SWITCH = '/etc/default/openvswitch-switch' # Debian
-OPENVSWITCH_SYSCONFIG_SWITCH = '/etc/sysconfig/openvswitch'    # RHEL
-OPENVSWITCH_DEFAULT_CONTROLLER = '/etc/default/openvswitch-controller'
+OPENVSWITCH_DEFAULT_SWITCH = '/etc/default/openvswitch-switch'  # Debian
+OPENVSWITCH_SYSCONFIG_SWITCH = '/etc/sysconfig/openvswitch'     # RHEL
 OPENVSWITCH_CONF_DB = '@DBDIR@/conf.db'
+OPENVSWITCH_COMPACT_DB = '@DBDIR@/bugtool-compact-conf.db'
 OPENVSWITCH_VSWITCHD_PID = '@RUNDIR@/ovs-vswitchd.pid'
 VAR_LOG_DIR = '/var/log/'
 VAR_LOG_CORE_DIR = '/var/log/core'
@@ -123,7 +117,8 @@ YUM_REPOS_DIR = '/etc/yum.repos.d'
 # External programs
 #
 
-os.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:@pkgdatadir@/scripts'
+os.environ['PATH'] = '/usr/local/sbin:/usr/local/bin:' \
+                     '/usr/sbin:/usr/bin:/sbin:/bin:@pkgdatadir@/scripts'
 ARP = 'arp'
 CAT = 'cat'
 CHKCONFIG = 'chkconfig'
@@ -149,7 +144,6 @@ NETSTAT = 'netstat'
 OVS_DPCTL = 'ovs-dpctl'
 OVS_OFCTL = 'ovs-ofctl'
 OVS_VSCTL = 'ovs-vsctl'
-OVS_APPCTL = 'ovs-appctl'
 PS = 'ps'
 ROUTE = 'route'
 RPM = 'rpm'
@@ -174,19 +168,19 @@ ZCAT = 'zcat'
 # scripts in dom0.
 #
 
-PII_NO            = 'no'
-PII_YES           = 'yes'
-PII_MAYBE         = 'maybe'
+PII_NO = 'no'
+PII_YES = 'yes'
+PII_MAYBE = 'maybe'
 PII_IF_CUSTOMIZED = 'if_customized'
-KEY      = 0
-PII      = 1
+KEY = 0
+PII = 1
 MIN_SIZE = 2
 MAX_SIZE = 3
 MIN_TIME = 4
 MAX_TIME = 5
-MIME     = 6
-CHECKED  = 7
-HIDDEN   = 8
+MIME = 6
+CHECKED = 7
+HIDDEN = 8
 
 MIME_DATA = 'application/data'
 MIME_TEXT = 'text/plain'
@@ -198,19 +192,20 @@ CAP_XML_ROOT = "system-status-capabilities"
 CAP_XML_ELEMENT = 'capability'
 
 
-CAP_BOOT_LOADER          = 'boot-loader'
-CAP_DISK_INFO            = 'disk-info'
-CAP_HARDWARE_INFO        = 'hardware-info'
-CAP_KERNEL_INFO          = 'kernel-info'
-CAP_LOSETUP_A            = 'loopback-devices'
-CAP_MULTIPATH            = 'multipath'
-CAP_NETWORK_CONFIG       = 'network-config'
-CAP_NETWORK_STATUS       = 'network-status'
-CAP_OPENVSWITCH_LOGS    = 'ovs-system-logs'
-CAP_PROCESS_LIST         = 'process-list'
-CAP_SYSTEM_LOGS          = 'system-logs'
-CAP_SYSTEM_SERVICES      = 'system-services'
-CAP_YUM                  = 'yum'
+CAP_BOOT_LOADER = 'boot-loader'
+CAP_DISK_INFO = 'disk-info'
+CAP_HARDWARE_INFO = 'hardware-info'
+CAP_KERNEL_INFO = 'kernel-info'
+CAP_LOSETUP_A = 'loopback-devices'
+CAP_MULTIPATH = 'multipath'
+CAP_NETWORK_CONFIG = 'network-config'
+CAP_NETWORK_INFO = 'network-info'
+CAP_NETWORK_STATUS = 'network-status'
+CAP_OPENVSWITCH_LOGS = 'ovs-system-logs'
+CAP_PROCESS_LIST = 'process-list'
+CAP_SYSTEM_LOGS = 'system-logs'
+CAP_SYSTEM_SERVICES = 'system-services'
+CAP_YUM = 'yum'
 
 KB = 1024
 MB = 1024 * 1024
@@ -219,8 +214,11 @@ caps = {}
 cap_sizes = {}
 unlimited_data = False
 dbg = False
-# Default value for the number of rotated logs.
+# Default value for the number of days to collect logs.
 log_days = 20
+log_last_mod_time = None
+free_disk_space = None
+
 
 def cap(key, pii=PII_MAYBE, min_size=-1, max_size=-1, min_time=-1,
         max_time=-1, mime=MIME_TEXT, checked=True, hidden=False):
@@ -229,31 +227,20 @@ def cap(key, pii=PII_MAYBE, min_size=-1, max_size=-1, min_time=-1,
     cap_sizes[key] = 0
 
 
-cap(CAP_BOOT_LOADER,         PII_NO,                    max_size=3*KB,
-    max_time=5)
-cap(CAP_DISK_INFO,           PII_MAYBE,                 max_size=50*KB,
-    max_time=20)
-cap(CAP_HARDWARE_INFO,       PII_MAYBE,                 max_size=30*KB,
-    max_time=20)
-cap(CAP_KERNEL_INFO,         PII_MAYBE,                 max_size=120*KB,
-    max_time=5)
-cap(CAP_LOSETUP_A,           PII_MAYBE,                 max_size=KB, max_time=5)
-cap(CAP_MULTIPATH,           PII_MAYBE,                 max_size=20*KB,
-    max_time=10)
-cap(CAP_NETWORK_CONFIG,      PII_IF_CUSTOMIZED,
-                                        min_size=0,     max_size=40*KB)
-cap(CAP_NETWORK_STATUS,      PII_YES,                   max_size=50*MB,
-    max_time=30)
-cap(CAP_OPENVSWITCH_LOGS,    PII_MAYBE,                 max_size=-1,
-    max_time=5)
-cap(CAP_PROCESS_LIST,        PII_YES,                   max_size=30*KB,
-    max_time=20)
-cap(CAP_SYSTEM_LOGS,         PII_MAYBE,                 max_size=200*MB,
-    max_time=5)
-cap(CAP_SYSTEM_SERVICES,     PII_NO,                    max_size=5*KB,
-    max_time=20)
-cap(CAP_YUM,                 PII_IF_CUSTOMIZED,         max_size=10*KB,
-    max_time=30)
+cap(CAP_BOOT_LOADER, PII_NO, max_size=3 * KB, max_time=5)
+cap(CAP_DISK_INFO, PII_MAYBE, max_size=50 * KB, max_time=20)
+cap(CAP_HARDWARE_INFO, PII_MAYBE, max_size=2 * MB, max_time=20)
+cap(CAP_KERNEL_INFO, PII_MAYBE, max_size=120 * KB, max_time=5)
+cap(CAP_LOSETUP_A, PII_MAYBE, max_size=KB, max_time=5)
+cap(CAP_MULTIPATH, PII_MAYBE, max_size=20 * KB, max_time=10)
+cap(CAP_NETWORK_CONFIG, PII_IF_CUSTOMIZED, min_size=0, max_size=5 * MB)
+cap(CAP_NETWORK_INFO, PII_YES, max_size=50 * MB, max_time=30)
+cap(CAP_NETWORK_STATUS, PII_YES, max_size=-1, max_time=30)
+cap(CAP_OPENVSWITCH_LOGS, PII_MAYBE, max_size=-1, max_time=5)
+cap(CAP_PROCESS_LIST, PII_YES, max_size=30 * KB, max_time=20)
+cap(CAP_SYSTEM_LOGS, PII_MAYBE, max_size=200 * MB, max_time=5)
+cap(CAP_SYSTEM_SERVICES, PII_NO, max_size=5 * KB, max_time=20)
+cap(CAP_YUM, PII_IF_CUSTOMIZED, max_size=10 * KB, max_time=30)
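The body of cap() sits mostly outside this hunk, but the column constants above (KEY = 0 through HIDDEN = 8) and lookups such as caps[cap][MAX_SIZE] elsewhere in the file imply that each registry entry is a sequence indexed by those constants. An illustrative mini-registry under that assumption:

KB = 1024
MB = 1024 * 1024
(KEY, PII, MIN_SIZE, MAX_SIZE,
 MIN_TIME, MAX_TIME, MIME, CHECKED, HIDDEN) = range(9)

caps = {}
cap_sizes = {}

def cap(key, pii='maybe', min_size=-1, max_size=-1, min_time=-1,
        max_time=-1, mime='text/plain', checked=True, hidden=False):
    # Assumption: entries are stored as tuples indexed by the constants above.
    caps[key] = (key, pii, min_size, max_size, min_time, max_time,
                 mime, checked, hidden)
    cap_sizes[key] = 0

cap('system-logs', 'maybe', max_size=200 * MB, max_time=5)
print(caps['system-logs'][MAX_SIZE])   # 209715200, i.e. the 200 MB cap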
 
 ANSWER_YES_TO_ALL = False
 SILENT_MODE = False
@@ -261,14 +248,17 @@ entries = None
 data = {}
 dev_null = open('/dev/null', 'r+')
 
+
 def output(x):
     global SILENT_MODE
     if not SILENT_MODE:
         print x
 
+
 def output_ts(x):
     output("[%s]  %s" % (time.strftime("%x %X %Z"), x))
 
+
 def cmd_output(cap, args, label=None, filter=None, binary=False):
     if cap in entries:
         if not label:
@@ -281,7 +271,8 @@ def cmd_output(cap, args, label=None, filter=None, binary=False):
         data[label] = {'cap': cap, 'cmd_args': args, 'filter': filter,
                        'binary': binary}
 
-def file_output(cap, path_list, newest_first=False):
+
+def file_output(cap, path_list, newest_first=False, last_mod_time=None):
     """
     If newest_first is True, the list of files in path_list is sorted
     by file modification time in descending order, else its sorted
@@ -292,21 +283,20 @@ def file_output(cap, path_list, newest_first=False):
         for path in path_list:
             try:
                 s = os.stat(path)
-            except OSError, e:
+            except OSError:
                 continue
-            path_entries.append((path, s))
+            if last_mod_time is None or s.st_mtime >= last_mod_time:
+                path_entries.append((path, s))
 
         mtime = lambda(path, stat): stat.st_mtime
         path_entries.sort(key=mtime, reverse=newest_first)
         for p in path_entries:
-            if unlimited_data or caps[cap][MAX_SIZE] == -1 or \
-                    cap_sizes[cap] < caps[cap][MAX_SIZE]:
+            if check_space(cap, p[0], p[1].st_size):
                 data[p] = {'cap': cap, 'filename': p[0]}
-                cap_sizes[cap] += p[1].st_size
-            else:
-                output("Omitting %s, size constraint of %s exceeded" % (p[0], cap))
 
-def tree_output(cap, path, pattern=None, negate=False, newest_first=False):
+
+def tree_output(cap, path, pattern=None, negate=False, newest_first=False,
+                last_mod_time=None):
     """
     Walks the directory tree rooted at path. Files in current dir are processed
     before files in sub-dirs.
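The last_mod_time parameter added to file_output() above drops files whose modification time is older than a cutoff before any size accounting runs. A standalone sketch of that filter, using hypothetical log paths and the same days-to-seconds arithmetic as the log_last_mod_time computation in main():

import os
import time

def recent_files(paths, days):
    cutoff = int(time.time()) - days * 86400
    entries = []
    for path in paths:
        try:
            s = os.stat(path)
        except OSError:
            continue                 # missing files are silently skipped
        if s.st_mtime >= cutoff:
            entries.append((path, s))
    entries.sort(key=lambda e: e[1].st_mtime, reverse=True)  # newest first
    return [p for p, _ in entries]

print(recent_files(['/var/log/syslog', '/var/log/messages'], days=20))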
@@ -316,22 +306,26 @@ def tree_output(cap, path, pattern=None, negate=False, newest_first=False):
             for root, dirs, files in os.walk(path):
                 fns = [fn for fn in [os.path.join(root, f) for f in files]
                        if os.path.isfile(fn) and matches(fn, pattern, negate)]
-                file_output(cap, fns, newest_first=newest_first)
+                file_output(cap, fns, newest_first=newest_first,
+                            last_mod_time=last_mod_time)
+
+
+def prefix_output(cap, prefix, newest_first=False, last_mod_time=None):
+    """
+    Output files with the same prefix.
+    """
+    fns = []
+    for root, dirs, files in os.walk(os.path.dirname(prefix)):
+        fns += [fn for fn in [os.path.join(root, f) for f in files]
+                if fn.startswith(prefix)]
+    file_output(cap, fns, newest_first=newest_first,
+                last_mod_time=last_mod_time)
+
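prefix_output() replaces the removed log_output() helper, which guessed rotated names by appending .1 through .N and .N.gz; instead it walks the log directory and keeps every file whose full path starts with the given prefix. A sketch with a hypothetical prefix:

import os

def files_with_prefix(prefix):
    fns = []
    for root, dirs, files in os.walk(os.path.dirname(prefix)):
        fns += [fn for fn in [os.path.join(root, f) for f in files]
                if fn.startswith(prefix)]
    return fns

# Picks up ovs-vswitchd.log, ovs-vswitchd.log.1, ovs-vswitchd.log.2.gz, ...
# (hypothetical path; the script builds it from @LOGDIR@).
print(files_with_prefix('/var/log/openvswitch/ovs-vswitchd.log'))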
 
 def func_output(cap, label, func):
     if cap in entries:
-        t = str(func).split()
         data[label] = {'cap': cap, 'func': func}
 
-def log_output(cap, logs, newest_first=False):
-    global log_days
-    file_output(cap, logs)
-    file_output(cap,
-        ['%s.%d' % (f, n) for n in range(1, log_days+1) for f in logs], \
-        newest_first=newest_first)
-    file_output(cap,
-        ['%s.%d.gz' % (f, n) for n in range(1, log_days+1) for f in logs], \
-        newest_first=newest_first)
 
 def collect_data():
     process_lists = {}
@@ -351,12 +345,8 @@ def collect_data():
                 f = open(v['filename'], 'r')
                 s = f.read()
                 f.close()
-                if unlimited_data or caps[cap][MAX_SIZE] == -1 or \
-                        cap_sizes[cap] < caps[cap][MAX_SIZE]:
+                if check_space(cap, v['filename'], len(s)):
                     v['output'] = StringIOmtime(s)
-                    cap_sizes[cap] += len(s)
-                else:
-                    output("Omitting %s, size constraint of %s exceeded" % (v['filename'], cap))
             except:
                 pass
         elif v.has_key('func'):
@@ -364,24 +354,50 @@ def collect_data():
                 s = v['func'](cap)
             except Exception, e:
                 s = str(e)
-            if unlimited_data or caps[cap][MAX_SIZE] == -1 or \
-                    cap_sizes[cap] < caps[cap][MAX_SIZE]:
+            if check_space(cap, k, len(s)):
                 v['output'] = StringIOmtime(s)
-                cap_sizes[cap] += len(s)
-            else:
-                output("Omitting %s, size constraint of %s exceeded" % (k, cap))
 
     run_procs(process_lists.values())
 
 
 def main(argv=None):
     global ANSWER_YES_TO_ALL, SILENT_MODE
-    global entries, data, dbg, unlimited_data, log_days
+    global entries, data, dbg, unlimited_data, free_disk_space
+    global log_days, log_last_mod_time
 
     # Filter flags
     only_ovs_info = False
     collect_all_info = True
 
+    if '--help' in sys.argv:
+        print """\
+%(argv0)s: create status report bundles to assist in problem diagnosis
+usage: %(argv0)s OPTIONS
+
+By default, %(argv0)s prompts for permission to collect each form of status
+information and produces a .tar.gz file as output.
+
+The following options are available.
+  --help                     display this help message, then exit
+  -s, --silent               suppress most output to stdout
+
+Options for categories of data to collect:
+  --entries=CAP_A,CAP_B,...  set categories of data to collect
+  --all                      collect all categories
+  --ovs                      collect only directly OVS-related info
+  --log-days=DAYS            collect DAYS worth of old logs
+  -y, --yestoall             suppress prompts to confirm collection
+  --capabilities             print categories as XML on stdout, then exit
+
+Output options:
+  --output=FORMAT            set output format to one of tar tar.bz2 tar.gz zip
+  --outfile=FILE             write output to FILE
+  --outfd=FD                 write output to FD (requires --output=tar)
+  --unlimited                ignore default limits on sizes of data collected
+  --debug                    print ovs-bugtool debug info on stdout\
+""" % {'argv0': sys.argv[0]}
+        sys.exit(0)
+
     # we need access to privileged files, exit if we are not running as root
     if os.getuid() != 0:
         print >>sys.stderr, "Error: ovs-bugtool must be run as root"
@@ -417,7 +433,7 @@ def main(argv=None):
             return 0
 
         if k == '--output':
-            if  v in ['tar', 'tar.bz2', 'tar.gz', 'zip']:
+            if v in ['tar', 'tar.bz2', 'tar.gz', 'zip']:
                 output_type = v
             else:
                 print >>sys.stderr, "Invalid output format '%s'" % v
@@ -475,8 +491,14 @@ def main(argv=None):
         print >>sys.stderr, "Cannot set both '--outfd' and '--outfile'"
         return 2
 
+    if output_file is not None and not unlimited_data:
+        free_disk_space = get_free_disk_space(output_file) * 90 / 100
+
+    log_last_mod_time = int(time.time()) - log_days * 86400
+
     if ANSWER_YES_TO_ALL:
-        output("Warning: '--yestoall' argument provided, will not prompt for individual files.")
+        output("Warning: '--yestoall' argument provided, will not prompt "
+               "for individual files.")
 
     output('''
 This application will collate dmesg output, details of the
@@ -496,7 +518,8 @@ exclude those logs from the archive.
 
     file_output(CAP_BOOT_LOADER, [GRUB_CONFIG])
     cmd_output(CAP_BOOT_LOADER, [LS, '-lR', '/boot'])
-    cmd_output(CAP_BOOT_LOADER, [MD5SUM, BOOT_KERNEL, BOOT_INITRD], label='vmlinuz-initrd.md5sum')
+    cmd_output(CAP_BOOT_LOADER, [MD5SUM, BOOT_KERNEL, BOOT_INITRD],
+               label='vmlinuz-initrd.md5sum')
 
     cmd_output(CAP_DISK_INFO, [FDISK, '-l'])
     file_output(CAP_DISK_INFO, [PROC_PARTITIONS, PROC_MOUNTS])
@@ -511,7 +534,8 @@ exclude those logs from the archive.
     cmd_output(CAP_DISK_INFO, [SG_MAP, '-x'])
     func_output(CAP_DISK_INFO, 'scsi-hosts', dump_scsi_hosts)
 
-    file_output(CAP_HARDWARE_INFO, [PROC_CPUINFO, PROC_MEMINFO, PROC_IOPORTS, PROC_INTERRUPTS])
+    file_output(CAP_HARDWARE_INFO,
+                [PROC_CPUINFO, PROC_MEMINFO, PROC_IOPORTS, PROC_INTERRUPTS])
     cmd_output(CAP_HARDWARE_INFO, [DMIDECODE])
     cmd_output(CAP_HARDWARE_INFO, [LSPCI, '-n'])
     cmd_output(CAP_HARDWARE_INFO, [LSPCI, '-vv'])
@@ -519,9 +543,8 @@ exclude those logs from the archive.
     file_output(CAP_HARDWARE_INFO, [SYSCONFIG_HWCONF])
     cmd_output(CAP_HARDWARE_INFO, [LS, '-lR', '/dev'])
 
-
-    file_output(CAP_KERNEL_INFO, [PROC_VERSION, PROC_MODULES, PROC_DEVICES,
-                                  PROC_FILESYSTEMS, PROC_CMDLINE])
+    file_output(CAP_KERNEL_INFO, [PROC_VERSION, PROC_MODULES,
+                PROC_DEVICES, PROC_FILESYSTEMS, PROC_CMDLINE])
     cmd_output(CAP_KERNEL_INFO, [ZCAT, PROC_CONFIG], label='config')
     cmd_output(CAP_KERNEL_INFO, [SYSCTL, '-A'])
     file_output(CAP_KERNEL_INFO, [MODPROBE_CONF])
@@ -539,17 +562,21 @@ exclude those logs from the archive.
 
     tree_output(CAP_NETWORK_CONFIG, SYSCONFIG_NETWORK_SCRIPTS, IFCFG_RE)
     tree_output(CAP_NETWORK_CONFIG, SYSCONFIG_NETWORK_SCRIPTS, ROUTE_RE)
-    file_output(CAP_NETWORK_CONFIG, [SYSCONFIG_NETWORK, RESOLV_CONF, NSSWITCH_CONF, HOSTS])
-    file_output(CAP_NETWORK_CONFIG, [NTP_CONF, IPTABLES_CONFIG, HOSTS_ALLOW, HOSTS_DENY])
-    file_output(CAP_NETWORK_CONFIG, [OPENVSWITCH_CONF_DB])
-
-    cmd_output(CAP_NETWORK_STATUS, [IFCONFIG, '-a'])
-    cmd_output(CAP_NETWORK_STATUS, [ROUTE, '-n'])
-    cmd_output(CAP_NETWORK_STATUS, [ARP, '-n'])
-    cmd_output(CAP_NETWORK_STATUS, [NETSTAT, '-an'])
+    file_output(CAP_NETWORK_CONFIG,
+                [SYSCONFIG_NETWORK, RESOLV_CONF, NSSWITCH_CONF, HOSTS])
+    file_output(CAP_NETWORK_CONFIG,
+                [NTP_CONF, IPTABLES_CONFIG, HOSTS_ALLOW, HOSTS_DENY])
+    file_output(CAP_NETWORK_CONFIG, [OPENVSWITCH_DEFAULT_SWITCH,
+                OPENVSWITCH_SYSCONFIG_SWITCH])
+
+    cmd_output(CAP_NETWORK_INFO, [IFCONFIG, '-a'])
+    cmd_output(CAP_NETWORK_INFO, [ROUTE, '-n'])
+    cmd_output(CAP_NETWORK_INFO, [ARP, '-n'])
+    cmd_output(CAP_NETWORK_INFO, [NETSTAT, '-an'])
     for dir in DHCP_LEASE_DIR:
-        tree_output(CAP_NETWORK_STATUS, dir)
-    cmd_output(CAP_NETWORK_STATUS, [IPTABLES, '-nL'])
+        tree_output(CAP_NETWORK_INFO, dir)
+    for table in ['filter', 'nat', 'mangle', 'raw', 'security']:
+        cmd_output(CAP_NETWORK_INFO, [IPTABLES, '-t', table, '-nL'])
     for p in os.listdir('/sys/class/net/'):
         try:
             f = open('/sys/class/net/%s/type' % p, 'r')
@@ -557,49 +584,48 @@ exclude those logs from the archive.
             f.close()
             if os.path.islink('/sys/class/net/%s/device' % p) and int(t) == 1:
                 # ARPHRD_ETHER
-                cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, '-S', p])
+                cmd_output(CAP_NETWORK_INFO, [ETHTOOL, '-S', p])
                 if not p.startswith('vif') and not p.startswith('tap'):
-                    cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, p])
-                    cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, '-k', p])
-                    cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, '-i', p])
-                    cmd_output(CAP_NETWORK_STATUS, [ETHTOOL, '-c', p])
+                    cmd_output(CAP_NETWORK_INFO, [ETHTOOL, p])
+                    cmd_output(CAP_NETWORK_INFO, [ETHTOOL, '-k', p])
+                    cmd_output(CAP_NETWORK_INFO, [ETHTOOL, '-i', p])
+                    cmd_output(CAP_NETWORK_INFO, [ETHTOOL, '-c', p])
             if int(t) == 1:
-                cmd_output(CAP_NETWORK_STATUS,
+                cmd_output(CAP_NETWORK_INFO,
                            [TC, '-s', '-d', 'class', 'show', 'dev', p])
         except:
             pass
-    tree_output(CAP_NETWORK_STATUS, PROC_NET_BONDING_DIR)
-    tree_output(CAP_NETWORK_STATUS, PROC_NET_VLAN_DIR)
-    cmd_output(CAP_NETWORK_STATUS, [TC, '-s', 'qdisc'])
-    file_output(CAP_NETWORK_STATUS, [PROC_NET_SOFTNET_STAT])
+    tree_output(CAP_NETWORK_INFO, PROC_NET_BONDING_DIR)
+    tree_output(CAP_NETWORK_INFO, PROC_NET_VLAN_DIR)
+    cmd_output(CAP_NETWORK_INFO, [TC, '-s', 'qdisc'])
+    file_output(CAP_NETWORK_INFO, [PROC_NET_SOFTNET_STAT])
+
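The per-interface loop above keys off /sys/class/net/<dev>/type, which reads 1 for ARPHRD_ETHER, and only runs the fuller set of ethtool queries when a 'device' symlink marks a backing NIC and the name is not a vif* or tap* port. A standalone sketch of that filter:

import os

def physical_ethernet_devices():
    devs = []
    for p in os.listdir('/sys/class/net/'):
        try:
            with open('/sys/class/net/%s/type' % p) as f:
                t = int(f.readline())
        except (IOError, ValueError):
            continue
        # type 1 is ARPHRD_ETHER; a 'device' symlink means a backing NIC.
        if t == 1 and os.path.islink('/sys/class/net/%s/device' % p):
            devs.append(p)
    return devs

print(physical_ethernet_devices())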
+    collect_ovsdb()
     if os.path.exists(OPENVSWITCH_VSWITCHD_PID):
         cmd_output(CAP_NETWORK_STATUS, [OVS_DPCTL, 'show', '-s'])
         for d in dp_list():
-            cmd_output(CAP_NETWORK_STATUS, [OVS_DPCTL, 'dump-flows', d])
-        try:
-            vspidfile = open(OPENVSWITCH_VSWITCHD_PID)
-            vspid = int(vspidfile.readline().strip())
-            vspidfile.close()
-            for b in bond_list(vspid):
-                cmd_output(CAP_NETWORK_STATUS,
-                           [OVS_APPCTL, '-t', '@RUNDIR@/ovs-vswitchd.%s.ctl' % vspid, '-e' 'bond/show %s' % b],
-                           'ovs-appctl-bond-show-%s.out' % b)
-        except e:
-            pass
+            cmd_output(CAP_NETWORK_STATUS, [OVS_DPCTL, 'dump-flows', '-m', d])
 
-    cmd_output(CAP_PROCESS_LIST, [PS, 'wwwaxf', '-eo', 'pid,tty,stat,time,nice,psr,pcpu,pmem,nwchan,wchan:25,args'], label='process-tree')
+    cmd_output(CAP_PROCESS_LIST, [PS, 'wwwaxf', '-eo',
+               'pid,tty,stat,time,nice,psr,pcpu,pmem,nwchan,wchan:25,args'],
+               label='process-tree')
     func_output(CAP_PROCESS_LIST, 'fd_usage', fd_usage)
 
-    system_logs = ([ VAR_LOG_DIR + x for x in
+    system_logs = ([VAR_LOG_DIR + x for x in
         ['crit.log', 'kern.log', 'daemon.log', 'user.log',
         'syslog', 'messages', 'secure', 'debug', 'dmesg', 'boot']])
-    ovs_logs = ([ OPENVSWITCH_LOG_DIR + x for x in
+    for log in system_logs:
+        prefix_output(CAP_SYSTEM_LOGS, log, last_mod_time=log_last_mod_time)
+
+    ovs_logs = ([OPENVSWITCH_LOG_DIR + x for x in
         ['ovs-vswitchd.log', 'ovsdb-server.log',
         'ovs-xapi-sync.log', 'ovs-monitor-ipsec.log', 'ovs-ctl.log']])
-    log_output(CAP_SYSTEM_LOGS, system_logs)
-    log_output(CAP_OPENVSWITCH_LOGS, ovs_logs)
+    for log in ovs_logs:
+        prefix_output(CAP_OPENVSWITCH_LOGS, log,
+                      last_mod_time=log_last_mod_time)
 
-    if not os.path.exists('/var/log/dmesg') and not os.path.exists('/var/log/boot'):
+    if not os.path.exists('/var/log/dmesg') and \
+       not os.path.exists('/var/log/boot'):
         cmd_output(CAP_SYSTEM_LOGS, [DMESG])
 
     cmd_output(CAP_SYSTEM_SERVICES, [CHKCONFIG, '--list'])
@@ -611,7 +637,8 @@ exclude those logs from the archive.
     cmd_output(CAP_YUM, [RPM, '-qa'])
     file_output(CAP_YUM, [APT_SOURCES_LIST])
     tree_output(CAP_YUM, APT_SOURCES_LIST_D)
-    cmd_output(CAP_YUM, [DPKG_QUERY, '-W', '-f=${Package} ${Version} ${Status}\n'], 'dpkg-packages')
+    cmd_output(CAP_YUM, [DPKG_QUERY, '-W',
+               '-f=${Package} ${Version} ${Status}\n'], 'dpkg-packages')
 
     # Filter out ovs relevant information if --ovs option passed
     # else collect all information
@@ -644,13 +671,13 @@ exclude those logs from the archive.
     # permit the user to filter out data
     # We cannot use iteritems, since we modify 'data' as we pass through
     for (k, v) in sorted(data.items()):
-       cap = v['cap']
-       if 'filename' in v:
-           key = k[0]
-       else:
-           key = k
-       if not ANSWER_YES_TO_ALL and not yes("Include '%s'? [Y/n]: " % key):
-           del data[k]
+        cap = v['cap']
+        if 'filename' in v:
+            key = k[0]
+        else:
+            key = k
+        if not ANSWER_YES_TO_ALL and not yes("Include '%s'? [Y/n]: " % key):
+            del data[k]
 
     # collect selected data now
     output_ts('Running commands to collect data')
@@ -659,7 +686,8 @@ exclude those logs from the archive.
     subdir = "bug-report-%s" % time.strftime("%Y%m%d%H%M%S")
 
     # include inventory
-    data['inventory.xml'] = {'cap': None, 'output': StringIOmtime(make_inventory(data, subdir))}
+    data['inventory.xml'] = {'cap': None,
+                        'output': StringIOmtime(make_inventory(data, subdir))}
 
     # create archive
     if output_fd == -1:
@@ -686,8 +714,11 @@ exclude those logs from the archive.
         for c in caps.keys():
             print >>sys.stderr, "    %s (%d, %d)" % (c, caps[c][MAX_SIZE],
                                                      cap_sizes[c])
+
+    cleanup_ovsdb()
     return 0
 
+
 def dump_scsi_hosts(cap):
     output = ''
     l = os.listdir('/sys/class/scsi_host')
@@ -709,11 +740,13 @@ def dump_scsi_hosts(cap):
         except:
                 pass
 
-        output += "%s:\n" %h
-        output += "    %s%s\n" % (procname, modelname and (" -> %s" % modelname) or '')
+        output += "%s:\n" % h
+        output += "    %s%s\n" \
+                  % (procname, modelname and (" -> %s" % modelname) or '')
 
     return output
 
+
 def module_info(cap):
     output = StringIO.StringIO()
     modules = open(PROC_MODULES, 'r')
@@ -721,7 +754,8 @@ def module_info(cap):
 
     for line in modules:
         module = line.split()[0]
-        procs.append(ProcOutput([MODINFO, module], caps[cap][MAX_TIME], output))
+        procs.append(ProcOutput([MODINFO, module],
+                     caps[cap][MAX_TIME], output))
     modules.close()
 
     run_procs([procs])
@@ -736,9 +770,11 @@ def multipathd_topology(cap):
 
     return stdout
 
+
 def dp_list():
     output = StringIO.StringIO()
-    procs = [ProcOutput([OVS_DPCTL, 'dump-dps'], caps[CAP_NETWORK_STATUS][MAX_TIME], output)]
+    procs = [ProcOutput([OVS_DPCTL, 'dump-dps'],
+             caps[CAP_NETWORK_STATUS][MAX_TIME], output)]
 
     run_procs([procs])
 
@@ -746,25 +782,47 @@ def dp_list():
         return output.getvalue().splitlines()
     return []
 
-def bond_list(pid):
-    output = StringIO.StringIO()
-    procs = [ProcOutput([OVS_APPCTL, '-t', '@RUNDIR@/ovs-vswitchd.%s.ctl' % pid, '-e' 'bond/list'], caps[CAP_NETWORK_STATUS][MAX_TIME], output)]
 
-    run_procs([procs])
+def collect_ovsdb():
+    if not os.path.isfile(OPENVSWITCH_CONF_DB):
+        return
+
+    max_size = 10 * MB
+
+    try:
+        if os.path.getsize(OPENVSWITCH_CONF_DB) > max_size:
+            if os.path.isfile(OPENVSWITCH_COMPACT_DB):
+                os.unlink(OPENVSWITCH_COMPACT_DB)
+
+            output = StringIO.StringIO()
+            max_time = 5
+            procs = [ProcOutput(['ovsdb-tool', 'compact',
+                                OPENVSWITCH_CONF_DB, OPENVSWITCH_COMPACT_DB],
+                                max_time, output)]
+            run_procs([procs])
+            file_output(CAP_NETWORK_STATUS, [OPENVSWITCH_COMPACT_DB])
+        else:
+            file_output(CAP_NETWORK_STATUS, [OPENVSWITCH_CONF_DB])
+    except OSError:
+        return
+
+
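collect_ovsdb() ships a compacted copy of the database when conf.db exceeds 10 MB, by running "ovsdb-tool compact SOURCE TARGET", and falls back to the raw file otherwise. A condensed sketch of that decision, using subprocess directly instead of the script's ProcOutput wrapper; the paths here are assumed install defaults, whereas the script itself relies on the @DBDIR@ substitution:

import os
import subprocess

MB = 1024 * 1024
CONF_DB = '/etc/openvswitch/conf.db'                     # assumed path
COMPACT_DB = '/etc/openvswitch/bugtool-compact-conf.db'  # assumed path

def db_to_collect():
    if not os.path.isfile(CONF_DB):
        return None
    if os.path.getsize(CONF_DB) <= 10 * MB:
        return CONF_DB
    if os.path.isfile(COMPACT_DB):
        os.unlink(COMPACT_DB)            # stale copy from a previous run
    subprocess.check_call(['ovsdb-tool', 'compact', CONF_DB, COMPACT_DB])
    return COMPACT_DB

print(db_to_collect())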
+def cleanup_ovsdb():
+    try:
+        if os.path.isfile(OPENVSWITCH_COMPACT_DB):
+            os.unlink(OPENVSWITCH_COMPACT_DB)
+    except:
+        return
 
-    if not procs[0].timed_out:
-        bonds = output.getvalue().splitlines()[1:]
-        return [x.split('\t')[1] for x in bonds]
-    return []
 
 def fd_usage(cap):
     output = ''
     fd_dict = {}
     for d in [p for p in os.listdir('/proc') if p.isdigit()]:
         try:
-            fh = open('/proc/'+d+'/cmdline')
+            fh = open('/proc/' + d + '/cmdline')
             name = fh.readline()
-            num_fds = len(os.listdir(os.path.join('/proc/'+d+'/fd')))
+            num_fds = len(os.listdir(os.path.join('/proc/' + d + '/fd')))
             if num_fds > 0:
                 if not num_fds in fd_dict:
                     fd_dict[num_fds] = []
@@ -777,6 +835,7 @@ def fd_usage(cap):
         output += "%s: %s\n" % (k, str(fd_dict[k]))
     return output
 
+
 def dump_rdac_groups(cap):
     output = StringIO.StringIO()
     procs = [ProcOutput([MPPUTIL, '-a'], caps[cap][MAX_TIME], output)]
@@ -794,7 +853,10 @@ def dump_rdac_groups(cap):
                 group, _ = line.split(None, 1)
                 cmd_output(cap, [MPPUTIL, '-g', group])
 
+
 def load_plugins(just_capabilities=False, filter=None):
+    global log_last_mod_time
+
     def getText(nodelist):
         rc = ""
         for node in nodelist:
@@ -809,7 +871,8 @@ def load_plugins(just_capabilities=False, filter=None):
             ret = val in ['true', 'yes']
         return ret
 
-    for dir in [d for d in os.listdir(PLUGIN_DIR) if os.path.isdir(os.path.join(PLUGIN_DIR, d))]:
+    for dir in [d for d in os.listdir(PLUGIN_DIR)
+                if os.path.isdir(os.path.join(PLUGIN_DIR, d))]:
         if not caps.has_key(dir):
             if not os.path.exists("%s/%s.xml" % (PLUGIN_DIR, dir)):
                 continue
@@ -817,24 +880,29 @@ def load_plugins(just_capabilities=False, filter=None):
             assert xmldoc.documentElement.tagName == "capability"
 
             pii, min_size, max_size, min_time, max_time, mime = \
-                 PII_MAYBE, -1,-1,-1,-1, MIME_TEXT
+                 PII_MAYBE, -1, -1, -1, -1, MIME_TEXT
 
-            if xmldoc.documentElement.getAttribute("pii") in [PII_NO, PII_YES, PII_MAYBE, PII_IF_CUSTOMIZED]:
+            if xmldoc.documentElement.getAttribute("pii") in \
+               [PII_NO, PII_YES, PII_MAYBE, PII_IF_CUSTOMIZED]:
                 pii = xmldoc.documentElement.getAttribute("pii")
             if xmldoc.documentElement.getAttribute("min_size") != '':
-                min_size = long(xmldoc.documentElement.getAttribute("min_size"))
+                min_size = long(
+                            xmldoc.documentElement.getAttribute("min_size"))
             if xmldoc.documentElement.getAttribute("max_size") != '':
-                max_size = long(xmldoc.documentElement.getAttribute("max_size"))
+                max_size = long(
+                            xmldoc.documentElement.getAttribute("max_size"))
             if xmldoc.documentElement.getAttribute("min_time") != '':
                 min_time = int(xmldoc.documentElement.getAttribute("min_time"))
             if xmldoc.documentElement.getAttribute("max_time") != '':
                 max_time = int(xmldoc.documentElement.getAttribute("max_time"))
-            if xmldoc.documentElement.getAttribute("mime") in [MIME_DATA, MIME_TEXT]:
+            if xmldoc.documentElement.getAttribute("mime") in \
+               [MIME_DATA, MIME_TEXT]:
                 mime = xmldoc.documentElement.getAttribute("mime")
             checked = getBoolAttr(xmldoc.documentElement, 'checked', True)
             hidden = getBoolAttr(xmldoc.documentElement, 'hidden', False)
 
-            cap(dir, pii, min_size, max_size, min_time, max_time, mime, checked, hidden)
+            cap(dir, pii, min_size, max_size, min_time, max_time, mime,
+                checked, hidden)
 
         if just_capabilities:
             continue
@@ -855,24 +923,35 @@ def load_plugins(just_capabilities=False, filter=None):
                 if el.tagName == "files":
                     newest_first = getBoolAttr(el, 'newest_first')
                     if el.getAttribute("type") == "logs":
-                        log_output(dir, getText(el.childNodes).split(),
-                                    newest_first=newest_first)
+                        for fn in getText(el.childNodes).split():
+                            prefix_output(dir, fn, newest_first=newest_first,
+                                          last_mod_time=log_last_mod_time)
                     else:
                         file_output(dir, getText(el.childNodes).split(),
                                     newest_first=newest_first)
                 elif el.tagName == "directory":
                     pattern = el.getAttribute("pattern")
-                    if pattern == '': pattern = None
+                    if pattern == '':
+                        pattern = None
                     negate = getBoolAttr(el, 'negate')
                     newest_first = getBoolAttr(el, 'newest_first')
-                    tree_output(dir, getText(el.childNodes),
-                                pattern and re.compile(pattern) or None,
-                                negate=negate, newest_first=newest_first)
+                    if el.getAttribute("type") == "logs":
+                        tree_output(dir, getText(el.childNodes),
+                                    pattern and re.compile(pattern) or None,
+                                    negate=negate, newest_first=newest_first,
+                                    last_mod_time=log_last_mod_time)
+                    else:
+                        tree_output(dir, getText(el.childNodes),
+                                    pattern and re.compile(pattern) or None,
+                                    negate=negate, newest_first=newest_first)
                 elif el.tagName == "command":
                     label = el.getAttribute("label")
-                    if label == '': label = None
+                    if label == '':
+                        label = None
                     binary = getBoolAttr(el, 'binary')
-                    cmd_output(dir, getText(el.childNodes), label, binary=binary)
+                    cmd_output(dir,
+                               getText(el.childNodes), label, binary=binary)
+
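For reference, a hypothetical plugin snippet (element and attribute names taken from the loader above; the root tag and the values are invented) showing how files, directory and command elements map onto prefix_output(), tree_output() and cmd_output(); with the "logs" type, the new last_mod_time cutoff is applied as well:

from xml.dom.minidom import parseString

# Hypothetical collection descriptor; real plugins live under PLUGIN_DIR.
EXAMPLE = """\
<collect>
  <files type="logs" newest_first="yes">/var/log/openvswitch/ovs-ctl.log</files>
  <directory pattern=".*\\.conf$" negate="no">/etc/openvswitch</directory>
  <command label="ovs-vsctl-show">ovs-vsctl show</command>
</collect>
"""

doc = parseString(EXAMPLE)
for el in doc.documentElement.childNodes:
    if el.nodeType == el.ELEMENT_NODE:
        print('%s -> %s' % (el.tagName,
                            el.getAttribute('label') or
                            el.getAttribute('type') or
                            el.getAttribute('pattern')))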
 
 def make_tar(subdir, suffix, output_fd, output_file):
     global SILENT_MODE, data
@@ -919,7 +998,7 @@ def make_tar(subdir, suffix, output_fd, output_file):
         tf.close()
 
     if output_fd == -1:
-        output ('Writing tarball %s successful.' % filename)
+        output('Writing tarball %s successful.' % filename)
         if SILENT_MODE:
             print filename
 
@@ -953,7 +1032,7 @@ def make_zip(subdir, output_file):
     finally:
         zf.close()
 
-    output ('Writing archive %s successful.' % filename)
+    output('Writing archive %s successful.' % filename)
     if SILENT_MODE:
         print filename
 
@@ -977,11 +1056,13 @@ def make_inventory(inventory, subdir):
         inventory.items())
     return document.toprettyxml()
 
+
 def inventory_entry(document, subdir, k, v):
     try:
         el = document.createElement(INVENTORY_XML_ELEMENT)
         el.setAttribute('capability', v['cap'])
-        el.setAttribute('filename', os.path.join(subdir, construct_filename(k, v)))
+        el.setAttribute('filename',
+                        os.path.join(subdir, construct_filename(k, v)))
         el.setAttribute('md5sum', md5sum(v))
         document.getElementsByTagName(INVENTORY_XML_ROOT)[0].appendChild(el)
     except:
@@ -1016,9 +1097,11 @@ def construct_filename(k, v):
 
     return s
 
+
 def update_capabilities():
     pass
 
+
 def update_cap_size(cap, size):
     update_cap(cap, MIN_SIZE, size)
     update_cap(cap, MAX_SIZE, size)
@@ -1061,9 +1144,11 @@ def size_of(f, pattern, negate):
 def print_capabilities():
     document = getDOMImplementation().createDocument(
         "ns", CAP_XML_ROOT, None)
-    map(lambda key: capability(document, key), [k for k in caps.keys() if not caps[k][HIDDEN]])
+    map(lambda key: capability(document, key),
+        [k for k in caps.keys() if not caps[k][HIDDEN]])
     print document.toprettyxml()
 
+
 def capability(document, key):
     c = caps[key]
     el = document.createElement(CAP_XML_ELEMENT)
@@ -1091,6 +1176,7 @@ def yes(prompt):
 
 partition_re = re.compile(r'(.*[0-9]+$)|(^xvd)')
 
+
 def disk_list():
     disks = []
     try:
@@ -1110,7 +1196,8 @@ def disk_list():
 class ProcOutput:
     debug = False
 
-    def __init__(self, command, max_time, inst=None, filter=None, binary=False):
+    def __init__(self, command, max_time, inst=None, filter=None,
+                 binary=False):
         self.command = command
         self.max_time = max_time
         self.inst = inst
@@ -1130,7 +1217,8 @@ class ProcOutput:
         self.terminate()
 
     def cmdAsStr(self):
-        return isinstance(self.command, list) and ' '.join(self.command) or self.command
+        return isinstance(self.command, list) \
+                and ' '.join(self.command) or self.command
 
     def run(self):
         self.timed_out = False
@@ -1141,7 +1229,8 @@ class ProcOutput:
                               stdin=dev_null, stdout=PIPE, stderr=dev_null,
                               shell=isinstance(self.command, str))
             old = fcntl.fcntl(self.proc.stdout.fileno(), fcntl.F_GETFD)
-            fcntl.fcntl(self.proc.stdout.fileno(), fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
+            fcntl.fcntl(self.proc.stdout.fileno(),
+                        fcntl.F_SETFD, old | fcntl.FD_CLOEXEC)
             self.running = True
             self.failed = False
         except:
@@ -1178,6 +1267,7 @@ class ProcOutput:
             if self.inst:
                 self.inst.write(line)
 
+
 def run_procs(procs):
     while True:
         pipes = []
@@ -1230,6 +1320,31 @@ def pidof(name):
     return pids
 
 
+def check_space(cap, name, size):
+    global free_disk_space
+    if free_disk_space is not None and size > free_disk_space:
+        output("Omitting %s, out of disk space (requested: %u, allowed: %u)" %
+               (name, size, free_disk_space))
+        return False
+    elif unlimited_data or caps[cap][MAX_SIZE] == -1 or \
+             cap_sizes[cap] < caps[cap][MAX_SIZE]:
+        cap_sizes[cap] += size
+        if free_disk_space is not None:
+            free_disk_space -= size
+        return True
+    else:
+        output("Omitting %s, size constraint of %s exceeded" % (name, cap))
+        return False
+
+
+def get_free_disk_space(path):
+    path = os.path.abspath(path)
+    while not os.path.exists(path):
+        path = os.path.dirname(path)
+    s = os.statvfs(path)
+    return s.f_frsize * s.f_bfree
+
+
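get_free_disk_space() walks up from the output path to the nearest existing parent and reports f_frsize * f_bfree; f_bfree also counts the root-reserved blocks, which is fine here because the tool refuses to run as non-root. main() then budgets 90% of that figure, presumably to leave headroom for the archive itself. A short usage sketch with a hypothetical output path:

import os

def free_bytes(path):
    path = os.path.abspath(path)
    while not os.path.exists(path):   # climb to an existing parent
        path = os.path.dirname(path)
    s = os.statvfs(path)
    return s.f_frsize * s.f_bfree

# Hypothetical output location; mirrors the 90% budget applied in main().
budget = free_bytes('/tmp/ovs-bugtool/bug-report.tar.gz') * 90 / 100
print(budget)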
 class StringIOmtime(StringIO.StringIO):
     def __init__(self, buf=''):
         StringIO.StringIO.__init__(self, buf)