1"""
2KVM test utility functions.
3
4@copyright: 2008-2009 Red Hat Inc.
5"""
6
7import time, string, random, socket, os, signal, re, logging, commands, cPickle
8import fcntl, shelve, ConfigParser, threading, sys, UserDict, inspect, tarfile
9import struct, shutil
10from autotest_lib.client.bin import utils, os_dep
11from autotest_lib.client.common_lib import error, logging_config
12import rss_client, aexpect
13try:
14    import koji
15    KOJI_INSTALLED = True
16except ImportError:
17    KOJI_INSTALLED = False
18
19# From include/linux/sockios.h
20SIOCSIFHWADDR = 0x8924
21SIOCGIFHWADDR = 0x8927
22SIOCSIFFLAGS = 0x8914
23SIOCGIFINDEX = 0x8933
24SIOCBRADDIF = 0x89a2
25# From linux/include/linux/if_tun.h
26TUNSETIFF = 0x400454ca
27TUNGETIFF = 0x800454d2
28TUNGETFEATURES = 0x800454cf
29IFF_UP = 0x1
30IFF_TAP = 0x0002
31IFF_NO_PI = 0x1000
32IFF_VNET_HDR = 0x4000
33
34def _lock_file(filename):
35    f = open(filename, "w")
36    fcntl.lockf(f, fcntl.LOCK_EX)
37    return f
38
39
40def _unlock_file(f):
41    fcntl.lockf(f, fcntl.LOCK_UN)
42    f.close()
43
44
def is_vm(obj):
    """
    Tell whether a given object is a VM object (identified by class name).

    @param obj: Any Python object.
    @return: True if obj's class is named "VM", False otherwise.
    """
    class_name = obj.__class__.__name__
    return class_name == "VM"
52
53
class NetError(Exception):
    """Base class for all networking-related errors raised by this module."""
56
57
class TAPModuleError(NetError):
    """Raised when the TUN/TAP module device cannot be accessed."""
    def __init__(self, devname, action="open", details=None):
        NetError.__init__(self, devname)
        self.devname = devname
        # Bug fix: self.action was never stored, so __str__ raised
        # AttributeError instead of producing the intended message.
        self.action = action
        self.details = details

    def __str__(self):
        e_msg = "Can't %s %s" % (self.action, self.devname)
        if self.details is not None:
            e_msg += " : %s" % self.details
        return e_msg
69
70
class TAPNotExistError(NetError):
    """Raised when a TAP interface that should exist cannot be found."""
    def __init__(self, ifname):
        NetError.__init__(self, ifname)
        self.ifname = ifname

    def __str__(self):
        return "Interface %s does not exist" % self.ifname
78
79
class TAPCreationError(NetError):
    """Raised when a TAP device cannot be created."""
    def __init__(self, ifname, details=None):
        NetError.__init__(self, ifname, details)
        self.ifname = ifname
        self.details = details

    def __str__(self):
        # Append the detail text only when one was supplied
        if self.details is None:
            return "Cannot create TAP device %s" % self.ifname
        return "Cannot create TAP device %s: %s" % (self.ifname, self.details)
91
92
class TAPBringUpError(NetError):
    """Raised when a TAP interface cannot be brought up."""
    def __init__(self, ifname):
        NetError.__init__(self, ifname)
        self.ifname = ifname

    def __str__(self):
        return "Cannot bring up TAP %s" % self.ifname
100
101
class BRAddIfError(NetError):
    """Raised when an interface cannot be added to a bridge."""
    def __init__(self, ifname, brname, details):
        NetError.__init__(self, ifname, brname, details)
        self.ifname = ifname
        self.brname = brname
        self.details = details

    def __str__(self):
        return ("Can not add if %s to bridge %s: %s" %
                (self.ifname, self.brname, self.details))
112
113
class HwAddrSetError(NetError):
    """Raised when a MAC address cannot be assigned to an interface."""
    def __init__(self, ifname, mac):
        NetError.__init__(self, ifname, mac)
        self.ifname = ifname
        self.mac = mac

    def __str__(self):
        return "Can not set mac %s to interface %s" % (self.mac, self.ifname)
122
123
class HwAddrGetError(NetError):
    """Raised when the MAC address of an interface cannot be read."""
    def __init__(self, ifname):
        NetError.__init__(self, ifname)
        self.ifname = ifname

    def __str__(self):
        return "Can not get mac of interface %s" % self.ifname
131
132
133class Env(UserDict.IterableUserDict):
134    """
135    A dict-like object containing global objects used by tests.
136    """
137    def __init__(self, filename=None, version=0):
138        """
139        Create an empty Env object or load an existing one from a file.
140
141        If the version recorded in the file is lower than version, or if some
142        error occurs during unpickling, or if filename is not supplied,
143        create an empty Env object.
144
145        @param filename: Path to an env file.
146        @param version: Required env version (int).
147        """
148        UserDict.IterableUserDict.__init__(self)
149        empty = {"version": version}
150        if filename:
151            self._filename = filename
152            try:
153                if os.path.isfile(filename):
154                    f = open(filename, "r")
155                    env = cPickle.load(f)
156                    f.close()
157                    if env.get("version", 0) >= version:
158                        self.data = env
159                    else:
160                        logging.warning("Incompatible env file found. Not using it.")
161                        self.data = empty
162                else:
163                    # No previous env file found, proceed...
164                    self.data = empty
165            # Almost any exception can be raised during unpickling, so let's
166            # catch them all
167            except Exception, e:
168                logging.warning(e)
169                self.data = empty
170        else:
171            self.data = empty
172
173
174    def save(self, filename=None):
175        """
176        Pickle the contents of the Env object into a file.
177
178        @param filename: Filename to pickle the dict into.  If not supplied,
179                use the filename from which the dict was loaded.
180        """
181        filename = filename or self._filename
182        f = open(filename, "w")
183        cPickle.dump(self.data, f)
184        f.close()
185
186
187    def get_all_vms(self):
188        """
189        Return a list of all VM objects in this Env object.
190        """
191        return [o for o in self.values() if is_vm(o)]
192
193
194    def get_vm(self, name):
195        """
196        Return a VM object by its name.
197
198        @param name: VM name.
199        """
200        return self.get("vm__%s" % name)
201
202
203    def register_vm(self, name, vm):
204        """
205        Register a VM in this Env object.
206
207        @param name: VM name.
208        @param vm: VM object.
209        """
210        self["vm__%s" % name] = vm
211
212
213    def unregister_vm(self, name):
214        """
215        Remove a given VM.
216
217        @param name: VM name.
218        """
219        del self["vm__%s" % name]
220
221
222    def register_installer(self, installer):
223        """
224        Register a installer that was just run
225
226        The installer will be available for other tests, so that
227        information about the installed KVM modules and qemu-kvm can be used by
228        them.
229        """
230        self['last_installer'] = installer
231
232
233    def previous_installer(self):
234        """
235        Return the last installer that was registered
236        """
237        return self.get('last_installer')
238
239
class Params(UserDict.IterableUserDict):
    """
    A dict-like object passed to every test.
    """
    def objects(self, key):
        """
        Return the names of objects defined using a given key.

        @param key: The name of the key whose value lists the objects
                (e.g. 'nics').
        """
        return self.get(key, "").split()


    def object_params(self, obj_name):
        """
        Return a dict-like object containing the parameters of an individual
        object.

        This method behaves as follows: the suffix '_' + obj_name is removed
        from all key names that have it.  Other key names are left unchanged.
        The values of keys with the suffix overwrite the values of their
        suffixless versions.

        @param obj_name: The name of the object (objects are listed by the
                objects() method).
        """
        suffix = "_" + obj_name
        new_dict = self.copy()
        for key in self:
            if key.endswith(suffix):
                # Bug fix: strip only the trailing suffix.  The previous
                # key.split(suffix)[0] also truncated at any earlier
                # occurrence of the suffix inside the key (e.g.
                # 'a_nic1_b_nic1' became 'a' instead of 'a_nic1_b').
                new_key = key[:-len(suffix)]
                new_dict[new_key] = self[key]
        return new_dict
274
275
276# Functions related to MAC/IP addresses
277
def _open_mac_pool(lock_mode):
    """
    Open the persistent MAC address pool and lock its companion lock file.

    @param lock_mode: fcntl lock mode (fcntl.LOCK_EX or fcntl.LOCK_SH).
    @return: Tuple (pool shelve object, locked file object).
    """
    lock_file = open("/tmp/mac_lock", "w+")
    fcntl.lockf(lock_file, lock_mode)
    return shelve.open("/tmp/address_pool"), lock_file
283
284
285def _close_mac_pool(pool, lock_file):
286    pool.close()
287    fcntl.lockf(lock_file, fcntl.LOCK_UN)
288    lock_file.close()
289
290
291def _generate_mac_address_prefix(mac_pool):
292    """
293    Generate a random MAC address prefix and add it to the MAC pool dictionary.
294    If there's a MAC prefix there already, do not update the MAC pool and just
295    return what's in there. By convention we will set KVM autotest MAC
296    addresses to start with 0x9a.
297
298    @param mac_pool: The MAC address pool object.
299    @return: The MAC address prefix.
300    """
301    if "prefix" in mac_pool:
302        prefix = mac_pool["prefix"]
303    else:
304        r = random.SystemRandom()
305        prefix = "9a:%02x:%02x:%02x:" % (r.randint(0x00, 0xff),
306                                         r.randint(0x00, 0xff),
307                                         r.randint(0x00, 0xff))
308        mac_pool["prefix"] = prefix
309    return prefix
310
311
def generate_mac_address(vm_instance, nic_index):
    """
    Return (and remember) a MAC address for the given VM NIC.

    The persistent pool maps "<vm instance>:<nic index>" keys to MAC address
    values, e.g. {'20100310-165222-Wt7l:0': '9a:5d:94:6a:9b:f9'}.  An address
    already stored for the key is reused; otherwise a new address based on
    the pool's random prefix, unique within the pool, is generated and saved.

    @param vm_instance: The instance attribute of a VM.
    @param nic_index: The index of the NIC.
    @return: MAC address string.
    """
    mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX)
    key = "%s:%s" % (vm_instance, nic_index)
    mac = mac_pool.get(key)
    if mac is None:
        prefix = _generate_mac_address_prefix(mac_pool)
        rng = random.SystemRandom()
        existing = mac_pool.values()
        while True:
            candidate = prefix + "%02x:%02x" % (rng.randint(0x00, 0xff),
                                                rng.randint(0x00, 0xff))
            # Keep drawing until the address is unique within the pool
            if candidate not in existing:
                break
        mac = candidate
        mac_pool[key] = mac
    _close_mac_pool(mac_pool, lock_file)
    return mac
340
341
def free_mac_address(vm_instance, nic_index):
    """
    Remove a MAC address from the address pool.

    @param vm_instance: The instance attribute of a VM.
    @param nic_index: The index of the NIC.
    """
    mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX)
    key = "%s:%s" % (vm_instance, nic_index)
    if key in mac_pool:
        # Only registered keys need removing; unknown keys are a no-op
        del mac_pool[key]
    _close_mac_pool(mac_pool, lock_file)
354
355
def set_mac_address(vm_instance, nic_index, mac):
    """
    Store a MAC address in the pool for a given VM NIC.

    @param vm_instance: The instance attribute of a VM.
    @param nic_index: The index of the NIC.
    @param mac: The MAC address string to store.
    """
    mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_EX)
    key = "%s:%s" % (vm_instance, nic_index)
    mac_pool[key] = mac
    _close_mac_pool(mac_pool, lock_file)
366
367
def get_mac_address(vm_instance, nic_index):
    """
    Look up the MAC address stored for a given VM NIC.

    @param vm_instance: The instance attribute of a VM.
    @param nic_index: The index of the NIC.
    @return: MAC address string, or None if none is registered.
    """
    # A shared lock suffices for a read-only lookup
    mac_pool, lock_file = _open_mac_pool(fcntl.LOCK_SH)
    key = "%s:%s" % (vm_instance, nic_index)
    mac = mac_pool.get(key)
    _close_mac_pool(mac_pool, lock_file)
    return mac
380
381
def verify_ip_address_ownership(ip, macs, timeout=10.0):
    """
    Make sure a given IP address belongs to one of the given MAC addresses,
    first by consulting the local ARP cache and then by arping the address.

    @param ip: An IP address.
    @param macs: A list or tuple of MAC addresses.
    @param timeout: Accepted for backward compatibility; currently unused.
    @return: True iff ip is assigned to a MAC address in macs.
    """
    # One regex matching "<ip> ... <any of the macs>" on a single line
    mac_alternatives = "|".join("(%s)" % mac for mac in macs)
    regex = re.compile(r"\b%s\b.*\b(%s)\b" % (ip, mac_alternatives),
                       re.IGNORECASE)

    # Fast path: the local ARP cache may already have the answer
    arp_output = commands.getoutput("%s -n" % find_command("arp"))
    if regex.search(arp_output):
        return True

    # Work out which device routes to the IP so arping can bind to it
    route_output = commands.getoutput("%s route get %s" %
                                      (find_command("ip"), ip))
    devices = re.findall("dev\s+\S+", route_output, re.IGNORECASE)
    if not devices:
        return False
    device = devices[0].split()[-1]

    # Actively probe with ARP requests and inspect the replies
    arping_output = commands.getoutput("%s -f -c 3 -I %s %s" %
                                       (find_command("arping"), device, ip))
    return bool(regex.search(arping_output))
412
413
414# Utility functions for dealing with external processes
415
def find_command(cmd):
    """
    Search the standard system binary directories for a command.

    @param cmd: Command name (e.g. 'arp').
    @return: Full path of the first match.
    @raise ValueError: If the command is not found anywhere.
    """
    search_dirs = ("/usr/local/sbin", "/usr/local/bin",
                   "/usr/sbin", "/usr/bin", "/sbin", "/bin")
    for directory in search_dirs:
        candidate = os.path.join(directory, cmd)
        if os.path.exists(candidate):
            return candidate
    raise ValueError('Missing command: %s' % cmd)
423
424
def pid_exists(pid):
    """
    Return True if a given PID exists.

    @param pid: Process ID number.
    @return: True if a process with that PID exists, False otherwise.
    """
    try:
        # Signal 0 performs existence/permission checking only; no signal
        # is actually delivered
        os.kill(pid, 0)
        return True
    except OSError:
        # Narrowed from a bare except, which also hid programming errors
        # such as a non-integer pid
        return False
436
437
def safe_kill(pid, signal):
    """
    Attempt to send a signal to a given process that may or may not exist.

    @param pid: Process ID number.
    @param signal: Signal number.  (The parameter keeps its historical name,
            shadowing the module-level 'signal' import, so existing keyword
            callers remain compatible.)
    @return: True if the signal was delivered, False otherwise.
    """
    try:
        os.kill(pid, signal)
        return True
    except OSError:
        # Narrowed from a bare except: "no such process" / "permission
        # denied" are the expected soft failures here
        return False
449
450
def kill_process_tree(pid, sig=signal.SIGKILL):
    """Signal a process and all of its children.

    If the process does not exist -- return.

    @param pid: The pid of the process to signal.
    @param sig: The signal to send to the processes.
    """
    # Stop the parent first so it cannot spawn new children while the tree
    # is walked; if it does not exist there is nothing to do
    if not safe_kill(pid, signal.SIGSTOP):
        return
    for child in commands.getoutput("ps --ppid=%d -o pid=" % pid).split():
        kill_process_tree(int(child), sig)
    # Deliver the requested signal, then let the (stopped) process run so it
    # can act on it
    safe_kill(pid, sig)
    safe_kill(pid, signal.SIGCONT)
466
467
def get_git_branch(repository, branch, srcdir, commit=None, lbranch=None):
    """
    Retrieve a given git code repository into srcdir.

    @param repository: Git repository URL.
    @param branch: Remote branch to fetch.
    @param srcdir: Destination directory (created if missing).
    @param commit: Optional commit to check out after fetching.
    @param lbranch: Local branch name (defaults to branch).
    @return: The source directory path.
    """
    logging.info("Fetching git [REP '%s' BRANCH '%s' COMMIT '%s'] -> %s",
                 repository, branch, commit, srcdir)
    if not os.path.exists(srcdir):
        os.makedirs(srcdir)
    os.chdir(srcdir)

    # Reuse an existing checkout when possible, otherwise start fresh
    if os.path.exists(".git"):
        utils.system("git reset --hard")
    else:
        utils.system("git init")

    lbranch = lbranch or branch

    utils.system("git fetch -q -f -u -t %s %s:%s" %
                 (repository, branch, lbranch))
    utils.system("git checkout %s" % lbranch)
    if commit:
        utils.system("git checkout %s" % commit)

    commit_hash = utils.system_output('git log --pretty=format:"%H" -1')
    try:
        describe = "tag %s" % utils.system_output("git describe")
    except error.CmdError:
        # Not every commit is reachable from a tag
        describe = "no tag found"

    logging.info("Commit hash for %s is %s (%s)", repository,
                 commit_hash.strip(), describe)
    return srcdir
502
503
def check_kvm_source_dir(source_dir):
    """
    Inspect the kvm source directory and report its layout.  Builds may
    depend on the source directory disposition.  Numeric return codes are
    used (rather than strings such as 'old_repo'/'new_repo') so that further
    layout variants can be added without renaming anything.

    @param source_dir: Source code path that will be inspected.
    @return: 1 if a 'qemu' subdirectory exists, 2 if only 'kvm' exists.
    @raise error.TestError: If neither layout is recognized.
    """
    os.chdir(source_dir)
    if os.path.isdir('qemu'):
        logging.debug("qemu directory detected, source dir layout 1")
        return 1
    if os.path.isdir('kvm'):
        logging.debug("kvm directory detected, source dir layout 2")
        return 2
    raise error.TestError("Unknown source dir layout, cannot proceed.")
525
526
527# Functions and classes used for logging into guests and transferring files
528
class LoginError(Exception):
    """Base class for remote login failures; carries the session output."""
    def __init__(self, msg, output):
        Exception.__init__(self, msg, output)
        self.msg = msg
        self.output = output

    def __str__(self):
        return "%s    (output: %r)" % (self.msg, self.output)
537
538
class LoginAuthenticationError(LoginError):
    """Raised when the remote host rejects the supplied credentials."""
541
542
class LoginTimeoutError(LoginError):
    """Raised when the login procedure does not complete in time."""
    def __init__(self, output):
        LoginError.__init__(self, "Login timeout expired", output)
546
547
class LoginProcessTerminatedError(LoginError):
    """Raised when the login client process exits during the login attempt."""
    def __init__(self, status, output):
        LoginError.__init__(self, None, output)
        self.status = status

    def __str__(self):
        return ("Client process terminated    (status: %s,    output: %r)" %
                (self.status, self.output))
556
557
class LoginBadClientError(LoginError):
    """Raised when an unsupported remote shell client is requested."""
    def __init__(self, client):
        LoginError.__init__(self, None, None)
        self.client = client

    def __str__(self):
        return "Unknown remote shell client: %r" % self.client
565
566
class SCPError(Exception):
    """Base class for SCP transfer failures; carries the session output."""
    def __init__(self, msg, output):
        Exception.__init__(self, msg, output)
        self.msg = msg
        self.output = output

    def __str__(self):
        return "%s    (output: %r)" % (self.msg, self.output)
575
576
class SCPAuthenticationError(SCPError):
    """Raised when SCP authentication fails."""
579
580
class SCPAuthenticationTimeoutError(SCPAuthenticationError):
    """Raised when the SCP authentication step does not complete in time."""
    def __init__(self, output):
        SCPAuthenticationError.__init__(self, "Authentication timeout expired",
                                        output)
585
586
class SCPTransferTimeoutError(SCPError):
    """Raised when the SCP transfer does not complete in time."""
    def __init__(self, output):
        SCPError.__init__(self, "Transfer timeout expired", output)
590
591
class SCPTransferFailedError(SCPError):
    """Raised when the SCP process exits with a nonzero status."""
    def __init__(self, status, output):
        SCPError.__init__(self, None, output)
        self.status = status

    def __str__(self):
        return ("SCP transfer failed    (status: %s,    output: %r)" %
                (self.status, self.output))
600
601
def _remote_login(session, username, password, prompt, timeout=10):
    """
    Log into a remote host (guest) using SSH or Telnet.  Wait for questions
    and provide answers.  If timeout expires while waiting for output from the
    child (e.g. a password prompt or a shell prompt) -- fail.

    @brief: Log into a remote host (guest) using SSH or Telnet.

    @param session: An Expect or ShellSession instance to operate on
    @param username: The username to send in reply to a login prompt
    @param password: The password to send in reply to a password prompt
    @param prompt: The shell prompt that indicates a successful login
    @param timeout: The maximal time duration (in seconds) to wait for each
            step of the login procedure (i.e. the "Are you sure" prompt, the
            password prompt, the shell prompt, etc)
    @raise LoginTimeoutError: If timeout expires
    @raise LoginAuthenticationError: If authentication fails
    @raise LoginProcessTerminatedError: If the client terminates during login
    @raise LoginError: If some other error occurs
    """
    # These counters detect authentication loops: a second password or
    # username prompt means the credentials were rejected.
    password_prompt_count = 0
    login_prompt_count = 0

    while True:
        try:
            # Match index decides the branch below; order of the patterns
            # matters and must stay in sync with the elif chain.
            match, text = session.read_until_last_line_matches(
                [r"[Aa]re you sure", r"[Pp]assword:\s*$", r"[Ll]ogin:\s*$",
                 r"[Cc]onnection.*closed", r"[Cc]onnection.*refused",
                 r"[Pp]lease wait", r"[Ww]arning", prompt],
                timeout=timeout, internal_timeout=0.5)
            if match == 0:  # "Are you sure you want to continue connecting"
                logging.debug("Got 'Are you sure...', sending 'yes'")
                session.sendline("yes")
                continue
            elif match == 1:  # "password:"
                if password_prompt_count == 0:
                    logging.debug("Got password prompt, sending '%s'", password)
                    session.sendline(password)
                    password_prompt_count += 1
                    continue
                else:
                    # A repeated password prompt means the password was wrong
                    raise LoginAuthenticationError("Got password prompt twice",
                                                   text)
            elif match == 2:  # "login:"
                if login_prompt_count == 0 and password_prompt_count == 0:
                    logging.debug("Got username prompt; sending '%s'", username)
                    session.sendline(username)
                    login_prompt_count += 1
                    continue
                else:
                    if login_prompt_count > 0:
                        msg = "Got username prompt twice"
                    else:
                        msg = "Got username prompt after password prompt"
                    raise LoginAuthenticationError(msg, text)
            elif match == 3:  # "Connection closed"
                raise LoginError("Client said 'connection closed'", text)
            elif match == 4:  # "Connection refused"
                raise LoginError("Client said 'connection refused'", text)
            elif match == 5:  # "Please wait"
                logging.debug("Got 'Please wait'")
                # Give the remote side extra time before the next prompt
                timeout = 30
                continue
            elif match == 6:  # "Warning added RSA"
                logging.debug("Got 'Warning added RSA to known host list")
                continue
            elif match == 7:  # prompt
                logging.debug("Got shell prompt -- logged in")
                break
        except aexpect.ExpectTimeoutError, e:
            raise LoginTimeoutError(e.output)
        except aexpect.ExpectProcessTerminatedError, e:
            raise LoginProcessTerminatedError(e.status, e.output)
675
676
def remote_login(client, host, port, username, password, prompt, linesep="\n",
                 log_filename=None, timeout=10):
    """
    Log into a remote host (guest) using SSH/Telnet/Netcat.

    @param client: The client to use ('ssh', 'telnet' or 'nc')
    @param host: Hostname or IP address
    @param port: Port to connect to
    @param username: Username (if required)
    @param password: Password (if required)
    @param prompt: Shell prompt (regular expression)
    @param linesep: The line separator to use when sending lines
            (e.g. '\\n' or '\\r\\n')
    @param log_filename: If specified, log all output to this file
    @param timeout: The maximal time duration (in seconds) to wait for
            each step of the login procedure (i.e. the "Are you sure" prompt
            or the password prompt)
    @raise LoginBadClientError: If an unknown client is requested
    @raise: Whatever _remote_login() raises
    @return: A ShellSession object.
    """
    # Reject unknown clients up front, then build the client command line
    if client not in ("ssh", "telnet", "nc"):
        raise LoginBadClientError(client)
    if client == "ssh":
        cmd = ("ssh -o UserKnownHostsFile=/dev/null "
               "-o PreferredAuthentications=password -p %s %s@%s" %
               (port, username, host))
    elif client == "telnet":
        cmd = "telnet -l %s %s %s" % (username, host, port)
    else:
        cmd = "nc %s %s" % (host, port)

    logging.debug("Trying to login with command '%s'", cmd)
    session = aexpect.ShellSession(cmd, linesep=linesep, prompt=prompt)
    try:
        _remote_login(session, username, password, prompt, timeout)
    except:
        # Never leak the spawned client process on a failed login
        session.close()
        raise
    if log_filename:
        session.set_output_func(log_line)
        session.set_output_params((log_filename,))
    return session
720
721
722def wait_for_login(client, host, port, username, password, prompt, linesep="\n",
723                   log_filename=None, timeout=240, internal_timeout=10):
724    """
725    Make multiple attempts to log into a remote host (guest) until one succeeds
726    or timeout expires.
727
728    @param timeout: Total time duration to wait for a successful login
729    @param internal_timeout: The maximal time duration (in seconds) to wait for
730            each step of the login procedure (e.g. the "Are you sure" prompt
731            or the password prompt)
732    @see: remote_login()
733    @raise: Whatever remote_login() raises
734    @return: A ShellSession object.
735    """
736    logging.debug("Attempting to log into %s:%s using %s (timeout %ds)",
737                  host, port, client, timeout)
738    end_time = time.time() + timeout
739    while time.time() < end_time:
740        try:
741            return remote_login(client, host, port, username, password, prompt,
742                                linesep, log_filename, internal_timeout)
743        except LoginError, e:
744            logging.debug(e)
745        time.sleep(2)
746    # Timeout expired; try one more time but don't catch exceptions
747    return remote_login(client, host, port, username, password, prompt,
748                        linesep, log_filename, internal_timeout)
749
750
def _remote_scp(session, password_list, transfer_timeout=600, login_timeout=10):
    """
    Transfer file(s) to a remote host (guest) using SCP.  Wait for questions
    and provide answers.  If login_timeout expires while waiting for output
    from the child (e.g. a password prompt), fail.  If transfer_timeout expires
    while waiting for the transfer to complete, fail.

    @brief: Transfer files using SCP, given a command line.

    @param session: An Expect or ShellSession instance to operate on
    @param password_list: Password list to send in reply to the password prompt
    @param transfer_timeout: The time duration (in seconds) to wait for the
            transfer to complete.
    @param login_timeout: The maximal time duration (in seconds) to wait for
            each step of the login procedure (i.e. the "Are you sure" prompt or
            the password prompt)
    @raise SCPAuthenticationError: If authentication fails
    @raise SCPTransferTimeoutError: If the transfer fails to complete in time
    @raise SCPTransferFailedError: If the process terminates with a nonzero
            exit code
    @raise SCPError: If some other error occurs
    """
    password_prompt_count = 0
    timeout = login_timeout
    authentication_done = False

    # One password means a plain local<->remote copy; two passwords mean a
    # remote-to-remote copy where both endpoints prompt for authentication.
    scp_type = len(password_list)

    while True:
        try:
            match, text = session.read_until_last_line_matches(
                [r"[Aa]re you sure", r"[Pp]assword:\s*$", r"lost connection"],
                timeout=timeout, internal_timeout=0.5)
            if match == 0:  # "Are you sure you want to continue connecting"
                logging.debug("Got 'Are you sure...', sending 'yes'")
                session.sendline("yes")
                continue
            elif match == 1:  # "password:"
                if password_prompt_count == 0:
                    logging.debug("Got password prompt, sending '%s'" %
                                   password_list[password_prompt_count])
                    session.sendline(password_list[password_prompt_count])
                    password_prompt_count += 1
                    # Authentication answered; switch to the longer transfer
                    # timeout for the remaining output
                    timeout = transfer_timeout
                    if scp_type == 1:
                        authentication_done = True
                    continue
                elif password_prompt_count == 1 and scp_type == 2:
                    # Second prompt of a remote-to-remote copy
                    logging.debug("Got password prompt, sending '%s'" %
                                   password_list[password_prompt_count])
                    session.sendline(password_list[password_prompt_count])
                    password_prompt_count += 1
                    timeout = transfer_timeout
                    authentication_done = True
                    continue
                else:
                    raise SCPAuthenticationError("Got password prompt twice",
                                                 text)
            elif match == 2:  # "lost connection"
                raise SCPError("SCP client said 'lost connection'", text)
        except aexpect.ExpectTimeoutError, e:
            # Whether authentication finished decides which timeout error to
            # raise for the caller
            if authentication_done:
                raise SCPTransferTimeoutError(e.output)
            else:
                raise SCPAuthenticationTimeoutError(e.output)
        except aexpect.ExpectProcessTerminatedError, e:
            # A zero exit status means the transfer completed successfully
            if e.status == 0:
                logging.debug("SCP process terminated with status 0")
                break
            else:
                raise SCPTransferFailedError(e.status, e.output)
822
823
def remote_scp(command, password_list, log_filename=None, transfer_timeout=600,
               login_timeout=10):
    """
    Transfer file(s) to a remote host (guest) using SCP.

    @brief: Transfer files using SCP, given a command line.

    @param command: The command to execute
        (e.g. "scp -r foobar root@localhost:/tmp/").
    @param password_list: Password list to send in reply to a password prompt.
    @param log_filename: If specified, log all output to this file
    @param transfer_timeout: The time duration (in seconds) to wait for the
            transfer to complete.
    @param login_timeout: The maximal time duration (in seconds) to wait for
            each step of the login procedure (i.e. the "Are you sure" prompt
            or the password prompt)
    @raise: Whatever _remote_scp() raises
    """
    logging.debug("Trying to SCP with command '%s', timeout %ss",
                  command, transfer_timeout)
    if log_filename:
        output_func = log_line
        output_params = (log_filename,)
    else:
        output_func = None
        output_params = ()
    session = aexpect.Expect(command, output_func=output_func,
                             output_params=output_params)
    # The scp process is always cleaned up, whether the transfer succeeds
    # or an exception propagates
    try:
        _remote_scp(session, password_list, transfer_timeout, login_timeout)
    finally:
        session.close()
857
858
def scp_to_remote(host, port, username, password, local_path, remote_path,
                  log_filename=None, timeout=600):
    """
    Copy files to a remote host (guest) through scp.

    @param host: Hostname or IP address
    @param port: SSH port on the remote host
    @param username: Username (if required)
    @param password: Password (if required)
    @param local_path: Path on the local machine where we are copying from
    @param remote_path: Path on the remote machine where we are copying to
    @param log_filename: If specified, log all output to this file
    @param timeout: The time duration (in seconds) to wait for the transfer
            to complete.
    @raise: Whatever remote_scp() raises
    """
    command = ("scp -v -o UserKnownHostsFile=/dev/null "
               "-o PreferredAuthentications=password -r -P %s %s %s@%s:%s" %
               (port, local_path, username, host, remote_path))
    return remote_scp(command, [password], log_filename, timeout)
880
881
882
def scp_from_remote(host, port, username, password, remote_path, local_path,
                    log_filename=None, timeout=600):
    """
    Copy files from a remote host (guest).

    @param host: Hostname or IP address
    @param port: SSH port on the remote host
    @param username: Username (if required)
    @param password: Password (if required)
    @param remote_path: Path on the remote machine where we are copying from
    @param local_path: Path on the local machine where we are copying to
    @param log_filename: If specified, log all output to this file
    @param timeout: The time duration (in seconds) to wait for the transfer
            to complete.
    @raise: Whatever remote_scp() raises
    """
    command = ("scp -v -o UserKnownHostsFile=/dev/null "
               "-o PreferredAuthentications=password -r -P %s %s@%s:%s %s" %
               (port, username, host, remote_path, local_path))
    password_list = [password]
    # Propagate remote_scp()'s return value for consistency with
    # scp_to_remote() and scp_between_remotes(), which both return it.
    return remote_scp(command, password_list, log_filename, timeout)
904
905
def scp_between_remotes(src, dst, port, s_passwd, d_passwd, s_name, d_name,
                        s_path, d_path, log_filename=None, timeout=600):
    """
    Copy files from a remote host (guest) to another remote host (guest).

    @param src/dst: Hostname or IP address of src and dst
    @param s_name/d_name: Username (if required)
    @param s_passwd/d_passwd: Password (if required)
    @param s_path/d_path: Path on the remote machine where we are copying
                         from/to
    @param log_filename: If specified, log all output to this file
    @param timeout: The time duration (in seconds) to wait for the transfer
            to complete.

    @return: Whatever remote_scp() returns.
    """
    command = ("scp -v -o UserKnownHostsFile=/dev/null -o "
               "PreferredAuthentications=password -r -P %s %s@%s:%s %s@%s:%s" %
               (port, s_name, src, s_path, d_name, dst, d_path))
    # Both the source and the destination host may prompt for a password,
    # in that order.
    return remote_scp(command, [s_passwd, d_passwd], log_filename, timeout)
929
930
def copy_files_to(address, client, username, password, port, local_path,
                  remote_path, log_filename=None, verbose=False, timeout=600):
    """
    Copy files to a remote host (guest) using the selected client.

    @param address: Address of remote host(guest)
    @param client: Type of transfer client ("scp" or "rss")
    @param username: Username (if required)
    @param password: Password (if required)
    @param port: Port to connect to on the remote host
    @param local_path: Path on the local machine where we are copying from
    @param remote_path: Path on the remote machine where we are copying to
    @param log_filename: If specified, log all output to this file (SCP only)
    @param verbose: If True, log some stats using logging.debug (RSS only)
    @param timeout: The time duration (in seconds) to wait for the transfer to
            complete.
    @raise: Whatever remote_scp() raises
    """
    if client == "scp":
        scp_to_remote(address, port, username, password, local_path,
                      remote_path, log_filename, timeout)
    elif client == "rss":
        log_func = logging.debug if verbose else None
        uploader = rss_client.FileUploadClient(address, port, log_func)
        uploader.upload(local_path, remote_path, timeout)
        uploader.close()
958
959
def copy_files_from(address, client, username, password, port, remote_path,
                    local_path, log_filename=None, verbose=False, timeout=600):
    """
    Copy files from a remote host (guest) using the selected client.

    @param address: Address of remote host(guest)
    @param client: Type of transfer client ("scp" or "rss")
    @param username: Username (if required)
    @param password: Password (if required)
    @param port: Port to connect to on the remote host
    @param remote_path: Path on the remote machine where we are copying from
    @param local_path: Path on the local machine where we are copying to
    @param log_filename: If specified, log all output to this file (SCP only)
    @param verbose: If True, log some stats using logging.debug (RSS only)
    @param timeout: The time duration (in seconds) to wait for the transfer to
            complete.
    @raise: Whatever remote_scp() raises
    """
    if client == "scp":
        scp_from_remote(address, port, username, password, remote_path,
                        local_path, log_filename, timeout)
    elif client == "rss":
        log_func = logging.debug if verbose else None
        downloader = rss_client.FileDownloadClient(address, port, log_func)
        downloader.download(remote_path, local_path, timeout)
        downloader.close()
987
988
989# The following are utility functions related to ports.
990
def is_port_free(port, address):
    """
    Return True if the given port is available for use.

    For "localhost" the port is probed by trying to bind to it; for any
    other address, by attempting a TCP connection to it (a successful
    connection means the port is in use).

    @param port: Port number
    @param address: Hostname or IP address to check the port on
    """
    s = None
    try:
        s = socket.socket()
        if address == "localhost":
            s.bind(("localhost", port))
            free = True
        else:
            s.connect((address, port))
            free = False
    except socket.error:
        # bind failure on localhost means busy; connect failure on a
        # remote address means nothing is listening there (free)
        free = (address != "localhost")
    finally:
        # Close even if socket() or bind()/connect() raised; the original
        # code closed outside the try block, so a failure in socket()
        # itself would have raised NameError on an unbound 's'.
        if s is not None:
            s.close()
    return free
1013
1014
def find_free_port(start_port, end_port, address="localhost"):
    """
    Return a host free port in the range [start_port, end_port).

    @param start_port: First port that will be checked.
    @param end_port: Port immediately after the last one that will be checked.
    @param address: Address to probe the ports on (default "localhost").
    @return: The first free port found, or None if all are in use.
    """
    candidate = start_port
    while candidate < end_port:
        if is_port_free(candidate, address):
            return candidate
        candidate += 1
    return None
1026
1027
def find_free_ports(start_port, end_port, count, address="localhost"):
    """
    Return up to count host free ports in the range [start_port, end_port).

    @param start_port: First port that will be checked.
    @param end_port: Port immediately after the last one that will be checked.
    @param count: Maximum number of free ports to collect.
    @param address: Address to probe the ports on (default "localhost").
    @return: List of free ports found (may be shorter than count).
    """
    ports = []
    for candidate in range(start_port, end_port):
        if len(ports) >= count:
            break
        if is_port_free(candidate, address):
            ports.append(candidate)
    return ports
1044
1045
1046# An easy way to log lines to files when the logging system can't be used
1047
# Map of filename -> open file object, kept open across log_line() calls.
_open_log_files = {}
# Base directory for relative log file paths; changed via set_log_file_dir().
_log_file_dir = "/tmp"
1050
1051
def log_line(filename, line):
    """
    Write a line to a file.  '\n' is appended to the line.

    Files are opened on first use and cached in _open_log_files so that
    subsequent calls append to the same handle.

    @param filename: Path of file to write to, either absolute or relative to
            the dir set by set_log_file_dir().
    @param line: Line to write.
    """
    global _open_log_files, _log_file_dir
    logfile = _open_log_files.get(filename)
    if logfile is None:
        path = get_path(_log_file_dir, filename)
        try:
            os.makedirs(os.path.dirname(path))
        except OSError:
            # Parent directory already exists
            pass
        logfile = open(path, "w")
        _open_log_files[filename] = logfile
    stamp = time.strftime("%Y-%m-%d %H:%M:%S")
    logfile.write("%s: %s\n" % (stamp, line))
    logfile.flush()
1071
1072
def set_log_file_dir(dir):
    """
    Set the base directory for log files created by log_line().

    Only affects files opened after this call; already-open log files keep
    their original location.

    @param dir: Directory for log files.
    """
    global _log_file_dir
    _log_file_dir = dir
1081
1082
1083# The following are miscellaneous utility functions.
1084
def get_path(base_path, user_path):
    """
    Translate a user specified path to a real path.

    If user_path is relative, append it to base_path; if it is absolute,
    return it as is.

    @param base_path: The base path of relative user specified paths.
    @param user_path: The user specified path.
    """
    if not os.path.isabs(user_path):
        return os.path.join(base_path, user_path)
    return user_path
1098
1099
def generate_random_string(length):
    """
    Return a random string using alphanumeric characters.

    @param length: Length of the string that will be generated.
    @return: The generated random string.
    """
    r = random.SystemRandom()
    # string.ascii_letters instead of string.letters: the latter is
    # locale-dependent in Python 2 and was removed in Python 3.  Also
    # avoids shadowing the 'str' builtin and the quadratic '+=' string
    # building of the original implementation.
    chars = string.ascii_letters + string.digits
    return "".join(r.choice(chars) for _ in range(length))
1113
def generate_random_id():
    """
    Return a random string suitable for use as a qemu id.
    """
    # qemu ids must not start with a digit, hence the fixed "id" prefix.
    suffix = generate_random_string(6)
    return "id" + suffix
1119
1120
def generate_tmp_file_name(file, ext=None, dir='/tmp/'):
    """
    Returns a temporary file name. The file is not created.

    @param file: Prefix for the generated file name.
    @param ext: Optional extension to append (without the leading dot).
    @param dir: Directory the file name is joined with.
    @return: Path to a file that does not currently exist.
    """
    while True:
        unique_part = (time.strftime("%Y%m%d-%H%M%S-") +
                       generate_random_string(4))
        candidate = "%s-%s" % (file, unique_part)
        if ext:
            candidate = "%s.%s" % (candidate, ext)
        candidate = os.path.join(dir, candidate)
        if not os.path.exists(candidate):
            return candidate
1135
1136
def format_str_for_message(str):
    """
    Format str so that it can be appended to a message.
    If str consists of one line, prefix it with a space.
    If str consists of multiple lines, prefix it with a newline.

    @param str: string that will be formatted.
    """
    lines = str.splitlines()
    if not lines:
        return ""
    body = "\n".join(lines)
    if len(lines) == 1:
        return " " + body
    return "\n" + body
1154
1155
def wait_for(func, timeout, first=0.0, step=1.0, text=None):
    """
    Wait until func() evaluates to True or timeout expires.

    If func() evaluates to True before timeout expires, return the value of
    func().  Otherwise return None.

    @param func: Callable polled until it returns a true value.
    @param timeout: Timeout in seconds.
    @param first: Time to sleep before first attempt.
    @param step: Time to sleep between attempts in seconds.
    @param text: Text to print while waiting, for debug purposes.
    """
    start_time = time.time()
    deadline = start_time + timeout

    time.sleep(first)

    while time.time() < deadline:
        if text:
            logging.debug("%s (%f secs)", text, time.time() - start_time)
        result = func()
        if result:
            return result
        time.sleep(step)

    return None
1184
1185
def get_hash_from_file(hash_path, dvd_basename):
    """
    Get a hash for a given DVD image from a hash file
    (Hash files are usually named MD5SUM or SHA1SUM and are located inside the
    download directories of the DVDs).

    @param hash_path: Local path to a hash file.
    @param dvd_basename: Basename of a DVD image.
    @return: The hash string for the image, or None if not found.
    """
    hash_file = open(hash_path, 'r')
    try:
        # Hash files have the format "<hash>  <filename>", one per line
        for line in hash_file:
            if dvd_basename in line:
                return line.split()[0]
    finally:
        # The original implementation leaked the file handle
        hash_file.close()
1199
1200
def run_tests(parser, job):
    """
    Runs the sequence of KVM tests based on the list of dictionaries
    generated by the configuration system, handling dependencies.

    @param parser: Config parser object.
    @param job: Autotest job object.

    @return: True, if all tests ran passed, False if any of them failed.
    """
    # Print the full test schedule up front for the job log.
    for i, d in enumerate(parser.get_dicts()):
        logging.info("Test %4d:  %s" % (i + 1, d["shortname"]))

    # Maps test name -> status string, used for dependency checking below.
    status_dict = {}
    failed = False

    # NOTE(review): 'dict' shadows the builtin of the same name throughout
    # this loop; kept as-is to avoid touching behavior-sensitive code.
    for dict in parser.get_dicts():
        if dict.get("skip") == "yes":
            continue
        dependencies_satisfied = True
        # A dependency is matched by substring against the names of tests
        # that already ran.
        for dep in dict.get("dep"):
            for test_name in status_dict.keys():
                if not dep in test_name:
                    continue
                # So the only really non-fatal state is WARN,
                # All the others make it not safe to proceed with dependency
                # execution
                if status_dict[test_name] not in ['GOOD', 'WARN']:
                    dependencies_satisfied = False
                    break
        test_iterations = int(dict.get("iterations", 1))
        test_tag = dict.get("shortname")

        if dependencies_satisfied:
            # Setting up profilers during test execution.
            profilers = dict.get("profilers", "").split()
            for profiler in profilers:
                job.profilers.add(profiler)
            # We need only one execution, profiled, hence we're passing
            # the profile_only parameter to job.run_test().
            profile_only = bool(profilers) or None
            current_status = job.run_test_detail(dict.get("vm_type"),
                                                 params=dict,
                                                 tag=test_tag,
                                                 iterations=test_iterations,
                                                 profile_only=profile_only)
            # Remove the profilers again so they don't leak into later tests.
            for profiler in profilers:
                job.profilers.delete(profiler)
        else:
            # We will force the test to fail as TestNA during preprocessing
            dict['dependency_failed'] = 'yes'
            current_status = job.run_test_detail(dict.get("vm_type"),
                                                 params=dict,
                                                 tag=test_tag,
                                                 iterations=test_iterations)

        # Any falsy status marks the whole run as failed, but we keep going
        # so that independent tests still execute.
        if not current_status:
            failed = True
        status_dict[dict.get("name")] = current_status

    return not failed
1262
1263
def display_attributes(instance):
    """
    Inspects a given class instance attributes and displays them, convenient
    for debugging.
    """
    logging.debug("Attributes set:")
    for name, value in inspect.getmembers(instance):
        attribute = getattr(instance, name)
        # Skip dunder members, callables and empty/false values
        if name.startswith("__") or callable(attribute) or not value:
            continue
        logging.debug("    %s: %s", name, value)
1275
1276
def get_full_pci_id(pci_id):
    """
    Get full PCI ID of pci_id.

    @param pci_id: PCI ID of a device.
    @return: The full (domain-qualified) PCI ID, or None if lspci fails.
    """
    status, full_id = commands.getstatusoutput(
        "lspci -D | awk '/%s/ {print $1}'" % pci_id)
    return full_id if status == 0 else None
1288
1289
def get_vendor_from_pci_id(pci_id):
    """
    Check out the device vendor ID according to pci_id.

    @param pci_id: PCI ID of a device.
    @return: lspci's vendor:device field with the colon replaced by a space.
    """
    lspci_field = commands.getoutput("lspci -n | awk '/%s/ {print $3}'"
                                     % pci_id)
    return lspci_field.replace(":", " ")
1298
1299
def get_cpu_flags():
    """
    Returns a list of the CPU flags read from /proc/cpuinfo.
    """
    flags_pattern = re.compile(r'^flags\s*:(.*)')
    for cpuinfo_line in open('/proc/cpuinfo').readlines():
        hit = flags_pattern.match(cpuinfo_line)
        if hit:
            return hit.group(1).split()
    return []
1310
1311
def get_cpu_vendor(cpu_flags=None, verbose=True):
    """
    Returns the name of the CPU vendor, either 'intel', 'amd' or 'unknown'.

    Detection is based on the virtualization flags: 'vmx' indicates Intel,
    'svm' indicates AMD.

    @param cpu_flags: List of CPU flags.  When empty or None, the flags are
            read from /proc/cpuinfo via get_cpu_flags().
    @param verbose: Whether to log the detected vendor.
    """
    # The original signature used the mutable default cpu_flags=[]; None is
    # backward compatible because empty lists took this branch anyway.
    if not cpu_flags:
        cpu_flags = get_cpu_flags()

    if 'vmx' in cpu_flags:
        vendor = 'intel'
    elif 'svm' in cpu_flags:
        vendor = 'amd'
    else:
        vendor = 'unknown'

    if verbose:
        logging.debug("Detected CPU vendor as '%s'", vendor)
    return vendor
1329
1330
def get_archive_tarball_name(source_dir, tarball_name, compression):
    '''
    Get the name for a tarball file, based on source, name and compression

    @param source_dir: Directory being archived; its basename is used when
            tarball_name is None.
    @param tarball_name: Desired archive name, or None.
    @param compression: Compression suffix to append (e.g. 'bz2'), or a
            falsy value for no compression suffix.
    '''
    name = tarball_name
    if name is None:
        name = os.path.basename(source_dir)

    if not name.endswith('.tar'):
        name = '%s.tar' % name

    if compression and not name.endswith('.%s' % compression):
        name = '%s.%s' % (name, compression)

    return name
1345
1346
def archive_as_tarball(source_dir, dest_dir, tarball_name=None,
                       compression='bz2', verbose=True):
    '''
    Saves the given source directory to the given destination as a tarball

    If the name of the archive is omitted, it will be taken from the
    source_dir. If it is an absolute path, dest_dir will be ignored. But,
    if both the destination directory and tarball name is given, and the
    latter is not an absolute path, they will be combined.

    For archiving directory '/tmp' in '/net/server/backup' as file
    'tmp.tar.bz2', simply use:

    >>> virt_utils.archive_as_tarball('/tmp', '/net/server/backup')

    To save the file it with a different name, say 'host1-tmp.tar.bz2'
    and save it under '/net/server/backup', use:

    >>> virt_utils.archive_as_tarball('/tmp', '/net/server/backup',
                                      'host1-tmp')

    To save with gzip compression instead (resulting in the file
    '/net/server/backup/host1-tmp.tar.gz'), use:

    >>> virt_utils.archive_as_tarball('/tmp', '/net/server/backup',
                                      'host1-tmp', 'gz')
    '''
    tarball_name = get_archive_tarball_name(source_dir,
                                            tarball_name,
                                            compression)
    if not os.path.isabs(tarball_name):
        tarball_path = os.path.join(dest_dir, tarball_name)
    else:
        tarball_path = tarball_name

    if verbose:
        logging.debug('Archiving %s as %s' % (source_dir,
                                              tarball_path))

    # NOTE(review): this changes the process-wide working directory and
    # never restores it -- kept for backward compatibility with callers
    # that may rely on it.
    os.chdir(os.path.dirname(source_dir))
    # Bug fix: the original code instantiated TarFile(name=..., mode='w')
    # and then called the open() classmethod on that instance, opening the
    # target file twice and leaking the first file handle.  Open the file
    # once, with the requested compression, via the module-level factory.
    tarball = tarfile.open(name=tarball_path, mode='w:%s' % compression)
    tarball.add(os.path.basename(source_dir))
    tarball.close()
1391
1392
class Thread(threading.Thread):
    """
    Run a function in a background thread.

    Unlike threading.Thread, join() re-raises any exception raised by the
    target (after inserting the joining thread's error context) and returns
    the target's return value on success.
    """
    def __init__(self, target, args=(), kwargs={}):
        """
        Initialize the instance.

        @param target: Function to run in the thread.
        @param args: Arguments to pass to target.
        @param kwargs: Keyword arguments to pass to target.
        """
        # NOTE(review): the mutable default kwargs={} is only safe because
        # it is never mutated here; run() deletes the reference after use.
        threading.Thread.__init__(self)
        self._target = target
        self._args = args
        self._kwargs = kwargs


    def run(self):
        """
        Run target (passed to the constructor).  No point in calling this
        function directly.  Call start() to make this function run in a new
        thread.
        """
        # _e stores sys.exc_info() of any exception raised by target so that
        # join() can re-raise it in the joining thread; _retval stores the
        # target's return value on success.
        self._e = None
        self._retval = None
        try:
            try:
                self._retval = self._target(*self._args, **self._kwargs)
            except:
                # Save full exception info, then re-raise so the default
                # thread exception reporting still happens.
                self._e = sys.exc_info()
                raise
        finally:
            # Avoid circular references (start() may be called only once so
            # it's OK to delete these)
            del self._target, self._args, self._kwargs


    def join(self, timeout=None, suppress_exception=False):
        """
        Join the thread.  If target raised an exception, re-raise it.
        Otherwise, return the value returned by target.

        @param timeout: Timeout value to pass to threading.Thread.join().
        @param suppress_exception: If True, don't re-raise the exception.
        """
        threading.Thread.join(self, timeout)
        try:
            if self._e:
                if not suppress_exception:
                    # Because the exception was raised in another thread, we
                    # need to explicitly insert the current context into it
                    s = error.exception_context(self._e[1])
                    s = error.join_contexts(error.get_context(), s)
                    error.set_exception_context(self._e[1], s)
                    # Python 2 three-argument raise: re-raise with the
                    # original traceback preserved.
                    raise self._e[0], self._e[1], self._e[2]
            else:
                return self._retval
        finally:
            # Avoid circular references (join() may be called multiple times
            # so we can't delete these)
            self._e = None
            self._retval = None
1456
1457
def parallel(targets):
    """
    Run multiple functions in parallel.

    @param targets: A sequence of tuples or functions.  If it's a sequence of
            tuples, each tuple will be interpreted as (target, args, kwargs) or
            (target, args) or (target,) depending on its length.  If it's a
            sequence of functions, the functions will be called without
            arguments.
    @return: A list of the values returned by the functions called.
    """
    threads = []
    for target in targets:
        if isinstance(target, (tuple, list)):
            thread = Thread(*target)
        else:
            thread = Thread(target)
        threads.append(thread)
        thread.start()
    # Thread.join() returns the target's return value (or re-raises its
    # exception), so collecting joins yields the results in order.
    return [thread.join() for thread in threads]
1478
1479
class VirtLoggingConfig(logging_config.LoggingConfig):
    """
    Used with the sole purpose of providing convenient logging setup
    for the KVM test auxiliary programs.
    """
    def configure_logging(self, results_dir=None, verbose=False):
        """
        Configure logging for the virt test auxiliary programs.

        @param results_dir: Accepted for API compatibility; not used here.
        @param verbose: Enable verbose console output.
        """
        # Always log to the console; everything else is delegated to the
        # base class.
        super(VirtLoggingConfig, self).configure_logging(use_console=True,
                                                         verbose=verbose)
1488
1489
class PciAssignable(object):
    """
    Request PCI assignable devices on host. It will check whether to request
    PF (physical Functions) or VF (Virtual Functions).
    """
    def __init__(self, type="vf", driver=None, driver_option=None,
                 names=None, devices_requested=None):
        """
        Initialize parameter 'type' which could be:
        vf: Virtual Functions
        pf: Physical Function (actual hardware)
        mixed:  Both includes VFs and PFs

        If pass through Physical NIC cards, we need to specify which devices
        to be assigned, e.g. 'eth1 eth2'.

        If pass through Virtual Functions, we need to specify how many vfs
        are going to be assigned, e.g. passthrough_count = 8 and max_vfs in
        config file.

        @param type: PCI device type.
        @param driver: Kernel module for the PCI assignable device.
        @param driver_option: Module option to specify the maximum number of
                VFs (eg 'max_vfs=7')
        @param names: Physical NIC cards correspondent network interfaces,
                e.g.'eth1 eth2 ...'
        @param devices_requested: Number of devices being requested.
        """
        self.type = type
        self.driver = driver
        self.driver_option = driver_option
        # name_list is only set when names is given; get_pf_devs() assumes
        # it exists, so pf/mixed setups must supply names.
        if names:
            self.name_list = names.split()
        if devices_requested:
            self.devices_requested = int(devices_requested)
        else:
            self.devices_requested = None


    def _get_pf_pci_id(self, name, search_str):
        """
        Get the PF PCI ID according to name.

        @param name: Name of the PCI device (network interface name).
        @param search_str: Search string to be used on lspci.
        @return: PCI ID string, or None if it cannot be determined.
        """
        # First try ethtool, which maps an interface name directly to its
        # bus-info (e.g. "0000:01:00.0"); strip the leading PCI domain.
        cmd = "ethtool -i %s | awk '/bus-info/ {print $2}'" % name
        s, pci_id = commands.getstatusoutput(cmd)
        if not (s or "Cannot get driver information" in pci_id):
            return pci_id[5:]
        # Fall back to positional matching: the Nth interface (digit taken
        # from the interface name) maps to the Nth lspci match.
        cmd = "lspci | awk '/%s/ {print $1}'" % search_str
        pci_ids = commands.getoutput(cmd).splitlines()
        nic_id = int(re.search('[0-9]+', name).group(0))
        if (len(pci_ids) - 1) < nic_id:
            return None
        return pci_ids[nic_id]


    def _release_dev(self, pci_id):
        """
        Release a single PCI device back to its original host driver.

        @param pci_id: PCI ID of a given PCI device.
        @return: True on success (or if the device was not bound to
                pci-stub), False on failure.
        """
        base_dir = "/sys/bus/pci"
        full_id = get_full_pci_id(pci_id)
        vendor_id = get_vendor_from_pci_id(pci_id)
        drv_path = os.path.join(base_dir, "devices/%s/driver" % full_id)
        if 'pci-stub' in os.readlink(drv_path):
            cmd = "echo '%s' > %s/new_id" % (vendor_id, drv_path)
            if os.system(cmd):
                return False

            stub_path = os.path.join(base_dir, "drivers/pci-stub")
            cmd = "echo '%s' > %s/unbind" % (full_id, stub_path)
            if os.system(cmd):
                return False

            # Re-bind the device to the driver it used before request_devs()
            driver = self.dev_drivers[pci_id]
            cmd = "echo '%s' > %s/bind" % (full_id, driver)
            if os.system(cmd):
                return False

        return True


    def get_vf_devs(self):
        """
        Catch all VFs PCI IDs.

        @return: List with all PCI IDs for the Virtual Functions avaliable
        """
        if not self.sr_iov_setup():
            return []

        cmd = "lspci | awk '/Virtual Function/ {print $1}'"
        return commands.getoutput(cmd).split()


    def get_pf_devs(self):
        """
        Catch all PFs PCI IDs.

        @return: List with all PCI IDs for the physical hardware requested
        """
        pf_ids = []
        for name in self.name_list:
            pf_id = self._get_pf_pci_id(name, "Ethernet")
            if not pf_id:
                continue
            pf_ids.append(pf_id)
        return pf_ids


    def get_devs(self, count):
        """
        Check out all devices' PCI IDs according to their name.

        @param count: count number of PCI devices needed for pass through
        @return: a list of all devices' PCI IDs
        """
        # NOTE(review): an unrecognized self.type would leave vf_ids unset
        # and raise NameError here; valid values are "vf", "pf" and "mixed".
        if self.type == "vf":
            vf_ids = self.get_vf_devs()
        elif self.type == "pf":
            vf_ids = self.get_pf_devs()
        elif self.type == "mixed":
            vf_ids = self.get_vf_devs()
            vf_ids.extend(self.get_pf_devs())
        return vf_ids[0:count]


    def get_vfs_count(self):
        """
        Get VFs count number according to lspci.
        """
        # FIXME: Need to think out a method of identify which
        # 'virtual function' belongs to which physical card considering
        # that if the host has more than one 82576 card. PCI_ID?
        cmd = "lspci | grep 'Virtual Function' | wc -l"
        return int(commands.getoutput(cmd))


    def check_vfs_count(self):
        """
        Check VFs count number according to the parameter driver_options.

        @return: True if the VF count reported by lspci matches the count
                expected from driver_option, False otherwise.
        """
        # Network card 82576 has two network interfaces and each can be
        # virtualized up to 7 virtual functions, therefore we multiply
        # two for the value of driver_option 'max_vfs'.
        # \d+ (instead of the original \d) parses multi-digit values such
        # as 'max_vfs=16' whole, not just their first digit.
        expected_count = int(re.findall(r"(\d+)", self.driver_option)[0]) * 2
        # Bug fix: the original compared the bound method object
        # self.get_vfs_count (without calling it) to an int, which is
        # always False; the method must be invoked.
        return (self.get_vfs_count() == expected_count)


    def is_binded_to_stub(self, full_id):
        """
        Verify whether the device with full_id is already binded to pci-stub.

        @param full_id: Full ID for the given PCI device
        @return: True if a pci-stub entry exists for the device.
        """
        base_dir = "/sys/bus/pci"
        stub_path = os.path.join(base_dir, "drivers/pci-stub")
        if os.path.exists(os.path.join(stub_path, full_id)):
            return True
        return False


    def sr_iov_setup(self):
        """
        Ensure the PCI device is working in sr_iov mode.

        Check if the PCI hardware device drive is loaded with the appropriate,
        parameters (number of VFs), and if it's not, perform setup.

        @return: True, if the setup was completed successfuly, False otherwise.
        """
        re_probe = False
        s, o = commands.getstatusoutput('lsmod | grep %s' % self.driver)
        if s:
            # Driver not loaded at all
            re_probe = True
        elif not self.check_vfs_count():
            # Driver loaded with the wrong number of VFs; unload first
            os.system("modprobe -r %s" % self.driver)
            re_probe = True
        else:
            return True

        # Re-probe driver with proper number of VFs
        if re_probe:
            cmd = "modprobe %s %s" % (self.driver, self.driver_option)
            logging.info("Loading the driver '%s' with option '%s'",
                         self.driver, self.driver_option)
            s, o = commands.getstatusoutput(cmd)
            if s:
                return False
            return True


    def request_devs(self):
        """
        Implement setup process: unbind the PCI device and then bind it
        to the pci-stub driver.

        @return: a list of successfully requested devices' PCI IDs.
        """
        base_dir = "/sys/bus/pci"
        stub_path = os.path.join(base_dir, "drivers/pci-stub")

        self.pci_ids = self.get_devs(self.devices_requested)
        logging.debug("The following pci_ids were found: %s", self.pci_ids)
        requested_pci_ids = []
        # Maps pci_id -> original driver path, used by _release_dev() to
        # re-bind devices on release.
        self.dev_drivers = {}

        # Setup all devices specified for assignment to guest
        for pci_id in self.pci_ids:
            full_id = get_full_pci_id(pci_id)
            if not full_id:
                continue
            drv_path = os.path.join(base_dir, "devices/%s/driver" % full_id)
            dev_prev_driver = os.path.realpath(os.path.join(drv_path,
                                               os.readlink(drv_path)))
            self.dev_drivers[pci_id] = dev_prev_driver

            # Judge whether the device driver has been binded to stub
            if not self.is_binded_to_stub(full_id):
                logging.debug("Binding device %s to stub", full_id)
                vendor_id = get_vendor_from_pci_id(pci_id)
                stub_new_id = os.path.join(stub_path, 'new_id')
                unbind_dev = os.path.join(drv_path, 'unbind')
                stub_bind = os.path.join(stub_path, 'bind')

                info_write_to_files = [(vendor_id, stub_new_id),
                                       (full_id, unbind_dev),
                                       (full_id, stub_bind)]

                for content, path in info_write_to_files:
                    try:
                        utils.open_write_close(path, content)
                    except IOError:
                        logging.debug("Failed to write %s to file %s", content,
                                      path)
                        continue

                if not self.is_binded_to_stub(full_id):
                    logging.error("Binding device %s to stub failed", pci_id)
                    continue
            else:
                logging.debug("Device %s already binded to stub", pci_id)
            requested_pci_ids.append(pci_id)
        self.pci_ids = requested_pci_ids
        return self.pci_ids


    def release_devs(self):
        """
        Release all PCI devices currently assigned to VMs back to the
        virtualization host.
        """
        try:
            for pci_id in self.dev_drivers:
                if not self._release_dev(pci_id):
                    logging.error("Failed to release device %s to host", pci_id)
                else:
                    logging.info("Released device %s successfully", pci_id)
        except Exception:
            # Best-effort cleanup: errors (e.g. request_devs() never ran,
            # so self.dev_drivers does not exist) are deliberately ignored.
            return
1754
1755
class KojiClient(object):
    """
    Establishes a connection with the build system, either koji or brew.

    This class provides convenience methods to retrieve information on packages
    and the packages themselves hosted on the build system. Packages should be
    specified in the KojiPgkSpec syntax.
    """

    # Lookup order matters: systems set up for brew usually have *both*
    # clients installed, so brew must be probed first
    CMD_LOOKUP_ORDER = ['/usr/bin/brew', '/usr/bin/koji']

    CONFIG_MAP = {'/usr/bin/brew': '/etc/brewkoji.conf',
                  '/usr/bin/koji': '/etc/koji.conf'}


    def __init__(self, cmd=None):
        """
        Verifies whether the system has koji or brew installed, then loads
        the configuration file that will be used to download the files.

        @type cmd: string
        @param cmd: Optional command name, either 'brew' or 'koji'. If not
                set, get_default_command() is used and to look for
                one of them.
        @raise: ValueError
        """
        if not KOJI_INSTALLED:
            raise ValueError('No koji/brew installed on the machine')

        # Instance variables used by many methods
        self.command = None
        self.config = None
        self.config_options = {}
        self.session = None

        # Set koji command or get default
        if cmd is None:
            self.command = self.get_default_command()
        else:
            self.command = cmd

        # Check koji command
        if not self.is_command_valid():
            raise ValueError('Koji command "%s" is not valid' % self.command)

        # Assuming command is valid, set configuration file and read it
        self.config = self.CONFIG_MAP[self.command]
        self.read_config()

        # Setup koji session
        server_url = self.config_options['server']
        session_options = self.get_session_options()
        self.session = koji.ClientSession(server_url,
                                          session_options)


    def read_config(self, check_is_valid=True):
        '''
        Reads options from the Koji configuration file

        By default it checks if the koji configuration is valid

        @type check_valid: boolean
        @param check_valid: whether to include a check on the configuration
        @raises: ValueError
        @returns: None
        '''
        if check_is_valid:
            if not self.is_config_valid():
                raise ValueError('Koji config "%s" is not valid' % self.config)

        config = ConfigParser.ConfigParser()
        config.read(self.config)

        # Options live in a section named after the command's base name
        # ('koji' or 'brew')
        basename = os.path.basename(self.command)
        for name, value in config.items(basename):
            self.config_options[name] = value


    def get_session_options(self):
        '''
        Filter only options necessary for setting up a koji client session

        @returns: only the options used for session setup
        '''
        session_options = {}
        for name, value in self.config_options.items():
            if name in ('user', 'password', 'debug_xmlrpc', 'debug'):
                session_options[name] = value
        return session_options


    def is_command_valid(self):
        '''
        Checks if the currently set koji command is valid

        @returns: True or False
        '''
        koji_command_ok = True

        if not os.path.isfile(self.command):
            logging.error('Koji command "%s" is not a regular file',
                          self.command)
            koji_command_ok = False

        # Deliberately non-fatal: a non-executable command file is unusual
        # but does not prevent us from mapping it to a configuration file
        if not os.access(self.command, os.X_OK):
            logging.warning('Koji command "%s" is not executable: this is '
                         'not fatal but indicates an unexpected situation',
                         self.command)

        if self.command not in self.CONFIG_MAP.keys():
            logging.error('Koji command "%s" does not have a configuration '
                          'file associated to it', self.command)
            koji_command_ok = False

        return koji_command_ok


    def is_config_valid(self):
        '''
        Checks if the currently set koji configuration is valid

        @returns: True or False
        '''
        koji_config_ok = True

        if not os.path.isfile(self.config):
            logging.error('Koji config "%s" is not a regular file', self.config)
            koji_config_ok = False

        if not os.access(self.config, os.R_OK):
            logging.error('Koji config "%s" is not readable', self.config)
            koji_config_ok = False

        config = ConfigParser.ConfigParser()
        config.read(self.config)
        basename = os.path.basename(self.command)
        if not config.has_section(basename):
            logging.error('Koji configuration file "%s" does not have a '
                          'section "%s", named after the base name of the '
                          'currently set koji command "%s"', self.config,
                           basename, self.command)
            koji_config_ok = False

        return koji_config_ok


    def get_default_command(self):
        '''
        Looks up for koji or brew "binaries" on the system

        Systems with plain koji usually don't have a brew cmd, while systems
        with koji, have *both* koji and brew utilities. So we look for brew
        first, and if found, we consider that the system is configured for
        brew. If not, we consider this is a system with plain koji.

        @returns: either koji or brew command line executable path, or None
        '''
        koji_command = None
        for command in self.CMD_LOOKUP_ORDER:
            if os.path.isfile(command):
                koji_command = command
                break
            else:
                # Fall back to a PATH lookup by base name. Note the base
                # name must come from the candidate 'command' being probed;
                # the original code used 'koji_command' here, which is
                # still None at this point and raised a TypeError.
                koji_command_basename = os.path.basename(command)
                try:
                    koji_command = os_dep.command(koji_command_basename)
                    break
                except ValueError:
                    pass
        return koji_command


    def get_pkg_info(self, pkg):
        '''
        Returns information from Koji on the package

        @type pkg: KojiPkgSpec
        @param pkg: information about the package, as a KojiPkgSpec instance

        @returns: information from Koji about the specified package
        '''
        info = {}
        if pkg.build is not None:
            info = self.session.getBuild(int(pkg.build))
        elif pkg.tag is not None and pkg.package is not None:
            builds = self.session.listTagged(pkg.tag,
                                             latest=True,
                                             inherit=True,
                                             package=pkg.package)
            if builds:
                info = builds[0]
        return info


    def is_pkg_valid(self, pkg):
        '''
        Checks if this package is altogether valid on Koji

        This verifies if the build or tag specified in the package
        specification actually exist on the Koji server

        @returns: True or False
        '''
        valid = True
        if pkg.build:
            if not self.is_pkg_spec_build_valid(pkg):
                valid = False
        elif pkg.tag:
            if not self.is_pkg_spec_tag_valid(pkg):
                valid = False
        else:
            valid = False
        return valid


    def is_pkg_spec_build_valid(self, pkg):
        '''
        Checks if build is valid on Koji

        @param pkg: a Pkg instance
        '''
        if pkg.build is not None:
            info = self.session.getBuild(int(pkg.build))
            if info:
                return True
        return False


    def is_pkg_spec_tag_valid(self, pkg):
        '''
        Checks if tag is valid on Koji

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        '''
        if pkg.tag is not None:
            tag = self.session.getTag(pkg.tag)
            if tag:
                return True
        return False


    def get_pkg_rpm_info(self, pkg, arch=None):
        '''
        Returns a list of infomation on the RPM packages found on koji

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        @type arch: string
        @param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        if arch is None:
            arch = utils.get_arch()
        rpms = []
        info = self.get_pkg_info(pkg)
        if info:
            rpms = self.session.listRPMs(buildID=info['id'],
                                         arches=[arch, 'noarch'])
            if pkg.subpackages:
                rpms = [d for d in rpms if d['name'] in pkg.subpackages]
        return rpms


    def get_pkg_rpm_names(self, pkg, arch=None):
        '''
        Gets the names for the RPM packages specified in pkg

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        @type arch: string
        @param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        if arch is None:
            arch = utils.get_arch()
        rpms = self.get_pkg_rpm_info(pkg, arch)
        return [rpm['name'] for rpm in rpms]


    def get_pkg_rpm_file_names(self, pkg, arch=None):
        '''
        Gets the file names for the RPM packages specified in pkg

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        @type arch: string
        @param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        if arch is None:
            arch = utils.get_arch()
        rpm_names = []
        rpms = self.get_pkg_rpm_info(pkg, arch)
        for rpm in rpms:
            arch_rpm_name = koji.pathinfo.rpm(rpm)
            rpm_name = os.path.basename(arch_rpm_name)
            rpm_names.append(rpm_name)
        return rpm_names


    def get_pkg_urls(self, pkg, arch=None):
        '''
        Gets the urls for the packages specified in pkg

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        @type arch: string
        @param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        info = self.get_pkg_info(pkg)
        rpms = self.get_pkg_rpm_info(pkg, arch)
        rpm_urls = []
        for rpm in rpms:
            rpm_name = koji.pathinfo.rpm(rpm)
            url = ("%s/%s/%s/%s/%s" % (self.config_options['pkgurl'],
                                       info['package_name'],
                                       info['version'], info['release'],
                                       rpm_name))
            rpm_urls.append(url)
        return rpm_urls


    def get_pkgs(self, pkg, dst_dir, arch=None):
        '''
        Download the packages

        @type pkg: KojiPkgSpec
        @param pkg: a package specification
        @type dst_dir: string
        @param dst_dir: the destination directory, where the downloaded
                packages will be saved on
        @type arch: string
        @param arch: packages built for this architecture, but also including
                architecture independent (noarch) packages
        '''
        rpm_urls = self.get_pkg_urls(pkg, arch)
        for url in rpm_urls:
            utils.get_file(url,
                           os.path.join(dst_dir, os.path.basename(url)))
2098
2099
# Module-wide fallback tag used by KojiPkgSpec when no tag/build is given
DEFAULT_KOJI_TAG = None


def set_default_koji_tag(tag):
    '''
    Sets the module-wide default koji tag

    @param tag: tag name used as a fallback by package specifications
    '''
    global DEFAULT_KOJI_TAG
    DEFAULT_KOJI_TAG = tag


def get_default_koji_tag():
    '''
    Returns the module-wide default koji tag (None if never set)
    '''
    return DEFAULT_KOJI_TAG
2111
2112
class KojiPkgSpec(object):
    '''
    A package specification syntax parser for Koji

    This holds information on either tag or build, and packages to be fetched
    from koji and possibly installed (features external do this class).

    New objects can be created either by providing information in the textual
    format or by using the actual parameters for tag, build, package and sub-
    packages. The textual format is useful for command line interfaces and
    configuration files, while using parameters is better for using this in
    a programatic fashion.

    The following sets of examples are interchangeable. Specifying all packages
    part of build number 1000:

        >>> from kvm_utils import KojiPkgSpec
        >>> pkg = KojiPkgSpec('1000')

        >>> pkg = KojiPkgSpec(build=1000)

    Specifying only a subset of packages of build number 1000:

        >>> pkg = KojiPkgSpec('1000:kernel,kernel-devel')

        >>> pkg = KojiPkgSpec(build=1000,
                              subpackages=['kernel', 'kernel-devel'])

    Specifying the latest build for the 'kernel' package tagged with 'dist-f14':

        >>> pkg = KojiPkgSpec('dist-f14:kernel')

        >>> pkg = KojiPkgSpec(tag='dist-f14', package='kernel')

    Specifying the 'kernel' package using the default tag:

        >>> kvm_utils.set_default_koji_tag('dist-f14')
        >>> pkg = KojiPkgSpec('kernel')

        >>> pkg = KojiPkgSpec(package='kernel')

    If you do not specify a default tag, and give a package name without an
    explicit tag, your package specification is considered invalid:

        >>> print kvm_utils.get_default_koji_tag()
        None
        >>> print kvm_utils.KojiPkgSpec('kernel').is_valid()
        False

        >>> print kvm_utils.KojiPkgSpec(package='kernel').is_valid()
        False
    '''

    SEP = ':'

    def __init__(self, text='', tag=None, build=None,
                 package=None, subpackages=None):
        '''
        Instantiates a new KojiPkgSpec object

        @type text: string
        @param text: a textual representation of a package on Koji that
                will be parsed
        @type tag: string
        @param tag: a koji tag, example: Fedora-14-RELEASE
                (see U{http://fedoraproject.org/wiki/Koji#Tags_and_Targets})
        @type build: number
        @param build: a koji build, example: 1001
                (see U{http://fedoraproject.org/wiki/Koji#Koji_Architecture})
        @type package: string
        @param package: a koji package, example: python
                (see U{http://fedoraproject.org/wiki/Koji#Koji_Architecture})
        @type subpackages: list of strings
        @param subpackages: a list of package names, usually a subset of
                the RPM packages generated by a given build. Defaults to
                an empty list (None is used as the default sentinel to
                avoid sharing one mutable list across instances).
        '''

        # Set to None to indicate 'not set' (and be able to use 'is')
        self.tag = None
        self.build = None
        self.package = None
        self.subpackages = []

        self.default_tag = None

        # Textual representation takes precedence (most common use case)
        if text:
            self.parse(text)
        else:
            self.tag = tag
            self.build = build
            self.package = package
            if subpackages is not None:
                self.subpackages = subpackages

        # Set the default tag, if set, as a fallback
        if not self.build and not self.tag:
            default_tag = get_default_koji_tag()
            if default_tag is not None:
                self.tag = default_tag


    def parse(self, text):
        '''
        Parses a textual representation of a package specification

        @type text: string
        @param text: textual representation of a package in koji
        '''
        parts = text.count(self.SEP) + 1
        if parts == 1:
            if text.isdigit():
                self.build = text
            else:
                self.package = text
        elif parts == 2:
            part1, part2 = text.split(self.SEP)
            if part1.isdigit():
                self.build = part1
                self.subpackages = part2.split(',')
            else:
                self.tag = part1
                self.package = part2
        elif parts >= 3:
            # Instead of erroring on more arguments, we simply ignore them
            # This makes the parser suitable for future syntax additions, such
            # as specifying the package architecture
            part1, part2, part3 = text.split(self.SEP)[0:3]
            self.tag = part1
            self.package = part2
            self.subpackages = part3.split(',')


    def _is_invalid_neither_tag_or_build(self):
        '''
        Checks if this package is invalid due to not having either a valid
        tag or build set, that is, both are empty.

        @returns: True if this is invalid and False if it's valid
        '''
        return (self.tag is None and self.build is None)


    def _is_invalid_package_but_no_tag(self):
        '''
        Checks if this package is invalid due to having a package name set
        but tag or build set, that is, both are empty.

        @returns: True if this is invalid and False if it's valid
        '''
        return (self.package and not self.tag)


    def _is_invalid_subpackages_but_no_main_package(self):
        '''
        Checks if this package is invalid due to having a tag set (this is Ok)
        but specifying subpackage names without specifying the main package
        name.

        Specifying subpackages without a main package name is only valid when
        a build is used instead of a tag.

        @returns: True if this is invalid and False if it's valid
        '''
        return (self.tag and self.subpackages and not self.package)


    def is_valid(self):
        '''
        Checks if this package specification is valid.

        Being valid means that it has enough and not conflicting information.
        It does not validate that the packages specified actually exist on
        the Koji server.

        @returns: True or False
        '''
        if self._is_invalid_neither_tag_or_build():
            return False
        elif self._is_invalid_package_but_no_tag():
            return False
        elif self._is_invalid_subpackages_but_no_main_package():
            return False

        return True


    def describe_invalid(self):
        '''
        Describes why this is not valid, in a human friendly way
        '''
        if self._is_invalid_neither_tag_or_build():
            return 'neither a tag or build are set, and of them should be set'
        elif self._is_invalid_package_but_no_tag():
            return 'package name specified but no tag is set'
        elif self._is_invalid_subpackages_but_no_main_package():
            return 'subpackages specified but no main package is set'

        return 'unkwown reason, seems to be valid'


    def describe(self):
        '''
        Describe this package specification, in a human friendly way

        @returns: package specification description
        '''
        if self.is_valid():
            description = ''
            if not self.subpackages:
                description += 'all subpackages from %s ' % self.package
            else:
                description += ('only subpackage(s) %s from package %s ' %
                                (', '.join(self.subpackages), self.package))

            if self.build:
                description += 'from build %s' % self.build
            elif self.tag:
                description += 'tagged with %s' % self.tag
            else:
                # Parenthesized raise form works on both Python 2 and 3
                # (the old 'raise ValueError, msg' syntax is Python 2 only)
                raise ValueError('neither build or tag is set')

            return description
        else:
            return ('Invalid package specification: %s' %
                    self.describe_invalid())


    def __repr__(self):
        return ("<KojiPkgSpec tag=%s build=%s pkg=%s subpkgs=%s>" %
                (self.tag, self.build, self.package,
                 ", ".join(self.subpackages)))
2351
2352
def umount(src, mount_point, type):
    """
    Umount the src mounted in mount_point.

    @src: mount source
    @mount_point: mount point
    @type: file system type
    @return: True if src is not mounted or was umounted successfully,
            False if the umount command failed.
    """

    mount_string = "%s %s %s" % (src, mount_point, type)
    # Use open() and close the handle explicitly; the previous file()
    # call leaked the file object (and 'file' is a deprecated builtin)
    mtab = open("/etc/mtab")
    try:
        mounted = mount_string in mtab.read()
    finally:
        mtab.close()

    if mounted:
        umount_cmd = "umount %s" % mount_point
        try:
            utils.system(umount_cmd)
            return True
        except error.CmdError:
            return False
    else:
        logging.debug("%s is not mounted under %s", src, mount_point)
        return True
2373
2374
def mount(src, mount_point, type, perm="rw"):
    """
    Mount the src into mount_point of the host.

    @src: mount source
    @mount_point: mount point
    @type: file system type
    @perm: mount permission
    @return: True if src is already (or gets successfully) mounted,
            False otherwise.
    """
    def _read_mtab():
        # Read and close /etc/mtab; the previous file() calls leaked the
        # file objects (and 'file' is a deprecated builtin)
        mtab = open("/etc/mtab")
        try:
            return mtab.read()
        finally:
            mtab.close()

    umount(src, mount_point, type)
    mount_string = "%s %s %s %s" % (src, mount_point, type, perm)

    if mount_string in _read_mtab():
        logging.debug("%s is already mounted in %s with %s",
                      src, mount_point, perm)
        return True

    mount_cmd = "mount -t %s %s %s -o %s" % (type, src, mount_point, perm)
    try:
        utils.system(mount_cmd)
    except error.CmdError:
        return False

    logging.debug("Verify the mount through /etc/mtab")
    if mount_string in _read_mtab():
        logging.debug("%s is successfully mounted", src)
        return True
    else:
        logging.error("Can't find mounted NFS share - /etc/mtab contents \n%s",
                      _read_mtab())
        return False
2406
2407
class GitRepoHelper(object):
    '''
    Helps to deal with git repos, mostly fetching content from a repo
    '''
    def __init__(self, uri, branch, destination_dir, commit=None, lbranch=None):
        '''
        Instantiates a new GitRepoHelper

        @type uri: string
        @param uri: git repository url
        @type branch: string
        @param branch: git remote branch
        @type destination_dir: string
        @param destination_dir: path of a dir where to save downloaded code
        @type commit: string
        @param commit: specific commit to download
        @type lbranch: string
        @param lbranch: git local branch name, if different from remote
        '''
        self.uri = uri
        self.branch = branch
        self.destination_dir = destination_dir
        self.commit = commit
        # Default the local branch to the remote branch name. The original
        # code only assigned self.lbranch when lbranch was None, so passing
        # an explicit lbranch left the attribute unset and broke fetch()
        # and checkout() with an AttributeError.
        if lbranch is None:
            self.lbranch = branch
        else:
            self.lbranch = lbranch


    def init(self):
        '''
        Initializes a directory for receiving a verbatim copy of git repo

        This creates a directory if necessary, and either resets or inits
        the repo
        '''
        if not os.path.exists(self.destination_dir):
            logging.debug('Creating directory %s for git repo %s',
                          self.destination_dir, self.uri)
            os.makedirs(self.destination_dir)

        os.chdir(self.destination_dir)

        if os.path.exists('.git'):
            logging.debug('Resetting previously existing git repo at %s for '
                          'receiving git repo %s',
                          self.destination_dir, self.uri)
            utils.system('git reset --hard')
        else:
            logging.debug('Initializing new git repo at %s for receiving '
                          'git repo %s',
                          self.destination_dir, self.uri)
            utils.system('git init')


    def fetch(self):
        '''
        Performs a git fetch from the remote repo
        '''
        logging.info("Fetching git [REP '%s' BRANCH '%s'] -> %s",
                     self.uri, self.branch, self.destination_dir)
        os.chdir(self.destination_dir)
        utils.system("git fetch -q -f -u -t %s %s:%s" % (self.uri,
                                                         self.branch,
                                                         self.lbranch))


    def checkout(self):
        '''
        Performs a git checkout for a given branch and start point (commit)
        '''
        os.chdir(self.destination_dir)

        logging.debug('Checking out local branch %s', self.lbranch)
        utils.system("git checkout %s" % self.lbranch)

        if self.commit is not None:
            logging.debug('Checking out commit %s', self.commit)
            utils.system("git checkout %s" % self.commit)

        h = utils.system_output('git log --pretty=format:"%H" -1').strip()
        try:
            desc = "tag %s" % utils.system_output("git describe")
        except error.CmdError:
            desc = "no tag found"

        # self.name is only set by subclasses (e.g. GitRepoParamHelper);
        # fall back to the uri so direct GitRepoHelper use doesn't raise
        # an AttributeError here
        logging.info("Commit hash for %s is %s (%s)",
                     getattr(self, 'name', self.uri), h, desc)


    def execute(self):
        '''
        Performs all steps necessary to initialize and download a git repo

        This includes the init, fetch and checkout steps in one single
        utility method.
        '''
        self.init()
        self.fetch()
        self.checkout()
2505
2506
class GitRepoParamHelper(GitRepoHelper):
    '''
    Helps to deal with git repos specified in cartersian config files

    This class attempts to make it simple to manage a git repo, by using a
    naming standard that follows this basic syntax:

    <prefix>_name_<suffix>

    <prefix> is always 'git_repo' and <suffix> sets options for this git repo.
    Example for repo named foo:

    git_repo_foo_uri = git://git.foo.org/foo.git
    git_repo_foo_branch = master
    git_repo_foo_lbranch = master
    git_repo_foo_commit = bb5fb8e678aabe286e74c4f2993dc2a9e550b627
    '''
    def __init__(self, params, name, destination_dir):
        '''
        Instantiates a new GitRepoParamHelper
        '''
        self.params = params
        self.name = name
        self.destination_dir = destination_dir
        self._parse_params()


    def _parse_params(self):
        '''
        Parses the params items for entries related to this repo

        Sets every instance attribute the parent class methods rely on,
        so calling the parent's __init__() is not strictly necessary.
        '''
        prefix = 'git_repo_%s' % self.name
        logging.debug('Parsing parameters for git repo %s, configuration '
                      'prefix is %s' % (self.name, prefix))

        self.uri = self.params.get('%s_uri' % prefix)
        logging.debug('Git repo %s uri: %s' % (self.name, self.uri))

        self.branch = self.params.get('%s_branch' % prefix, 'master')
        logging.debug('Git repo %s branch: %s' % (self.name, self.branch))

        # Local branch defaults to the remote branch name
        lbranch = self.params.get('%s_lbranch' % prefix)
        self.lbranch = self.branch if lbranch is None else lbranch
        logging.debug('Git repo %s lbranch: %s' % (self.name, self.lbranch))

        self.commit = self.params.get('%s_commit' % prefix)
        if self.commit is None:
            logging.debug('Git repo %s commit is not set' % self.name)
        else:
            logging.debug('Git repo %s commit: %s' % (self.name, self.commit))
2563
2564
class LocalSourceDirHelper(object):
    '''
    Helper class to deal with source code sitting somewhere in the filesystem
    '''
    def __init__(self, source_dir, destination_dir):
        '''
        @param source_dir: path to the directory holding the source code
        @param destination_dir: path the source will be copied to
        @return: new LocalSourceDirHelper instance
        '''
        self.source = source_dir
        self.destination = destination_dir


    def execute(self):
        '''
        Copies the source directory to the destination directory

        A pre-existing destination is removed first, so the result is
        always a fresh copy of the source tree.
        '''
        destination_present = os.path.isdir(self.destination)
        if destination_present:
            shutil.rmtree(self.destination)

        source_present = os.path.isdir(self.source)
        if source_present:
            shutil.copytree(self.source, self.destination)
2588
2589
class LocalSourceDirParamHelper(LocalSourceDirHelper):
    '''
    Helps to deal with source dirs specified in cartersian config files

    This class attempts to make it simple to manage a source dir, by using a
    naming standard that follows this basic syntax:

    <prefix>_name_<suffix>

    <prefix> is always 'local_src' and <suffix> sets options for this source
    dir.  Example for source dir named foo:

    local_src_foo_path = /home/user/foo
    '''
    def __init__(self, params, name, destination_dir):
        '''
        Instantiate a new LocalSourceDirParamHelper

        @param params: dictionary-like object containing the test parameters
        @param name: source dir name, part of the param key prefix
        @param destination_dir: directory the source will be copied to
        '''
        self.params = params
        self.name = name
        self.destination_dir = destination_dir
        self._parse_params()


    def _parse_params(self):
        '''
        Extracts the source dir related entries from the params
        '''
        prefix = 'local_src_%s' % self.name
        logging.debug('Parsing parameters for local source %s, configuration '
                      'prefix is %s' % (self.name, prefix))

        self.path = self.params.get('%s_path' % prefix)
        logging.debug('Local source directory %s path: %s' % (self.name,
                                                              self.path))
        self.source = self.path
        self.destination = self.destination_dir
2627
2628
class LocalTarHelper(object):
    '''
    Helper class to deal with source code in a local tarball
    '''
    def __init__(self, source, destination_dir):
        '''
        @param source: path to the tarball file
        @param destination_dir: directory the content will be extracted to
        '''
        self.source = source
        self.destination = destination_dir


    def extract(self):
        '''
        Extracts the tarball into the destination directory

        Any previous content of the destination directory is removed first.
        If the tarball has a single directory at its top level, that
        directory is assumed to hold the actual content (usually source
        code) and becomes the destination directory itself.

        @raise OSError: if the source is not a regular file or not a tarball
        '''
        if os.path.isdir(self.destination):
            shutil.rmtree(self.destination)

        if os.path.isfile(self.source) and tarfile.is_tarfile(self.source):

            name = os.path.basename(self.destination)
            temp_dir = os.path.join(os.path.dirname(self.destination),
                                    '%s.tmp' % name)
            logging.debug('Temporary directory for extracting tarball is %s' %
                          temp_dir)

            if not os.path.isdir(temp_dir):
                os.makedirs(temp_dir)

            tarball = tarfile.open(self.source)
            try:
                tarball.extractall(temp_dir)

                #
                # If there's a directory at the toplevel of the tarfile,
                # assume it's the root for the contents, usually source code
                #
                tarball_info = tarball.members[0]
            finally:
                # always release the tarball file handle
                tarball.close()

            if tarball_info.isdir():
                content_path = os.path.join(temp_dir,
                                            tarball_info.name)
            else:
                content_path = temp_dir

            #
            # Now move the content directory to the final destination
            #
            shutil.move(content_path, self.destination)

            # When the content was a subdirectory of temp_dir, the (now
            # empty) temporary directory is left behind; clean it up
            shutil.rmtree(temp_dir, ignore_errors=True)

        else:
            raise OSError("%s is not a file or tar file" % self.source)


    def execute(self):
        '''
        Executes all action this helper is supposed to perform

        This is the main entry point method for this class, and all other
        helper classes.
        '''
        self.extract()
2687
2688
class LocalTarParamHelper(LocalTarHelper):
    '''
    Helps to deal with source tarballs specified in cartersian config files

    This class attempts to make it simple to manage a tarball with source code,
    by using a  naming standard that follows this basic syntax:

    <prefix>_name_<suffix>

    <prefix> is always 'local_tar' and <suffix> sets options for this source
    tarball.  Example for source tarball named foo:

    local_tar_foo_path = /tmp/foo-1.0.tar.gz
    '''
    def __init__(self, params, name, destination_dir):
        '''
        Instantiates a new LocalTarParamHelper

        @param params: dictionary-like object containing the test parameters
        @param name: tarball name, part of the param key prefix
        @param destination_dir: directory the tarball will be extracted to
        '''
        self.params = params
        self.name = name
        self.destination_dir = destination_dir
        self._parse_params()


    def _parse_params(self):
        '''
        Extracts the local tarball related entries from the params
        '''
        prefix = 'local_tar_%s' % self.name
        logging.debug('Parsing parameters for local tar %s, configuration '
                      'prefix is %s' % (self.name, prefix))

        self.path = self.params.get('%s_path' % prefix)
        logging.debug('Local source tar %s path: %s' % (self.name, self.path))
        self.source = self.path
        self.destination = self.destination_dir
2726
2727
class RemoteTarHelper(LocalTarHelper):
    '''
    Helper that fetches a tarball and extracts it locally
    '''
    def __init__(self, source_uri, destination_dir):
        '''
        @param source_uri: URI the tarball will be fetched from
        @param destination_dir: directory the content will be extracted to
        '''
        self.source = source_uri
        self.destination = destination_dir


    def execute(self):
        '''
        Executes all action this helper class is supposed to perform

        This is the main entry point method for this class, and all other
        helper classes.

        This implementation fetches the remote tar file and then extracts
        it using the functionality present in the parent class.
        '''
        name = os.path.basename(self.source)
        # __init__() only sets self.destination; the previous reference to
        # self.destination_dir raised AttributeError when this class was
        # used directly
        base_dest = os.path.dirname(self.destination)
        dest = os.path.join(base_dest, name)
        utils.get_file(self.source, dest)
        self.source = dest
        self.extract()
2753
2754
class RemoteTarParamHelper(RemoteTarHelper):
    '''
    Helps to deal with remote source tarballs specified in cartersian config

    This class attempts to make it simple to manage a tarball with source code,
    by using a  naming standard that follows this basic syntax:

    <prefix>_name_<suffix>

    <prefix> is always 'remote_tar' and <suffix> sets options for this source
    tarball.  Example for source tarball named foo:

    remote_tar_foo_uri = http://foo.org/foo-1.0.tar.gz
    '''
    def __init__(self, params, name, destination_dir):
        '''
        Instantiates a new RemoteTarParamHelper instance

        @param params: dictionary-like object containing the test parameters
        @param name: tarball name, part of the param key prefix
        @param destination_dir: directory the tarball will be extracted to
        '''
        self.params = params
        self.name = name
        self.destination_dir = destination_dir
        self._parse_params()


    def _parse_params(self):
        '''
        Extracts the remote tarball related entries from the params
        '''
        prefix = 'remote_tar_%s' % self.name
        logging.debug('Parsing parameters for remote tar %s, configuration '
                      'prefix is %s' % (self.name, prefix))

        self.uri = self.params.get('%s_uri' % prefix)
        logging.debug('Remote source tar %s uri: %s' % (self.name, self.uri))
        self.source = self.uri
        self.destination = self.destination_dir
2792
2793
class PatchHelper(object):
    '''
    Helper that encapsulates the patching of source code with patch files
    '''
    def __init__(self, source_dir, patches):
        '''
        Initializes a new PatchHelper

        @param source_dir: directory holding the source code to be patched
        @param patches: list of patch locations (anything utils.get_file
                accepts) that will be applied in order
        '''
        self.source_dir = source_dir
        self.patches = patches


    def download(self):
        '''
        Copies patch files from remote locations to the source directory
        '''
        for patch in self.patches:
            utils.get_file(patch, os.path.join(self.source_dir,
                                               os.path.basename(patch)))


    def patch(self):
        '''
        Patches the source dir with all patch files
        '''
        os.chdir(self.source_dir)
        for patch in self.patches:
            # download() placed each patch at the top of source_dir, so the
            # basename alone is enough after the chdir above (the previous
            # patch_file local was computed but never used)
            utils.system('patch -p1 < %s' % os.path.basename(patch))


    def execute(self):
        '''
        Performs all steps necessary to download patches and apply them
        '''
        self.download()
        self.patch()
2832
2833
class PatchParamHelper(PatchHelper):
    '''
    Helps to deal with patches specified in cartersian config files

    This class attempts to make it simple to patch source code, by using a
    naming standard that follows this basic syntax:

    [<git_repo>|<local_src>|<local_tar>|<remote_tar>]_<name>_patches

    <prefix> is either a 'local_src' or 'git_repo', that, together with <name>
    specify a directory containing source code to receive the patches. That is,
    for source code coming from git repo foo, patches would be specified as:

    git_repo_foo_patches = ['http://foo/bar.patch', 'http://foo/baz.patch']

    And for for patches to be applied on local source code named also foo:

    local_src_foo_patches = ['http://foo/bar.patch', 'http://foo/baz.patch']
    '''
    def __init__(self, params, prefix, source_dir):
        '''
        Initializes a new PatchParamHelper instance

        @param params: dictionary-like object containing the test parameters
        @param prefix: param key prefix selecting the patch list
        @param source_dir: directory holding the source code to be patched
        '''
        self.params = params
        self.prefix = prefix
        self.source_dir = source_dir
        self._parse_params()


    def _parse_params(self):
        '''
        Extracts the patch list for this prefix from the params

        This method currently does everything that the parent class __init__()
        method does, that is, sets all instance variables needed by other
        methods. That means it's not strictly necessary to call parent's
        __init__().
        '''
        logging.debug('Parsing patch parameters for prefix %s' % self.prefix)
        patches_key = '%s_patches' % self.prefix

        self.patches_str = self.params.get(patches_key, '[]')
        logging.debug('Patches config for prefix %s: %s' % (self.prefix,
                                                            self.patches_str))

        # NOTE: eval() on a config value; the cartesian config is treated as
        # trusted input here, but this must never see untrusted params
        self.patches = eval(self.patches_str)
        logging.debug('Patches for prefix %s: %s' % (self.prefix,
                                                     ", ".join(self.patches)))
2882
2883
class GnuSourceBuildInvalidSource(Exception):
    '''
    Raised when the source dir/file given for a build is not valid
    '''
    pass
2889
2890
class GnuSourceBuildHelper(object):
    '''
    Handles software installation of GNU-like source code

    This basically means that the build will go though the classic GNU
    autotools steps: ./configure, make, make install
    '''
    def __init__(self, source, build_dir, prefix,
                 configure_options=None):
        '''
        @type source: string
        @param source: source directory or tarball
        @type prefix: string
        @param prefix: installation prefix
        @type build_dir: string
        @param build_dir: temporary directory used for building the source code
        @type configure_options: list
        @param configure_options: options to pass to configure
        @throws: GnuSourceBuildInvalidSource
        '''
        if configure_options is None:
            # build a fresh list per instance instead of using a mutable
            # default argument, which would be shared by all instances
            configure_options = []
        self.source = source
        self.build_dir = build_dir
        self.prefix = prefix
        self.configure_options = configure_options
        self.include_pkg_config_path()


    def include_pkg_config_path(self):
        '''
        Adds the current prefix to the list of paths that pkg-config searches

        This is currently not optional as there is no observed adverse side
        effects of enabling this. As the "prefix" is usually only valid during
        a test run, we believe that having other pkg-config files (*.pc) in
        either '<prefix>/share/pkgconfig' or '<prefix>/lib/pkgconfig' is
        exactly for the purpose of using them.

        @returns: None
        '''
        env_var = 'PKG_CONFIG_PATH'

        include_paths = [os.path.join(self.prefix, 'share', 'pkgconfig'),
                         os.path.join(self.prefix, 'lib', 'pkgconfig')]

        if env_var in os.environ:
            paths = os.environ[env_var].split(':')
            for include_path in include_paths:
                if include_path not in paths:
                    paths.append(include_path)
            os.environ[env_var] = ':'.join(paths)
        else:
            os.environ[env_var] = ':'.join(include_paths)

        logging.debug('PKG_CONFIG_PATH is: %s' % os.environ['PKG_CONFIG_PATH'])


    def get_configure_path(self):
        '''
        Checks if 'configure' exists, if not, return 'autogen.sh' as a fallback

        @raise GnuSourceBuildInvalidSource: if neither script exists
        '''
        configure_path = os.path.abspath(os.path.join(self.source,
                                                      "configure"))
        autogen_path = os.path.abspath(os.path.join(self.source,
                                                "autogen.sh"))
        if os.path.exists(configure_path):
            return configure_path
        elif os.path.exists(autogen_path):
            return autogen_path
        else:
            raise GnuSourceBuildInvalidSource('configure script does not exist')


    def get_available_configure_options(self):
        '''
        Return the list of available options of a GNU like configure script

        This will run the "configure" script at the source directory

        @returns: list of options accepted by configure script
        '''
        help_raw = utils.system_output('%s --help' % self.get_configure_path(),
                                       ignore_status=True)
        help_output = help_raw.split("\n")
        option_list = []
        for line in help_output:
            cleaned_line = line.lstrip()
            if cleaned_line.startswith("--"):
                option = cleaned_line.split()[0]
                option = option.split("=")[0]
                option_list.append(option)

        return option_list


    def enable_debug_symbols(self):
        '''
        Enables option that leaves debug symbols on compiled software

        This makes debugging a lot easier.
        '''
        enable_debug_option = "--disable-strip"
        if enable_debug_option in self.get_available_configure_options():
            self.configure_options.append(enable_debug_option)
            logging.debug('Enabling debug symbols with option: %s' %
                          enable_debug_option)


    def get_configure_command(self):
        '''
        Formats configure script with all options set

        @returns: string with all configure options, including prefix
        '''
        prefix_option = "--prefix=%s" % self.prefix
        # operate on a copy so repeated calls do not keep appending the
        # prefix option to self.configure_options (the previous in-place
        # append accumulated one "--prefix" per call)
        options = self.configure_options + [prefix_option]
        return "%s %s" % (self.get_configure_path(),
                          " ".join(options))


    def configure(self):
        '''
        Runs the "configure" script passing apropriate command line options
        '''
        configure_command = self.get_configure_command()
        logging.info('Running configure on build dir')
        os.chdir(self.build_dir)
        utils.system(configure_command)


    def make(self):
        '''
        Runs "make" using the correct number of parallel jobs
        '''
        parallel_make_jobs = utils.count_cpus()
        make_command = "make -j %s" % parallel_make_jobs
        logging.info("Running make on build dir")
        os.chdir(self.build_dir)
        utils.system(make_command)


    def make_install(self):
        '''
        Runs "make install"
        '''
        os.chdir(self.build_dir)
        utils.system("make install")


    install = make_install


    def execute(self):
        '''
        Runs appropriate steps for *building* this source code tree
        '''
        self.configure()
        self.make()
3049
3050
class GnuSourceBuildParamHelper(GnuSourceBuildHelper):
    '''
    Helps to deal with gnu_autotools build helper in cartersian config files

    This class attempts to make it simple to build source code, by using a
    naming standard that follows this basic syntax:

    [<git_repo>|<local_src>]_<name>_<option> = value

    To pass extra options to the configure script, while building foo from a
    git repo, set the following variable:

    git_repo_foo_configure_options = --enable-feature
    '''
    def __init__(self, params, name, destination_dir, install_prefix):
        '''
        Instantiates a new GnuSourceBuildParamHelper

        @param params: dictionary-like object containing the test parameters
        @param name: name of the software being built
        @param destination_dir: directory holding the source code
        @param install_prefix: installation prefix passed to configure
        '''
        self.params = params
        self.name = name
        self.destination_dir = destination_dir
        self.install_prefix = install_prefix
        self._parse_params()


    def _parse_params(self):
        '''
        Extracts the build related entries from the params

        This method currently does everything that the parent class __init__()
        method does, that is, sets all instance variables needed by other
        methods. That means it's not strictly necessary to call parent's
        __init__().
        '''
        logging.debug('Parsing gnu_autotools build parameters for %s' %
                      self.name)

        options_key = '%s_configure_options' % self.name
        options = self.params.get(options_key, '').split()
        logging.debug('Configure options for %s: %s' % (self.name, options))

        self.source = self.destination_dir
        self.build_dir = self.destination_dir
        self.prefix = self.install_prefix
        self.configure_options = options
        self.include_pkg_config_path()
3098
3099
def install_host_kernel(job, params):
    """
    Install a host kernel, given the appropriate params.

    @param job: Job object.
    @param params: Dict with host kernel install params.
    """
    install_type = params.get('host_kernel_install_type')

    rpm_url = params.get('host_kernel_rpm_url')

    koji_cmd = params.get('host_kernel_koji_cmd')
    koji_build = params.get('host_kernel_koji_build')
    koji_tag = params.get('host_kernel_koji_tag')

    git_repo = params.get('host_kernel_git_repo')
    git_branch = params.get('host_kernel_git_branch')
    git_commit = params.get('host_kernel_git_commit')
    patch_list = params.get('host_kernel_patch_list')
    if patch_list:
        patch_list = patch_list.split()
    kernel_config = params.get('host_kernel_config')

    if install_type == 'rpm':
        # download a kernel rpm and let the job install and boot it
        logging.info('Installing host kernel through rpm')
        rpm_dest = os.path.join("/tmp", os.path.basename(rpm_url))
        kernel_file = utils.get_file(rpm_url, rpm_dest)
        host_kernel = job.kernel(kernel_file)
        host_kernel.install(install_vmlinux=False)
        host_kernel.boot()

    elif install_type in ('koji', 'brew'):
        # fetch the kernel and its -devel/-firmware subpackages from koji
        deps_spec = KojiPkgSpec(tag=koji_tag, package='kernel',
                                subpackages=['kernel-devel', 'kernel-firmware'])
        kernel_spec = KojiPkgSpec(tag=koji_tag, package='kernel',
                                  subpackages=['kernel'])

        client = KojiClient(koji_cmd)
        logging.info('Fetching kernel dependencies (-devel, -firmware)')
        client.get_pkgs(deps_spec, job.tmpdir)
        logging.info('Installing kernel dependencies (-devel, -firmware) '
                     'through %s', install_type)
        deps_rpms = [os.path.join(job.tmpdir, rpm_file_name) for
                     rpm_file_name in client.get_pkg_rpm_file_names(deps_spec)]
        utils.run('rpm -U --force %s' % " ".join(deps_rpms))

        client.get_pkgs(kernel_spec, job.tmpdir)
        kernel_rpm = os.path.join(job.tmpdir,
                                  client.get_pkg_rpm_file_names(kernel_spec)[0])
        host_kernel = job.kernel(kernel_rpm)
        host_kernel.install(install_vmlinux=False)
        host_kernel.boot()

    elif install_type == 'git':
        # check out, optionally patch, configure, build and boot from git
        logging.info('Chose to install host kernel through git, proceeding')
        src_dir = os.path.join("/tmp", 'kernel_src')
        repo_path = get_git_branch(git_repo, git_branch, src_dir, git_commit)
        host_kernel = job.kernel(repo_path)
        if patch_list:
            host_kernel.patch(patch_list)
        host_kernel.config(kernel_config)
        host_kernel.build()
        host_kernel.install()
        host_kernel.boot()

    else:
        logging.info('Chose %s, using the current kernel for the host',
                     install_type)
3168
3169
def if_nametoindex(ifname):
    """
    Map an interface name into its corresponding index.
    Returns 0 on error, as 0 is not a valid index

    @param ifname: interface name
    """
    index = 0
    ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
    try:
        ifr = struct.pack("16si", ifname, 0)
        r = fcntl.ioctl(ctrl_sock, SIOCGIFINDEX, ifr)
        index = struct.unpack("16si", r)[1]
    except IOError:
        # A non-existent interface makes the ioctl fail; honor the
        # documented contract (callers such as add_to_bridge test for 0)
        # instead of propagating the exception
        index = 0
    ctrl_sock.close()
    return index
3184
3185
def vnet_hdr_probe(tapfd):
    """
    Check if the IFF_VNET_HDR is support by tun.

    @param tapfd: the file descriptor of /dev/net/tun
    """
    buf = struct.pack("I", 0)
    try:
        res = fcntl.ioctl(tapfd, TUNGETFEATURES, buf)
    except OverflowError:
        # TUNGETFEATURES not usable here, so no vnet header support
        return False
    features = struct.unpack("I", res)[0]
    return bool(features & IFF_VNET_HDR)
3202
3203
def open_tap(devname, ifname, vnet_hdr=True):
    """
    Open a tap device and returns its file descriptor which is used by
    fd=<fd> parameter of qemu-kvm.

    @param devname: TUN device node path, usually /dev/net/tun
    @param ifname: TAP interface name
    @param vnet_hdr: Whether enable the vnet header
    @raise TAPModuleError: if the tun device node cannot be opened
    @raise TAPCreationError: if the TUNSETIFF ioctl fails
    """
    try:
        tapfd = os.open(devname, os.O_RDWR)
    except OSError, e:
        raise TAPModuleError(devname, "open", e)
    flags = IFF_TAP | IFF_NO_PI
    # only request the vnet header when the tun driver supports it
    if vnet_hdr and vnet_hdr_probe(tapfd):
        flags |= IFF_VNET_HDR

    # struct ifreq: 16 byte interface name followed by the flags word
    ifr = struct.pack("16sh", ifname, flags)
    try:
        r = fcntl.ioctl(tapfd, TUNSETIFF, ifr)
    except IOError, details:
        raise TAPCreationError(ifname, details)
    # name as echoed back by the kernel (currently unused)
    ifname = struct.unpack("16sh", r)[0].strip("\x00")
    return tapfd
3227
3228
3229def add_to_bridge(ifname, brname):
3230    """
3231    Add a TAP device to bridge
3232
3233    @param ifname: Name of TAP device
3234    @param brname: Name of the bridge
3235    """
3236    ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
3237    index = if_nametoindex(ifname)
3238    if index == 0:
3239        raise TAPNotExistError(ifname)
3240    ifr = struct.pack("16si", brname, index)
3241    try:
3242        r = fcntl.ioctl(ctrl_sock, SIOCBRADDIF, ifr)
3243    except IOError, details:
3244        raise BRAddIfError(ifname, brname, details)
3245    ctrl_sock.close()
3246
3247
def bring_up_ifname(ifname):
    """
    Bring up an interface

    @param ifname: Name of the interface
    @raise TAPBringUpError: if setting the interface flags fails
    """
    ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
    ifr = struct.pack("16si", ifname, IFF_UP)
    try:
        fcntl.ioctl(ctrl_sock, SIOCSIFFLAGS, ifr)
    except IOError:
        raise TAPBringUpError(ifname)
    finally:
        # release the control socket even when the ioctl fails (it used to
        # leak on the error path)
        ctrl_sock.close()
3261
3262
3263def if_set_macaddress(ifname, mac):
3264    """
3265    Set the mac address for an interface
3266
3267    @param ifname: Name of the interface
3268    @mac: Mac address
3269    """
3270    ctrl_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0)
3271
3272    ifr = struct.pack("256s", ifname)
3273    try:
3274        mac_dev = fcntl.ioctl(ctrl_sock, SIOCGIFHWADDR, ifr)[18:24]
3275        mac_dev = ":".join(["%02x" % ord(m) for m in mac_dev])
3276    except IOError, e:
3277        raise HwAddrGetError(ifname)
3278
3279    if mac_dev.lower() == mac.lower():
3280        return
3281
3282    ifr = struct.pack("16sH14s", ifname, 1,
3283                      "".join([chr(int(m, 16)) for m in mac.split(":")]))
3284    try:
3285        fcntl.ioctl(ctrl_sock, SIOCSIFHWADDR, ifr)
3286    except IOError, e:
3287        logging.info(e)
3288        raise HwAddrSetError(ifname, mac)
3289    ctrl_sock.close()
3290