models.py revision 6c0b70ba5fa83b57fc0bc2f6cf347b546c1bc9e8
1import logging, os
2from datetime import datetime
3from django.db import models as dbmodels, connection
4from xml.sax import saxutils
5import common
6from autotest_lib.frontend.afe import model_logic, model_attributes
7from autotest_lib.frontend import settings, thread_local
8from autotest_lib.client.common_lib import enum, host_protections, global_config
9from autotest_lib.client.common_lib import host_queue_entry_states
10
11# job options and user preferences
12DEFAULT_REBOOT_BEFORE = model_attributes.RebootBefore.IF_DIRTY
13DEFAULT_REBOOT_AFTER = model_attributes.RebootAfter.ALWAYS
14
15
16class AclAccessViolation(Exception):
17    """\
18    Raised when an operation is attempted without proper permissions as
19    dictated by ACLs.
20    """
21
22
23class AtomicGroup(model_logic.ModelWithInvalid, dbmodels.Model):
24    """\
25    An atomic group defines a collection of hosts which must only be scheduled
26    all at once.  Any host with a label having an atomic group will only be
27    scheduled for a job at the same time as other hosts sharing that label.
28
29    Required:
30      name: A name for this atomic group.  ex: 'rack23' or 'funky_net'
31      max_number_of_machines: The maximum number of machines that will be
32              scheduled at once when scheduling jobs to this atomic group.
33              The job.synch_count is considered the minimum.
34
35    Optional:
36      description: Arbitrary text description of this group's purpose.
37    """
38    name = dbmodels.CharField(max_length=255, unique=True)
39    description = dbmodels.TextField(blank=True)
40    # This magic value is the default to simplify the scheduler logic.
41    # It must be "large".  Atomic groups are typically used to schedule all
42    # machines in the group at once; limits on which subset is used are
43    # often imposed via dependency labels.
44    INFINITE_MACHINES = 333333333
45    max_number_of_machines = dbmodels.IntegerField(default=INFINITE_MACHINES)
46    invalid = dbmodels.BooleanField(default=False,
47                                  editable=settings.FULL_ADMIN)
48
49    name_field = 'name'
50    objects = model_logic.ModelWithInvalidManager()
51    valid_objects = model_logic.ValidObjectsManager()
52
53
54    def enqueue_job(self, job, is_template=False):
55        """Enqueue a job on an associated atomic group of hosts."""
56        queue_entry = HostQueueEntry.create(atomic_group=self, job=job,
57                                            is_template=is_template)
58        queue_entry.save()
59
60
61    def clean_object(self):
62        self.label_set.clear()
63
64
65    class Meta:
66        db_table = 'afe_atomic_groups'
67
68
69    def __unicode__(self):
70        return unicode(self.name)
71
72
73class Label(model_logic.ModelWithInvalid, dbmodels.Model):
74    """\
75    Required:
76      name: label name
77
78    Optional:
79      kernel_config: URL/path to kernel config for jobs run on this label.
80      platform: If True, this is a platform label (defaults to False).
81      only_if_needed: If True, a Host with this label can only be used if that
82              label is requested by the job/test (either as the meta_host or
83              in the job_dependencies).
84      atomic_group: The atomic group associated with this label.
85    """
86    name = dbmodels.CharField(max_length=255, unique=True)
87    kernel_config = dbmodels.CharField(max_length=255, blank=True)
88    platform = dbmodels.BooleanField(default=False)
89    invalid = dbmodels.BooleanField(default=False,
90                                    editable=settings.FULL_ADMIN)
91    only_if_needed = dbmodels.BooleanField(default=False)
92
93    name_field = 'name'
94    objects = model_logic.ModelWithInvalidManager()
95    valid_objects = model_logic.ValidObjectsManager()
96    atomic_group = dbmodels.ForeignKey(AtomicGroup, null=True, blank=True)
97
98
99    def clean_object(self):
100        self.host_set.clear()
101        self.test_set.clear()
102
103
104    def enqueue_job(self, job, atomic_group=None, is_template=False):
105        """Enqueue a job on any host of this label."""
106        queue_entry = HostQueueEntry.create(meta_host=self, job=job,
107                                            is_template=is_template,
108                                            atomic_group=atomic_group)
109        queue_entry.save()
110
111
112    class Meta:
113        db_table = 'afe_labels'
114
115    def __unicode__(self):
116        return unicode(self.name)
117
118
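# Illustrative sketch (hypothetical helper, not part of the original module):
# Label.enqueue_job() above creates a "metahost" HostQueueEntry, letting the
# scheduler pick any eligible host carrying the label.  The label name and job
# id below are assumptions for illustration only.
def _example_enqueue_on_label():
    label = Label.valid_objects.get(name='netbook')   # hypothetical label
    job = Job.objects.get(id=42)                      # hypothetical job id
    label.enqueue_job(job)                            # one metahost queue entry

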
119class Drone(dbmodels.Model, model_logic.ModelExtensions):
120    """
121    A scheduler drone
122
123    hostname: the drone's hostname
124    """
125    hostname = dbmodels.CharField(max_length=255, unique=True)
126
127    name_field = 'hostname'
128    objects = model_logic.ExtendedManager()
129
130
131    def save(self, *args, **kwargs):
132        if not User.current_user().is_superuser():
133            raise Exception('Only superusers may edit drones')
134        super(Drone, self).save(*args, **kwargs)
135
136
137    def delete(self):
138        if not User.current_user().is_superuser():
139            raise Exception('Only superusers may delete drones')
140        super(Drone, self).delete()
141
142
143    class Meta:
144        db_table = 'afe_drones'
145
146    def __unicode__(self):
147        return unicode(self.hostname)
148
149
150class DroneSet(dbmodels.Model, model_logic.ModelExtensions):
151    """
152    A set of scheduler drones
153
154    These will be used by the scheduler to decide what drones a job is allowed
155    to run on.
156
157    name: the drone set's name
158    drones: the drones that are part of the set
159    """
160    DRONE_SETS_ENABLED = global_config.global_config.get_config_value(
161            'SCHEDULER', 'drone_sets_enabled', type=bool, default=False)
162    DEFAULT_DRONE_SET_NAME = global_config.global_config.get_config_value(
163            'SCHEDULER', 'default_drone_set_name', default=None)
164
165    name = dbmodels.CharField(max_length=255, unique=True)
166    drones = dbmodels.ManyToManyField(Drone, db_table='afe_drone_sets_drones')
167
168    name_field = 'name'
169    objects = model_logic.ExtendedManager()
170
171
172    def save(self, *args, **kwargs):
173        if not User.current_user().is_superuser():
174            raise Exception('Only superusers may edit drone sets')
175        super(DroneSet, self).save(*args, **kwargs)
176
177
178    def delete(self):
179        if not User.current_user().is_superuser():
180            raise Exception('Only superusers may delete drone sets')
181        super(DroneSet, self).delete()
182
183
184    @classmethod
185    def drone_sets_enabled(cls):
186        return cls.DRONE_SETS_ENABLED
187
188
189    @classmethod
190    def default_drone_set_name(cls):
191        return cls.DEFAULT_DRONE_SET_NAME
192
193
194    @classmethod
195    def get_default(cls):
196        return cls.smart_get(cls.DEFAULT_DRONE_SET_NAME)
197
198
199    @classmethod
200    def resolve_name(cls, drone_set_name):
201        """
202        Returns the name of one of these, if not None, in order of preference:
203        1) the drone set given,
204        2) the current user's default drone set, or
205        3) the global default drone set
206
207        or returns None if drone sets are disabled
208        """
209        if not cls.drone_sets_enabled():
210            return None
211
212        user = User.current_user()
213        user_drone_set_name = user.drone_set and user.drone_set.name
214
215        return drone_set_name or user_drone_set_name or cls.get_default().name
216
217
218    def get_drone_hostnames(self):
219        """
220        Gets the hostnames of all drones in this drone set
221        """
222        return set(self.drones.all().values_list('hostname', flat=True))
223
224
225    class Meta:
226        db_table = 'afe_drone_sets'
227
228    def __unicode__(self):
229        return unicode(self.name)
230
231
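# Illustrative sketch (hypothetical helper, not part of the original module):
# DroneSet.resolve_name() applies the preference order documented in its
# docstring.  The set name below is an assumption; a real deployment also
# needs drone_sets_enabled and default_drone_set_name in the global config.
def _example_resolve_drone_set():
    # An explicit name wins over the current user's drone_set and the default.
    explicit = DroneSet.resolve_name('nightly-drones')
    # With no name given, fall back to the user's drone_set, then the global
    # default; None is returned when drone sets are disabled entirely.
    fallback = DroneSet.resolve_name(None)
    return explicit, fallback

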
232class User(dbmodels.Model, model_logic.ModelExtensions):
233    """\
234    Required:
235    login: user login name
236
237    Optional:
238    access_level: 0=User (default), 1=Admin, 100=Root
239    """
240    ACCESS_ROOT = 100
241    ACCESS_ADMIN = 1
242    ACCESS_USER = 0
243
244    AUTOTEST_SYSTEM = 'autotest_system'
245
246    login = dbmodels.CharField(max_length=255, unique=True)
247    access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)
248
249    # user preferences
250    reboot_before = dbmodels.SmallIntegerField(
251        choices=model_attributes.RebootBefore.choices(), blank=True,
252        default=DEFAULT_REBOOT_BEFORE)
253    reboot_after = dbmodels.SmallIntegerField(
254        choices=model_attributes.RebootAfter.choices(), blank=True,
255        default=DEFAULT_REBOOT_AFTER)
256    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)
257    show_experimental = dbmodels.BooleanField(default=False)
258
259    name_field = 'login'
260    objects = model_logic.ExtendedManager()
261
262
263    def save(self, *args, **kwargs):
264        # is this a new object being saved for the first time?
265        first_time = (self.id is None)
266        user = thread_local.get_user()
267        if user and not user.is_superuser() and user.login != self.login:
268            raise AclAccessViolation("You cannot modify user " + self.login)
269        super(User, self).save(*args, **kwargs)
270        if first_time:
271            everyone = AclGroup.objects.get(name='Everyone')
272            everyone.users.add(self)
273
274
275    def is_superuser(self):
276        return self.access_level >= self.ACCESS_ROOT
277
278
279    @classmethod
280    def current_user(cls):
281        user = thread_local.get_user()
282        if user is None:
283            user, _ = cls.objects.get_or_create(login=cls.AUTOTEST_SYSTEM)
284            user.access_level = cls.ACCESS_ROOT
285            user.save()
286        return user
287
288
289    class Meta:
290        db_table = 'afe_users'
291
292    def __unicode__(self):
293        return unicode(self.login)
294
295
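# Illustrative sketch (hypothetical helper, not part of the original module):
# User.current_user() returns the user the RPC layer stored in thread_local;
# when nothing is attached (e.g. scheduler-driven calls) it lazily creates the
# 'autotest_system' superuser, so callers never get None back.
def _example_current_user_login():
    user = User.current_user()
    return user.login, user.is_superuser()

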
296class Host(model_logic.ModelWithInvalid, dbmodels.Model,
297           model_logic.ModelWithAttributes):
298    """\
299    Required:
300    hostname
301
302    Optional:
303    locked: if true, host is locked and will not be queued
304
305    Internal:
306    synch_id: currently unused
307    status: string describing status of host
308    invalid: true if the host has been deleted
309    protection: indicates what can be done to this host during repair
310    locked_by: user that locked the host, or null if the host is unlocked
311    lock_time: DateTime at which the host was locked
312    dirty: true if the host has been used without being rebooted
313    """
314    Status = enum.Enum('Verifying', 'Running', 'Ready', 'Repairing',
315                       'Repair Failed', 'Cleaning', 'Pending',
316                       string_values=True)
317    Protection = host_protections.Protection
318
319    hostname = dbmodels.CharField(max_length=255, unique=True)
320    labels = dbmodels.ManyToManyField(Label, blank=True,
321                                      db_table='afe_hosts_labels')
322    locked = dbmodels.BooleanField(default=False)
323    synch_id = dbmodels.IntegerField(blank=True, null=True,
324                                     editable=settings.FULL_ADMIN)
325    status = dbmodels.CharField(max_length=255, default=Status.READY,
326                                choices=Status.choices(),
327                                editable=settings.FULL_ADMIN)
328    invalid = dbmodels.BooleanField(default=False,
329                                    editable=settings.FULL_ADMIN)
330    protection = dbmodels.SmallIntegerField(null=False, blank=True,
331                                            choices=host_protections.choices,
332                                            default=host_protections.default)
333    locked_by = dbmodels.ForeignKey(User, null=True, blank=True, editable=False)
334    lock_time = dbmodels.DateTimeField(null=True, blank=True, editable=False)
335    dirty = dbmodels.BooleanField(default=True, editable=settings.FULL_ADMIN)
336
337    name_field = 'hostname'
338    objects = model_logic.ModelWithInvalidManager()
339    valid_objects = model_logic.ValidObjectsManager()
340
341
342    def __init__(self, *args, **kwargs):
343        super(Host, self).__init__(*args, **kwargs)
344        self._record_attributes(['status'])
345
346
347    @staticmethod
348    def create_one_time_host(hostname):
349        query = Host.objects.filter(hostname=hostname)
350        if query.count() == 0:
351            host = Host(hostname=hostname, invalid=True)
352            host.do_validate()
353        else:
354            host = query[0]
355            if not host.invalid:
356                raise model_logic.ValidationError({
357                    'hostname' : '%s already exists in the autotest DB.  '
358                        'Select it rather than entering it as a one time '
359                        'host.' % hostname
360                    })
361        host.protection = host_protections.Protection.DO_NOT_REPAIR
362        host.locked = False
363        host.save()
364        host.clean_object()
365        return host
366
367
368    def resurrect_object(self, old_object):
369        super(Host, self).resurrect_object(old_object)
370        # invalid hosts can be in use by the scheduler (as one-time hosts), so
371        # don't change the status
372        self.status = old_object.status
373
374
375    def clean_object(self):
376        self.aclgroup_set.clear()
377        self.labels.clear()
378
379
380    def save(self, *args, **kwargs):
381        # extra spaces in the hostname can be a sneaky source of errors
382        self.hostname = self.hostname.strip()
383        # is this a new object being saved for the first time?
384        first_time = (self.id is None)
385        if not first_time:
386            AclGroup.check_for_acl_violation_hosts([self])
387        if self.locked and not self.locked_by:
388            self.locked_by = User.current_user()
389            self.lock_time = datetime.now()
390            self.dirty = True
391        elif not self.locked and self.locked_by:
392            self.locked_by = None
393            self.lock_time = None
394        super(Host, self).save(*args, **kwargs)
395        if first_time:
396            everyone = AclGroup.objects.get(name='Everyone')
397            everyone.hosts.add(self)
398        self._check_for_updated_attributes()
399
400
401    def delete(self):
402        AclGroup.check_for_acl_violation_hosts([self])
403        for queue_entry in self.hostqueueentry_set.all():
404            queue_entry.deleted = True
405            queue_entry.abort()
406        super(Host, self).delete()
407
408
409    def on_attribute_changed(self, attribute, old_value):
410        assert attribute == 'status'
411        logging.info(self.hostname + ' -> ' + self.status)
412
413
414    def enqueue_job(self, job, atomic_group=None, is_template=False):
415        """Enqueue a job on this host."""
416        queue_entry = HostQueueEntry.create(host=self, job=job,
417                                            is_template=is_template,
418                                            atomic_group=atomic_group)
419        # allow recovery of dead hosts from the frontend
420        if not self.active_queue_entry() and self.is_dead():
421            self.status = Host.Status.READY
422            self.save()
423        queue_entry.save()
424
425        block = IneligibleHostQueue(job=job, host=self)
426        block.save()
427
428
429    def platform(self):
430        # TODO(showard): slightly hacky?
431        platforms = self.labels.filter(platform=True)
432        if len(platforms) == 0:
433            return None
434        return platforms[0]
435    platform.short_description = 'Platform'
436
437
438    @classmethod
439    def check_no_platform(cls, hosts):
440        Host.objects.populate_relationships(hosts, Label, 'label_list')
441        errors = []
442        for host in hosts:
443            platforms = [label.name for label in host.label_list
444                         if label.platform]
445            if platforms:
446                # join the platform names so that, if this host somehow has
447                # multiple platforms, all of them show up in the error
448                errors.append('Host %s already has a platform: %s' % (
449                              host.hostname, ', '.join(platforms)))
450        if errors:
451            raise model_logic.ValidationError({'labels': '; '.join(errors)})
452
453
454    def is_dead(self):
455        return self.status == Host.Status.REPAIR_FAILED
456
457
458    def active_queue_entry(self):
459        active = list(self.hostqueueentry_set.filter(active=True))
460        if not active:
461            return None
462        assert len(active) == 1, ('More than one active entry for '
463                                  'host ' + self.hostname)
464        return active[0]
465
466
467    def _get_attribute_model_and_args(self, attribute):
468        return HostAttribute, dict(host=self, attribute=attribute)
469
470
471    class Meta:
472        db_table = 'afe_hosts'
473
474    def __unicode__(self):
475        return unicode(self.hostname)
476
477
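# Illustrative sketch (hypothetical helper, not part of the original module):
# Host.create_one_time_host() either creates an invalid (hidden) host row or
# reuses an existing invalid one, and always leaves it unlocked with
# DO_NOT_REPAIR protection.  The hostname below is an assumption.
def _example_one_time_host():
    host = Host.create_one_time_host('adhoc1.example.com')
    assert host.invalid
    assert host.protection == host_protections.Protection.DO_NOT_REPAIR
    return host

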
478class HostAttribute(dbmodels.Model):
479    """Arbitrary keyvals associated with hosts."""
480    host = dbmodels.ForeignKey(Host)
481    attribute = dbmodels.CharField(max_length=90)
482    value = dbmodels.CharField(max_length=300)
483
484    objects = model_logic.ExtendedManager()
485
486    class Meta:
487        db_table = 'afe_host_attributes'
488
489
490class Test(dbmodels.Model, model_logic.ModelExtensions):
491    """\
492    Required:
493    author: author name
494    description: description of the test
495    name: test name
496    test_time: short, medium, or long
497    test_class: The class this test belongs to.
498    test_category: The category this test belongs to.
499    test_type: Client or Server
500    path: path to pass to run_test()
501    sync_count: a number >= 1 (default 1). If 1, the job is asynchronous. If
502                 greater than 1, it is a synchronous job requiring that many
503                 machines, e.g. sync_count = 2 is a sync job that requires
504                 two machines.
505    Optional:
506    dependencies: What the test requires to run. Comma-delimited list.
507    dependency_labels: many-to-many relationship with labels corresponding to
508                       test dependencies.
509    experimental: If True, production servers will ignore the test.
510    run_verify: Whether or not the scheduler should run the verify stage
511    """
512    TestTime = enum.Enum('SHORT', 'MEDIUM', 'LONG', start_value=1)
513    TestTypes = model_attributes.TestTypes
514    # TODO(showard) - this should be merged with Job.ControlType (but right
515    # now they use opposite values)
516
517    name = dbmodels.CharField(max_length=255, unique=True)
518    author = dbmodels.CharField(max_length=255)
519    test_class = dbmodels.CharField(max_length=255)
520    test_category = dbmodels.CharField(max_length=255)
521    dependencies = dbmodels.CharField(max_length=255, blank=True)
522    description = dbmodels.TextField(blank=True)
523    experimental = dbmodels.BooleanField(default=True)
524    run_verify = dbmodels.BooleanField(default=True)
525    test_time = dbmodels.SmallIntegerField(choices=TestTime.choices(),
526                                           default=TestTime.MEDIUM)
527    test_type = dbmodels.SmallIntegerField(choices=TestTypes.choices())
528    sync_count = dbmodels.IntegerField(default=1)
529    path = dbmodels.CharField(max_length=255, unique=True)
530
531    dependency_labels = (
532        dbmodels.ManyToManyField(Label, blank=True,
533                                 db_table='afe_autotests_dependency_labels'))
534    name_field = 'name'
535    objects = model_logic.ExtendedManager()
536
537
538    def admin_description(self):
539        escaped_description = saxutils.escape(self.description)
540        return '<span style="white-space:pre">%s</span>' % escaped_description
541    admin_description.allow_tags = True
542    admin_description.short_description = 'Description'
543
544
545    class Meta:
546        db_table = 'afe_autotests'
547
548    def __unicode__(self):
549        return unicode(self.name)
550
551
552class TestParameter(dbmodels.Model):
553    """
554    A declared parameter of a test
555    """
556    test = dbmodels.ForeignKey(Test)
557    name = dbmodels.CharField(max_length=255)
558
559    class Meta:
560        db_table = 'afe_test_parameters'
561        unique_together = ('test', 'name')
562
563    def __unicode__(self):
564        return u'%s (%s)' % (self.name, self.test.name)
565
566
567class Profiler(dbmodels.Model, model_logic.ModelExtensions):
568    """\
569    Required:
570    name: profiler name
571    test_type: Client or Server
572
573    Optional:
574    description: arbitrary text description
575    """
576    name = dbmodels.CharField(max_length=255, unique=True)
577    description = dbmodels.TextField(blank=True)
578
579    name_field = 'name'
580    objects = model_logic.ExtendedManager()
581
582
583    class Meta:
584        db_table = 'afe_profilers'
585
586    def __unicode__(self):
587        return unicode(self.name)
588
589
590class AclGroup(dbmodels.Model, model_logic.ModelExtensions):
591    """\
592    Required:
593    name: name of ACL group
594
595    Optional:
596    description: arbitrary description of group
597    """
598    name = dbmodels.CharField(max_length=255, unique=True)
599    description = dbmodels.CharField(max_length=255, blank=True)
600    users = dbmodels.ManyToManyField(User, blank=False,
601                                     db_table='afe_acl_groups_users')
602    hosts = dbmodels.ManyToManyField(Host, blank=True,
603                                     db_table='afe_acl_groups_hosts')
604
605    name_field = 'name'
606    objects = model_logic.ExtendedManager()
607
608    @staticmethod
609    def check_for_acl_violation_hosts(hosts):
610        user = User.current_user()
611        if user.is_superuser():
612            return
613        accessible_host_ids = set(
614            host.id for host in Host.objects.filter(aclgroup__users=user))
615        for host in hosts:
616            # Check if the user has access to this host,
617            # but only if it is not a metahost or a one-time-host
618            no_access = (isinstance(host, Host)
619                         and not host.invalid
620                         and int(host.id) not in accessible_host_ids)
621            if no_access:
622                raise AclAccessViolation("%s does not have access to %s" %
623                                         (str(user), str(host)))
624
625
626    @staticmethod
627    def check_abort_permissions(queue_entries):
628        """
629        look for queue entries that aren't abortable, meaning
630         * the job isn't owned by this user, and
631           * the machine isn't ACL-accessible, or
632           * the machine is in the "Everyone" ACL
633        """
634        user = User.current_user()
635        if user.is_superuser():
636            return
637        not_owned = queue_entries.exclude(job__owner=user.login)
638        # I do this using ID sets instead of just Django filters because
639        # filtering on M2M dbmodels is broken in Django 0.96. It's better in
640        # 1.0.
641        # TODO: Use Django filters, now that we're using 1.0.
642        accessible_ids = set(
643            entry.id for entry
644            in not_owned.filter(host__aclgroup__users__login=user.login))
645        public_ids = set(entry.id for entry
646                         in not_owned.filter(host__aclgroup__name='Everyone'))
647        cannot_abort = [entry for entry in not_owned.select_related()
648                        if entry.id not in accessible_ids
649                        or entry.id in public_ids]
650        if len(cannot_abort) == 0:
651            return
652        entry_names = ', '.join('%s-%s/%s' % (entry.job.id, entry.job.owner,
653                                              entry.host_or_metahost_name())
654                                for entry in cannot_abort)
655        raise AclAccessViolation('You cannot abort the following job entries: '
656                                 + entry_names)
657
658
659    def check_for_acl_violation_acl_group(self):
660        user = User.current_user()
661        if user.is_superuser():
662            return
663        if self.name == 'Everyone':
664            raise AclAccessViolation("You cannot modify 'Everyone'!")
665        if not user in self.users.all():
666            raise AclAccessViolation("You do not have access to %s"
667                                     % self.name)
668
669    @staticmethod
670    def on_host_membership_change():
671        everyone = AclGroup.objects.get(name='Everyone')
672
673        # find hosts that aren't in any ACL group and add them to Everyone
674        # TODO(showard): this is a bit of a hack, since the fact that this query
675        # works is kind of a coincidence of Django internals.  This trick
676        # doesn't work in general (on all foreign key relationships).  I'll
677        # replace it with a better technique when the need arises.
678        orphaned_hosts = Host.valid_objects.filter(aclgroup__id__isnull=True)
679        everyone.hosts.add(*orphaned_hosts.distinct())
680
681        # find hosts in both Everyone and another ACL group, and remove them
682        # from Everyone
683        hosts_in_everyone = Host.valid_objects.filter(aclgroup__name='Everyone')
684        acled_hosts = set()
685        for host in hosts_in_everyone:
686            # Has an ACL group other than Everyone
687            if host.aclgroup_set.count() > 1:
688                acled_hosts.add(host)
689        everyone.hosts.remove(*acled_hosts)
690
691
692    def delete(self):
693        if (self.name == 'Everyone'):
694            raise AclAccessViolation("You cannot delete 'Everyone'!")
695        self.check_for_acl_violation_acl_group()
696        super(AclGroup, self).delete()
697        self.on_host_membership_change()
698
699
700    def add_current_user_if_empty(self):
701        if not self.users.count():
702            self.users.add(User.current_user())
703
704
705    def perform_after_save(self, change):
706        if not change:
707            self.users.add(User.current_user())
708        self.add_current_user_if_empty()
709        self.on_host_membership_change()
710
711
712    def save(self, *args, **kwargs):
713        change = bool(self.id)
714        if change:
715            # Check the original object for an ACL violation
716            AclGroup.objects.get(id=self.id).check_for_acl_violation_acl_group()
717        super(AclGroup, self).save(*args, **kwargs)
718        self.perform_after_save(change)
719
720
721    class Meta:
722        db_table = 'afe_acl_groups'
723
724    def __unicode__(self):
725        return unicode(self.name)
726
727
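# Illustrative sketch (hypothetical helper, not part of the original module):
# AclGroup.check_for_acl_violation_hosts() raises AclAccessViolation when the
# current non-superuser touches a valid host outside their ACL groups; this is
# the guard Host.save() and Host.delete() rely on.
def _example_hosts_accessible(hosts):
    try:
        AclGroup.check_for_acl_violation_hosts(hosts)
    except AclAccessViolation as error:
        logging.warning('ACL check failed: %s', error)
        return False
    return True

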
728class Kernel(dbmodels.Model):
729    """
730    A kernel configuration for a parameterized job
731    """
732    version = dbmodels.CharField(max_length=255)
733    cmdline = dbmodels.CharField(max_length=255, blank=True)
734
735    @classmethod
736    def create_kernels(cls, kernel_list):
737        """
738        Creates all kernels in the kernel list
739
740        @param kernel_list A list of dictionaries that describe the kernels, in
741                           the same format as the 'kernel' argument to
742                           rpc_interface.generate_control_file
743        @returns a list of the created kernels
744        """
745        if not kernel_list:
746            return None
747        return [cls._create(kernel) for kernel in kernel_list]
748
749
750    @classmethod
751    def _create(cls, kernel_dict):
752        version = kernel_dict.pop('version')
753        cmdline = kernel_dict.pop('cmdline', '')
754
755        if kernel_dict:
756            raise Exception('Extraneous kernel arguments remain: %r'
757                            % kernel_dict)
758
759        kernel, _ = cls.objects.get_or_create(version=version,
760                                              cmdline=cmdline)
761        return kernel
762
763
764    class Meta:
765        db_table = 'afe_kernels'
766        unique_together = ('version', 'cmdline')
767
768    def __unicode__(self):
769        return u'%s %s' % (self.version, self.cmdline)
770
771
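# Illustrative sketch (hypothetical helper, not part of the original module):
# Kernel.create_kernels() expects a list of dicts with a required 'version'
# and an optional 'cmdline' (the same shape as the 'kernel' argument to
# rpc_interface.generate_control_file); any other key raises an exception.
# The version strings below are assumptions.
def _example_create_kernels():
    kernel_list = [
        {'version': '2.6.38'},
        {'version': '2.6.38', 'cmdline': 'console=ttyS0'},
    ]
    # Rows are fetched or created, so repeated calls reuse the same kernels.
    return Kernel.create_kernels(kernel_list)

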
772class ParameterizedJob(dbmodels.Model):
773    """
774    Auxiliary configuration for a parameterized job
775    """
776    test = dbmodels.ForeignKey(Test)
777    label = dbmodels.ForeignKey(Label, null=True)
778    use_container = dbmodels.BooleanField(default=False)
779    profile_only = dbmodels.BooleanField(default=False)
780    upload_kernel_config = dbmodels.BooleanField(default=False)
781
782    kernels = dbmodels.ManyToManyField(
783            Kernel, db_table='afe_parameterized_job_kernels')
784    profilers = dbmodels.ManyToManyField(
785            Profiler, through='ParameterizedJobProfiler')
786
787
788    @classmethod
789    def smart_get(cls, id_or_name, *args, **kwargs):
790        """For compatibility with Job.add_object"""
791        return cls.objects.get(pk=id_or_name)
792
793
794    def job(self):
795        jobs = self.job_set.all()
796        assert jobs.count() <= 1
797        return jobs and jobs[0] or None
798
799
800    class Meta:
801        db_table = 'afe_parameterized_jobs'
802
803    def __unicode__(self):
804        return u'%s (parameterized) - %s' % (self.test.name, self.job())
805
806
807class ParameterizedJobProfiler(dbmodels.Model):
808    """
809    A profiler to run on a parameterized job
810    """
811    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
812    profiler = dbmodels.ForeignKey(Profiler)
813
814    class Meta:
815        db_table = 'afe_parameterized_jobs_profilers'
816        unique_together = ('parameterized_job', 'profiler')
817
818
819class ParameterizedJobProfilerParameter(dbmodels.Model):
820    """
821    A parameter for a profiler in a parameterized job
822    """
823    parameterized_job_profiler = dbmodels.ForeignKey(ParameterizedJobProfiler)
824    parameter_name = dbmodels.CharField(max_length=255)
825    parameter_value = dbmodels.TextField()
826    parameter_type = dbmodels.CharField(
827            max_length=8, choices=model_attributes.ParameterTypes.choices())
828
829    class Meta:
830        db_table = 'afe_parameterized_job_profiler_parameters'
831        unique_together = ('parameterized_job_profiler', 'parameter_name')
832
833    def __unicode__(self):
834        return u'%s - %s' % (self.parameterized_job_profiler.profiler.name,
835                             self.parameter_name)
836
837
838class ParameterizedJobParameter(dbmodels.Model):
839    """
840    Parameters for a parameterized job
841    """
842    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
843    test_parameter = dbmodels.ForeignKey(TestParameter)
844    parameter_value = dbmodels.TextField()
845    parameter_type = dbmodels.CharField(
846            max_length=8, choices=model_attributes.ParameterTypes.choices())
847
848    class Meta:
849        db_table = 'afe_parameterized_job_parameters'
850        unique_together = ('parameterized_job', 'test_parameter')
851
852    def __unicode__(self):
853        return u'%s - %s' % (self.parameterized_job.job().name,
854                             self.test_parameter.name)
855
856
857class JobManager(model_logic.ExtendedManager):
858    'Custom manager to provide efficient status counts querying.'
859    def get_status_counts(self, job_ids):
860        """\
861        Returns a dictionary mapping the given job IDs to their status
862        count dictionaries.
863        """
864        if not job_ids:
865            return {}
866        id_list = '(%s)' % ','.join(str(job_id) for job_id in job_ids)
867        cursor = connection.cursor()
868        cursor.execute("""
869            SELECT job_id, status, aborted, complete, COUNT(*)
870            FROM afe_host_queue_entries
871            WHERE job_id IN %s
872            GROUP BY job_id, status, aborted, complete
873            """ % id_list)
874        all_job_counts = dict((job_id, {}) for job_id in job_ids)
875        for job_id, status, aborted, complete, count in cursor.fetchall():
876            job_dict = all_job_counts[job_id]
877            full_status = HostQueueEntry.compute_full_status(status, aborted,
878                                                             complete)
879            job_dict.setdefault(full_status, 0)
880            job_dict[full_status] += count
881        return all_job_counts
882
883
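# Illustrative sketch (hypothetical helper, not part of the original module):
# JobManager.get_status_counts() returns one dict per requested job id, keyed
# by the "full" status, so aborted-but-unfinished entries are reported as
# 'Aborted (<status>)'.  The job ids and counts below are assumptions.
def _example_status_counts():
    counts = Job.objects.get_status_counts([1, 2])
    # e.g. {1: {'Completed': 3, 'Aborted (Queued)': 1}, 2: {'Running': 2}}
    return counts

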
884class Job(dbmodels.Model, model_logic.ModelExtensions):
885    """\
886    owner: username of job owner
887    name: job name (does not have to be unique)
888    priority: Low, Medium, High, Urgent (or 0-3)
889    control_file: contents of control file
890    control_type: Client or Server
891    created_on: date of job creation
892    submitted_on: date of job submission
893    synch_count: how many hosts should be used per autoserv execution
894    run_verify: Whether or not to run the verify phase
895    timeout: hours from queuing time until job times out
896    max_runtime_hrs: hours from job starting time until job times out
897    email_list: list of people to email on completion delimited by any of:
898                white space, ',', ':', ';'
899    dependency_labels: many-to-many relationship with labels corresponding to
900                       job dependencies
901    reboot_before: Never, If dirty, or Always
902    reboot_after: Never, If all tests passed, or Always
903    parse_failed_repair: if True, a failed repair launched by this job will
904                         have its results parsed as part of the job.
905    drone_set: The set of drones to run this job on
906    """
907    DEFAULT_TIMEOUT = global_config.global_config.get_config_value(
908        'AUTOTEST_WEB', 'job_timeout_default', default=240)
909    DEFAULT_MAX_RUNTIME_HRS = global_config.global_config.get_config_value(
910        'AUTOTEST_WEB', 'job_max_runtime_hrs_default', default=72)
911    DEFAULT_PARSE_FAILED_REPAIR = global_config.global_config.get_config_value(
912        'AUTOTEST_WEB', 'parse_failed_repair_default', type=bool,
913        default=False)
914
915    Priority = enum.Enum('Low', 'Medium', 'High', 'Urgent')
916    ControlType = enum.Enum('Server', 'Client', start_value=1)
917
918    owner = dbmodels.CharField(max_length=255)
919    name = dbmodels.CharField(max_length=255)
920    priority = dbmodels.SmallIntegerField(choices=Priority.choices(),
921                                          blank=True, # to allow 0
922                                          default=Priority.MEDIUM)
923    control_file = dbmodels.TextField(null=True, blank=True)
924    control_type = dbmodels.SmallIntegerField(choices=ControlType.choices(),
925                                              blank=True, # to allow 0
926                                              default=ControlType.CLIENT)
927    created_on = dbmodels.DateTimeField()
928    synch_count = dbmodels.IntegerField(null=True, default=1)
929    timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
930    run_verify = dbmodels.BooleanField(default=True)
931    email_list = dbmodels.CharField(max_length=250, blank=True)
932    dependency_labels = (
933            dbmodels.ManyToManyField(Label, blank=True,
934                                     db_table='afe_jobs_dependency_labels'))
935    reboot_before = dbmodels.SmallIntegerField(
936        choices=model_attributes.RebootBefore.choices(), blank=True,
937        default=DEFAULT_REBOOT_BEFORE)
938    reboot_after = dbmodels.SmallIntegerField(
939        choices=model_attributes.RebootAfter.choices(), blank=True,
940        default=DEFAULT_REBOOT_AFTER)
941    parse_failed_repair = dbmodels.BooleanField(
942        default=DEFAULT_PARSE_FAILED_REPAIR)
943    max_runtime_hrs = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_HRS)
944    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)
945
946    parameterized_job = dbmodels.ForeignKey(ParameterizedJob, null=True,
947                                            blank=True)
948
949
950    # custom manager
951    objects = JobManager()
952
953
954    def is_server_job(self):
955        return self.control_type == self.ControlType.SERVER
956
957
958    @classmethod
959    def parameterized_jobs_enabled(cls):
960        return global_config.global_config.get_config_value(
961                'AUTOTEST_WEB', 'parameterized_jobs', type=bool)
962
963
964    @classmethod
965    def check_parameterized_job(cls, control_file, parameterized_job):
966        """
967        Checks that the job is valid given the global config settings
968
969        First, either control_file must be set, or parameterized_job must be
970        set, but not both. Second, parameterized_job must be set if and only if
971        the parameterized_jobs option in the global config is set to True.
972        """
973        if not (bool(control_file) ^ bool(parameterized_job)):
974            raise Exception('Job must have either control file or '
975                            'parameterization, but not both')
976
977        parameterized_jobs_enabled = cls.parameterized_jobs_enabled()
978        if control_file and parameterized_jobs_enabled:
979            raise Exception('Control file specified, but parameterized jobs '
980                            'are enabled')
981        if parameterized_job and not parameterized_jobs_enabled:
982            raise Exception('Parameterized job specified, but parameterized '
983                            'jobs are not enabled')
984
985
986    @classmethod
987    def create(cls, owner, options, hosts):
988        """\
989        Creates a job by taking some information (the listed args)
990        and filling in the rest of the necessary information.
991        """
992        AclGroup.check_for_acl_violation_hosts(hosts)
993
994        control_file = options.get('control_file')
995        parameterized_job = options.get('parameterized_job')
996
997        # The current implementation of parameterized jobs requires that only
998        # control files or parameterized jobs are used. Using the image
999        # parameter on autoupdate_ParameterizedJob doesn't mix pure
1000        # parameterized jobs and control-file jobs, but it mucks with normal
1001        # jobs enough (by adding a parameterized_job id to them) that this
1002        # check would fail. So for now we just skip this check.
1003        # cls.check_parameterized_job(control_file=control_file,
1004        #                             parameterized_job=parameterized_job)
1005        user = User.current_user()
1006        if options.get('reboot_before') is None:
1007            options['reboot_before'] = user.get_reboot_before_display()
1008        if options.get('reboot_after') is None:
1009            options['reboot_after'] = user.get_reboot_after_display()
1010
1011        drone_set = DroneSet.resolve_name(options.get('drone_set'))
1012
1013        job = cls.add_object(
1014            owner=owner,
1015            name=options['name'],
1016            priority=options['priority'],
1017            control_file=control_file,
1018            control_type=options['control_type'],
1019            synch_count=options.get('synch_count'),
1020            timeout=options.get('timeout'),
1021            max_runtime_hrs=options.get('max_runtime_hrs'),
1022            run_verify=options.get('run_verify'),
1023            email_list=options.get('email_list'),
1024            reboot_before=options.get('reboot_before'),
1025            reboot_after=options.get('reboot_after'),
1026            parse_failed_repair=options.get('parse_failed_repair'),
1027            created_on=datetime.now(),
1028            drone_set=drone_set,
1029            parameterized_job=parameterized_job)
1030
1031        job.dependency_labels = options['dependencies']
1032
1033        if options.get('keyvals'):
1034            for key, value in options['keyvals'].iteritems():
1035                JobKeyval.objects.create(job=job, key=key, value=value)
1036
1037        return job
1038
1039
1040    def save(self, *args, **kwargs):
1041        # The current implementation of parameterized jobs requires that only
1042        # control files or parameterized jobs are used. Using the image
1043        # parameter on autoupdate_ParameterizedJob doesn't mix pure
1044        # parameterized jobs and control-file jobs, but it mucks with normal
1045        # jobs enough (by adding a parameterized_job id to them) that this
1046        # check would fail. So for now we just skip this check.
1047        # cls.check_parameterized_job(control_file=self.control_file,
1048        #                             parameterized_job=self.parameterized_job)
1049        super(Job, self).save(*args, **kwargs)
1050
1051
1052    def queue(self, hosts, atomic_group=None, is_template=False):
1053        """Enqueue a job on the given hosts."""
1054        if not hosts:
1055            if atomic_group:
1056                # No hosts or labels are required to queue an atomic group
1057                # Job.  However, if they are given, we respect them below.
1058                atomic_group.enqueue_job(self, is_template=is_template)
1059            else:
1060                # hostless job
1061                entry = HostQueueEntry.create(job=self, is_template=is_template)
1062                entry.save()
1063            return
1064
1065        for host in hosts:
1066            host.enqueue_job(self, atomic_group=atomic_group,
1067                             is_template=is_template)
1068
1069
1070    def create_recurring_job(self, start_date, loop_period, loop_count, owner):
1071        rec = RecurringRun(job=self, start_date=start_date,
1072                           loop_period=loop_period,
1073                           loop_count=loop_count,
1074                           owner=User.objects.get(login=owner))
1075        rec.save()
1076        return rec.id
1077
1078
1079    def user(self):
1080        try:
1081            return User.objects.get(login=self.owner)
1082        except self.DoesNotExist:
1083            return None
1084
1085
1086    def abort(self):
1087        for queue_entry in self.hostqueueentry_set.all():
1088            queue_entry.abort()
1089
1090
1091    def tag(self):
1092        return '%s-%s' % (self.id, self.owner)
1093
1094
1095    def keyval_dict(self):
1096        return dict((keyval.key, keyval.value)
1097                    for keyval in self.jobkeyval_set.all())
1098
1099
1100    class Meta:
1101        db_table = 'afe_jobs'
1102
1103    def __unicode__(self):
1104        return u'%s (%s-%s)' % (self.name, self.id, self.owner)
1105
1106
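# Illustrative sketch (hypothetical helper, not part of the original module):
# Job.create() fills in per-user defaults (reboot options, drone set) and
# attaches dependency labels and keyvals; Job.queue() then creates one
# HostQueueEntry per host (or a single hostless entry when no hosts are
# given).  The option values below are assumptions; real callers go through
# rpc_interface.create_job().
def _example_create_and_queue(hosts):
    options = {
        'name': 'example-smoke',
        'priority': Job.Priority.MEDIUM,
        'control_file': '# control file contents here',
        'control_type': Job.ControlType.CLIENT,
        'dependencies': [],
        'keyvals': {'build': 'build-0001'},
    }
    job = Job.create(owner=User.current_user().login, options=options,
                     hosts=hosts)
    job.queue(hosts)
    return job

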
1107class JobKeyval(dbmodels.Model, model_logic.ModelExtensions):
1108    """Keyvals associated with jobs"""
1109    job = dbmodels.ForeignKey(Job)
1110    key = dbmodels.CharField(max_length=90)
1111    value = dbmodels.CharField(max_length=300)
1112
1113    objects = model_logic.ExtendedManager()
1114
1115    class Meta:
1116        db_table = 'afe_job_keyvals'
1117
1118
1119class IneligibleHostQueue(dbmodels.Model, model_logic.ModelExtensions):
1120    job = dbmodels.ForeignKey(Job)
1121    host = dbmodels.ForeignKey(Host)
1122
1123    objects = model_logic.ExtendedManager()
1124
1125    class Meta:
1126        db_table = 'afe_ineligible_host_queues'
1127
1128
1129class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
1130    Status = host_queue_entry_states.Status
1131    ACTIVE_STATUSES = host_queue_entry_states.ACTIVE_STATUSES
1132    COMPLETE_STATUSES = host_queue_entry_states.COMPLETE_STATUSES
1133
1134    job = dbmodels.ForeignKey(Job)
1135    host = dbmodels.ForeignKey(Host, blank=True, null=True)
1136    status = dbmodels.CharField(max_length=255)
1137    meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
1138                                    db_column='meta_host')
1139    active = dbmodels.BooleanField(default=False)
1140    complete = dbmodels.BooleanField(default=False)
1141    deleted = dbmodels.BooleanField(default=False)
1142    execution_subdir = dbmodels.CharField(max_length=255, blank=True,
1143                                          default='')
1144    # If atomic_group is set, this is a virtual HostQueueEntry that will
1145    # be expanded into many actual hosts within the group at schedule time.
1146    atomic_group = dbmodels.ForeignKey(AtomicGroup, blank=True, null=True)
1147    aborted = dbmodels.BooleanField(default=False)
1148    started_on = dbmodels.DateTimeField(null=True, blank=True)
1149
1150    objects = model_logic.ExtendedManager()
1151
1152
1153    def __init__(self, *args, **kwargs):
1154        super(HostQueueEntry, self).__init__(*args, **kwargs)
1155        self._record_attributes(['status'])
1156
1157
1158    @classmethod
1159    def create(cls, job, host=None, meta_host=None, atomic_group=None,
1160                 is_template=False):
1161        if is_template:
1162            status = cls.Status.TEMPLATE
1163        else:
1164            status = cls.Status.QUEUED
1165
1166        return cls(job=job, host=host, meta_host=meta_host,
1167                   atomic_group=atomic_group, status=status)
1168
1169
1170    def save(self, *args, **kwargs):
1171        self._set_active_and_complete()
1172        super(HostQueueEntry, self).save(*args, **kwargs)
1173        self._check_for_updated_attributes()
1174
1175
1176    def execution_path(self):
1177        """
1178        Path to this entry's results (relative to the base results directory).
1179        """
1180        return os.path.join(self.job.tag(), self.execution_subdir)
1181
1182
1183    def host_or_metahost_name(self):
1184        if self.host:
1185            return self.host.hostname
1186        elif self.meta_host:
1187            return self.meta_host.name
1188        else:
1189            assert self.atomic_group, "no host, meta_host or atomic group!"
1190            return self.atomic_group.name
1191
1192
1193    def _set_active_and_complete(self):
1194        if self.status in self.ACTIVE_STATUSES:
1195            self.active, self.complete = True, False
1196        elif self.status in self.COMPLETE_STATUSES:
1197            self.active, self.complete = False, True
1198        else:
1199            self.active, self.complete = False, False
1200
1201
1202    def on_attribute_changed(self, attribute, old_value):
1203        assert attribute == 'status'
1204        logging.info('%s/%d (%d) -> %s' % (self.host, self.job.id, self.id,
1205                                           self.status))
1206
1207
1208    def is_meta_host_entry(self):
1209        'True if this entry has a meta_host instead of a host.'
1210        return self.host is None and self.meta_host is not None
1211
1212
1213    def log_abort(self, user):
1214        abort_log = AbortedHostQueueEntry(queue_entry=self, aborted_by=user)
1215        abort_log.save()
1216
1217
1218    def abort(self):
1219        # this isn't completely immune to race conditions since it's not atomic,
1220        # but it should be safe given the scheduler's behavior.
1221        if not self.complete and not self.aborted:
1222            self.log_abort(User.current_user())
1223            self.aborted = True
1224            self.save()
1225
1226
1227    @classmethod
1228    def compute_full_status(cls, status, aborted, complete):
1229        if aborted and not complete:
1230            return 'Aborted (%s)' % status
1231        return status
1232
1233
1234    def full_status(self):
1235        return self.compute_full_status(self.status, self.aborted,
1236                                        self.complete)
1237
1238
1239    def _postprocess_object_dict(self, object_dict):
1240        object_dict['full_status'] = self.full_status()
1241
1242
1243    class Meta:
1244        db_table = 'afe_host_queue_entries'
1245
1246
1247
1248    def __unicode__(self):
1249        hostname = None
1250        if self.host:
1251            hostname = self.host.hostname
1252        return u"%s/%d (%d)" % (hostname, self.job.id, self.id)
1253
1254
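# Illustrative sketch (hypothetical helper, not part of the original module):
# compute_full_status() only decorates entries that were aborted before they
# completed; an entry that was aborted but still ran to completion keeps its
# plain status.
def _example_full_status():
    decorated = HostQueueEntry.compute_full_status('Queued', aborted=True,
                                                   complete=False)
    plain = HostQueueEntry.compute_full_status('Completed', aborted=True,
                                               complete=True)
    return decorated, plain   # ('Aborted (Queued)', 'Completed')

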
1255class AbortedHostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
1256    queue_entry = dbmodels.OneToOneField(HostQueueEntry, primary_key=True)
1257    aborted_by = dbmodels.ForeignKey(User)
1258    aborted_on = dbmodels.DateTimeField()
1259
1260    objects = model_logic.ExtendedManager()
1261
1262
1263    def save(self, *args, **kwargs):
1264        self.aborted_on = datetime.now()
1265        super(AbortedHostQueueEntry, self).save(*args, **kwargs)
1266
1267    class Meta:
1268        db_table = 'afe_aborted_host_queue_entries'
1269
1270
1271class RecurringRun(dbmodels.Model, model_logic.ModelExtensions):
1272    """\
1273    job: job to use as a template
1274    owner: owner of the instantiated template
1275    start_date: Run the job at scheduled date
1276    loop_period: Re-run (loop) the job periodically
1277                 (in every loop_period seconds)
1278    loop_count: Re-run (loop) count
1279    """
1280
1281    job = dbmodels.ForeignKey(Job)
1282    owner = dbmodels.ForeignKey(User)
1283    start_date = dbmodels.DateTimeField()
1284    loop_period = dbmodels.IntegerField(blank=True)
1285    loop_count = dbmodels.IntegerField(blank=True)
1286
1287    objects = model_logic.ExtendedManager()
1288
1289    class Meta:
1290        db_table = 'afe_recurring_run'
1291
1292    def __unicode__(self):
1293        return u'RecurringRun(job %s, start %s, period %s, count %s)' % (
1294            self.job.id, self.start_date, self.loop_period, self.loop_count)
1295
1296
1297class SpecialTask(dbmodels.Model, model_logic.ModelExtensions):
1298    """\
1299    Tasks to run on hosts at the next time they are in the Ready state. Use this
1300    for high-priority tasks, such as forced repair or forced reinstall.
1301
1302    host: host to run this task on
1303    task: special task to run
1304    time_requested: date and time the request for this task was made
1305    is_active: task is currently running
1306    is_complete: task has finished running
1307    time_started: date and time the task started
1308    queue_entry: Host queue entry waiting on this task (or None, if task was not
1309                 started in preparation of a job)
1310    """
1311    Task = enum.Enum('Verify', 'Cleanup', 'Repair', string_values=True)
1312
1313    host = dbmodels.ForeignKey(Host, blank=False, null=False)
1314    task = dbmodels.CharField(max_length=64, choices=Task.choices(),
1315                              blank=False, null=False)
1316    requested_by = dbmodels.ForeignKey(User)
1317    time_requested = dbmodels.DateTimeField(auto_now_add=True, blank=False,
1318                                            null=False)
1319    is_active = dbmodels.BooleanField(default=False, blank=False, null=False)
1320    is_complete = dbmodels.BooleanField(default=False, blank=False, null=False)
1321    time_started = dbmodels.DateTimeField(null=True, blank=True)
1322    queue_entry = dbmodels.ForeignKey(HostQueueEntry, blank=True, null=True)
1323    success = dbmodels.BooleanField(default=False, blank=False, null=False)
1324
1325    objects = model_logic.ExtendedManager()
1326
1327
1328    def save(self, **kwargs):
1329        if self.queue_entry:
1330            self.requested_by = User.objects.get(
1331                    login=self.queue_entry.job.owner)
1332        super(SpecialTask, self).save(**kwargs)
1333
1334
1335    def execution_path(self):
1336        """@see HostQueueEntry.execution_path()"""
1337        return 'hosts/%s/%s-%s' % (self.host.hostname, self.id,
1338                                   self.task.lower())
1339
1340
1341    # property to emulate HostQueueEntry.status
1342    @property
1343    def status(self):
1344        """
1345        Return a host queue entry status appropriate for this task.  Although
1346        SpecialTasks are not HostQueueEntries, it is helpful to the user to
1347        present similar statuses.
1348        """
1349        if self.is_complete:
1350            if self.success:
1351                return HostQueueEntry.Status.COMPLETED
1352            return HostQueueEntry.Status.FAILED
1353        if self.is_active:
1354            return HostQueueEntry.Status.RUNNING
1355        return HostQueueEntry.Status.QUEUED
1356
1357
1358    # property to emulate HostQueueEntry.started_on
1359    @property
1360    def started_on(self):
1361        return self.time_started
1362
1363
1364    @classmethod
1365    def schedule_special_task(cls, host, task):
1366        """
1367        Schedules a special task on a host if the task is not already scheduled.
1368        """
1369        existing_tasks = SpecialTask.objects.filter(host__id=host.id, task=task,
1370                                                    is_active=False,
1371                                                    is_complete=False)
1372        if existing_tasks:
1373            return existing_tasks[0]
1374
1375        special_task = SpecialTask(host=host, task=task,
1376                                   requested_by=User.current_user())
1377        special_task.save()
1378        return special_task
1379
1380
1381    def activate(self):
1382        """
1383        Sets a task as active and sets the time started to the current time.
1384        """
1385        logging.info('Starting: %s', self)
1386        self.is_active = True
1387        self.time_started = datetime.now()
1388        self.save()
1389
1390
1391    def finish(self, success):
1392        """
1393        Sets a task as completed
1394        """
1395        logging.info('Finished: %s', self)
1396        self.is_active = False
1397        self.is_complete = True
1398        self.success = success
1399        self.save()
1400
1401
1402    class Meta:
1403        db_table = 'afe_special_tasks'
1404
1405
1406    def __unicode__(self):
1407        result = u'Special Task %s (host %s, task %s, time %s)' % (
1408            self.id, self.host, self.task, self.time_requested)
1409        if self.is_complete:
1410            result += u' (completed)'
1411        elif self.is_active:
1412            result += u' (active)'
1413
1414        return result
1415
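
# Illustrative sketch (hypothetical helper, not part of the original module):
# SpecialTask.schedule_special_task() is idempotent for queued work: if an
# inactive, incomplete task of the same type already exists for the host, that
# existing row is returned instead of creating a duplicate.
def _example_force_verify(host):
    task = SpecialTask.schedule_special_task(host, SpecialTask.Task.VERIFY)
    # Scheduling again before the scheduler picks the task up returns the same
    # row rather than queueing a second verify.
    repeat = SpecialTask.schedule_special_task(host, SpecialTask.Task.VERIFY)
    assert repeat.id == task.id
    return task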