models.py revision 7db38bab49bb85027c216bdf89e289d0f24dba16
1import logging, os
2from datetime import datetime
3from django.db import models as dbmodels, connection
4from xml.sax import saxutils
5import common
6from autotest_lib.frontend.afe import model_logic, model_attributes
7from autotest_lib.frontend import settings, thread_local
8from autotest_lib.client.common_lib import enum, host_protections, global_config
9from autotest_lib.client.common_lib import host_queue_entry_states
10
11# job options and user preferences
12DEFAULT_REBOOT_BEFORE = model_attributes.RebootBefore.IF_DIRTY
13DEFAULT_REBOOT_AFTER = model_attributes.RebootAfter.ALWAYS
14
15
16class AclAccessViolation(Exception):
17    """\
18    Raised when an operation is attempted without proper permissions as
19    dictated by ACLs.
20    """
21
22
23class AtomicGroup(model_logic.ModelWithInvalid, dbmodels.Model):
24    """\
25    An atomic group defines a collection of hosts which must only be scheduled
26    all at once.  Any host with a label having an atomic group will only be
27    scheduled for a job at the same time as other hosts sharing that label.
28
29    Required:
30      name: A name for this atomic group, e.g. 'rack23' or 'funky_net'.
31      max_number_of_machines: The maximum number of machines that will be
32              scheduled at once when scheduling jobs to this atomic group.
33              The job.synch_count is considered the minimum.
34
35    Optional:
36      description: Arbitrary text description of this group's purpose.
37    """
38    name = dbmodels.CharField(max_length=255, unique=True)
39    description = dbmodels.TextField(blank=True)
40    # This magic value is the default to simplify the scheduler logic.
41    # It must be "large".  The common use of atomic groups is to want all
42    # machines in the group to be used, limits on which subset used are
43    # often chosen via dependency labels.
44    # TODO(dennisjeffrey): Revisit this so we don't have to assume that
45    # "infinity" is around 3.3 million.
46    INFINITE_MACHINES = 333333333
47    max_number_of_machines = dbmodels.IntegerField(default=INFINITE_MACHINES)
48    invalid = dbmodels.BooleanField(default=False,
49                                    editable=settings.FULL_ADMIN)
50
51    name_field = 'name'
52    objects = model_logic.ModelWithInvalidManager()
53    valid_objects = model_logic.ValidObjectsManager()
54
55
56    def enqueue_job(self, job, is_template=False):
57        """Enqueue a job on an associated atomic group of hosts.
58
59        @param job: A job to enqueue.
60        @param is_template: Whether the status should be "Template".
61        """
62        queue_entry = HostQueueEntry.create(atomic_group=self, job=job,
63                                            is_template=is_template)
64        queue_entry.save()
65
66
67    def clean_object(self):
68        self.label_set.clear()
69
70
71    class Meta:
72        """Metadata for class AtomicGroup."""
73        db_table = 'afe_atomic_groups'
74
75
76    def __unicode__(self):
77        return unicode(self.name)
78
79
80class Label(model_logic.ModelWithInvalid, dbmodels.Model):
81    """\
82    Required:
83      name: label name
84
85    Optional:
86      kernel_config: URL/path to kernel config for jobs run on this label.
87      platform: If True, this is a platform label (defaults to False).
88      only_if_needed: If True, a Host with this label can only be used if that
89              label is requested by the job/test (either as the meta_host or
90              in the job_dependencies).
91      atomic_group: The atomic group associated with this label.
92    """
93    name = dbmodels.CharField(max_length=255, unique=True)
94    kernel_config = dbmodels.CharField(max_length=255, blank=True)
95    platform = dbmodels.BooleanField(default=False)
96    invalid = dbmodels.BooleanField(default=False,
97                                    editable=settings.FULL_ADMIN)
98    only_if_needed = dbmodels.BooleanField(default=False)
99
100    name_field = 'name'
101    objects = model_logic.ModelWithInvalidManager()
102    valid_objects = model_logic.ValidObjectsManager()
103    atomic_group = dbmodels.ForeignKey(AtomicGroup, null=True, blank=True)
104
105
106    def clean_object(self):
107        self.host_set.clear()
108        self.test_set.clear()
109
110
111    def enqueue_job(self, job, atomic_group=None, is_template=False):
112        """Enqueue a job on any host of this label.
113
114        @param job: A job to enqueue.
115        @param atomic_group: The associated atomic group.
116        @param is_template: Whether the status should be "Template".
117        """
118        queue_entry = HostQueueEntry.create(meta_host=self, job=job,
119                                            is_template=is_template,
120                                            atomic_group=atomic_group)
121        queue_entry.save()
122
123
124    class Meta:
125        """Metadata for class Label."""
126        db_table = 'afe_labels'
127
128    def __unicode__(self):
129        return unicode(self.name)
130
131
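# Illustrative sketch (not part of the original module): how a label tied to
# an atomic group can be used to enqueue a job as a meta-host entry.  The
# group/label names and the `job` argument are hypothetical, and a working
# AFE database is assumed.
def _example_enqueue_on_label(job):
    group = AtomicGroup.objects.create(name='rack23',
                                       max_number_of_machines=10)
    label = Label.objects.create(name='rack23-hosts', atomic_group=group)
    # Creates a HostQueueEntry with meta_host=label; the scheduler expands
    # it onto concrete hosts carrying this label at schedule time.
    label.enqueue_job(job, atomic_group=group)

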
132class Drone(dbmodels.Model, model_logic.ModelExtensions):
133    """
134    A scheduler drone
135
136    hostname: the drone's hostname
137    """
138    hostname = dbmodels.CharField(max_length=255, unique=True)
139
140    name_field = 'hostname'
141    objects = model_logic.ExtendedManager()
142
143
144    def save(self, *args, **kwargs):
145        if not User.current_user().is_superuser():
146            raise Exception('Only superusers may edit drones')
147        super(Drone, self).save(*args, **kwargs)
148
149
150    def delete(self):
151        if not User.current_user().is_superuser():
152            raise Exception('Only superusers may delete drones')
153        super(Drone, self).delete()
154
155
156    class Meta:
157        """Metadata for class Drone."""
158        db_table = 'afe_drones'
159
160    def __unicode__(self):
161        return unicode(self.hostname)
162
163
164class DroneSet(dbmodels.Model, model_logic.ModelExtensions):
165    """
166    A set of scheduler drones
167
168    These will be used by the scheduler to decide what drones a job is allowed
169    to run on.
170
171    name: the drone set's name
172    drones: the drones that are part of the set
173    """
174    DRONE_SETS_ENABLED = global_config.global_config.get_config_value(
175            'SCHEDULER', 'drone_sets_enabled', type=bool, default=False)
176    DEFAULT_DRONE_SET_NAME = global_config.global_config.get_config_value(
177            'SCHEDULER', 'default_drone_set_name', default=None)
178
179    name = dbmodels.CharField(max_length=255, unique=True)
180    drones = dbmodels.ManyToManyField(Drone, db_table='afe_drone_sets_drones')
181
182    name_field = 'name'
183    objects = model_logic.ExtendedManager()
184
185
186    def save(self, *args, **kwargs):
187        if not User.current_user().is_superuser():
188            raise Exception('Only superusers may edit drone sets')
189        super(DroneSet, self).save(*args, **kwargs)
190
191
192    def delete(self):
193        if not User.current_user().is_superuser():
194            raise Exception('Only superusers may delete drone sets')
195        super(DroneSet, self).delete()
196
197
198    @classmethod
199    def drone_sets_enabled(cls):
200        """Returns whether drone sets are enabled.
201
202        @param cls: Implicit class object.
203        """
204        return cls.DRONE_SETS_ENABLED
205
206
207    @classmethod
208    def default_drone_set_name(cls):
209        """Returns the default drone set name.
210
211        @param cls: Implicit class object.
212        """
213        return cls.DEFAULT_DRONE_SET_NAME
214
215
216    @classmethod
217    def get_default(cls):
218        """Gets the default drone set name, compatible with Job.add_object.
219
220        @param cls: Implicit class object.
221        """
222        return cls.smart_get(cls.DEFAULT_DRONE_SET_NAME)
223
224
225    @classmethod
226    def resolve_name(cls, drone_set_name):
227        """
228        Returns the name of one of these, if not None, in order of preference:
229        1) the drone set given,
230        2) the current user's default drone set, or
231        3) the global default drone set
232
233        or returns None if drone sets are disabled
234
235        @param cls: Implicit class object.
236        @param drone_set_name: A drone set name.
237        """
238        if not cls.drone_sets_enabled():
239            return None
240
241        user = User.current_user()
242        user_drone_set_name = user.drone_set and user.drone_set.name
243
244        return drone_set_name or user_drone_set_name or cls.get_default().name
245
246
247    def get_drone_hostnames(self):
248        """
249        Gets the hostnames of all drones in this drone set
250        """
251        return set(self.drones.all().values_list('hostname', flat=True))
252
253
254    class Meta:
255        """Metadata for class DroneSet."""
256        db_table = 'afe_drone_sets'
257
258    def __unicode__(self):
259        return unicode(self.name)
260
261
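# Illustrative sketch (not part of the original module) of the preference
# order used by DroneSet.resolve_name(); the drone set name is hypothetical.
def _example_resolve_drone_set_name():
    # Returns None when drone sets are disabled; otherwise the explicit
    # name wins, then the current user's default drone set, then the
    # global default from the SCHEDULER config section.
    return DroneSet.resolve_name('primary_drones')

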
262class User(dbmodels.Model, model_logic.ModelExtensions):
263    """\
264    Required:
265    login: user login name
266
267    Optional:
268    access_level: 0=User (default), 1=Admin, 100=Root
269    """
270    ACCESS_ROOT = 100
271    ACCESS_ADMIN = 1
272    ACCESS_USER = 0
273
274    AUTOTEST_SYSTEM = 'autotest_system'
275
276    login = dbmodels.CharField(max_length=255, unique=True)
277    access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)
278
279    # user preferences
280    reboot_before = dbmodels.SmallIntegerField(
281        choices=model_attributes.RebootBefore.choices(), blank=True,
282        default=DEFAULT_REBOOT_BEFORE)
283    reboot_after = dbmodels.SmallIntegerField(
284        choices=model_attributes.RebootAfter.choices(), blank=True,
285        default=DEFAULT_REBOOT_AFTER)
286    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)
287    show_experimental = dbmodels.BooleanField(default=False)
288
289    name_field = 'login'
290    objects = model_logic.ExtendedManager()
291
292
293    def save(self, *args, **kwargs):
294        # is this a new object being saved for the first time?
295        first_time = (self.id is None)
296        user = thread_local.get_user()
297        if user and not user.is_superuser() and user.login != self.login:
298            raise AclAccessViolation("You cannot modify user " + self.login)
299        super(User, self).save(*args, **kwargs)
300        if first_time:
301            everyone = AclGroup.objects.get(name='Everyone')
302            everyone.users.add(self)
303
304
305    def is_superuser(self):
306        """Returns whether the user has superuser access."""
307        return self.access_level >= self.ACCESS_ROOT
308
309
310    @classmethod
311    def current_user(cls):
312        """Returns the current user.
313
314        @param cls: Implicit class object.
315        """
316        user = thread_local.get_user()
317        if user is None:
318            user, _ = cls.objects.get_or_create(login=cls.AUTOTEST_SYSTEM)
319            user.access_level = cls.ACCESS_ROOT
320            user.save()
321        return user
322
323
324    class Meta:
325        """Metadata for class User."""
326        db_table = 'afe_users'
327
328    def __unicode__(self):
329        return unicode(self.login)
330
331
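# Illustrative sketch (not part of the original module) of the fallback in
# User.current_user() when no thread-local user has been set, e.g. outside a
# web request.
def _example_current_user_fallback():
    user = User.current_user()
    # Without a thread-local user this returns the 'autotest_system'
    # account, created on demand with ACCESS_ROOT, so this check passes.
    return user.is_superuser()

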
332class Host(model_logic.ModelWithInvalid, dbmodels.Model,
333           model_logic.ModelWithAttributes):
334    """\
335    Required:
336    hostname
337
338    optional:
339    locked: if true, host is locked and will not be queued
340
341    Internal:
342    synch_id: currently unused
343    status: string describing status of host
344    invalid: true if the host has been deleted
345    protection: indicates what can be done to this host during repair
346    locked_by: user that locked the host, or null if the host is unlocked
347    lock_time: DateTime at which the host was locked
348    dirty: true if the host has been used without being rebooted
349    """
350    Status = enum.Enum('Verifying', 'Running', 'Ready', 'Repairing',
351                       'Repair Failed', 'Cleaning', 'Pending',
352                       string_values=True)
353    Protection = host_protections.Protection
354
355    hostname = dbmodels.CharField(max_length=255, unique=True)
356    labels = dbmodels.ManyToManyField(Label, blank=True,
357                                      db_table='afe_hosts_labels')
358    locked = dbmodels.BooleanField(default=False)
359    synch_id = dbmodels.IntegerField(blank=True, null=True,
360                                     editable=settings.FULL_ADMIN)
361    status = dbmodels.CharField(max_length=255, default=Status.READY,
362                                choices=Status.choices(),
363                                editable=settings.FULL_ADMIN)
364    invalid = dbmodels.BooleanField(default=False,
365                                    editable=settings.FULL_ADMIN)
366    protection = dbmodels.SmallIntegerField(null=False, blank=True,
367                                            choices=host_protections.choices,
368                                            default=host_protections.default)
369    locked_by = dbmodels.ForeignKey(User, null=True, blank=True, editable=False)
370    lock_time = dbmodels.DateTimeField(null=True, blank=True, editable=False)
371    dirty = dbmodels.BooleanField(default=True, editable=settings.FULL_ADMIN)
372
373    name_field = 'hostname'
374    objects = model_logic.ModelWithInvalidManager()
375    valid_objects = model_logic.ValidObjectsManager()
376
377
378    def __init__(self, *args, **kwargs):
379        super(Host, self).__init__(*args, **kwargs)
380        self._record_attributes(['status'])
381
382
383    @staticmethod
384    def create_one_time_host(hostname):
385        """Creates a one-time host.
386
387        @param hostname: The name for the host.
388        """
389        query = Host.objects.filter(hostname=hostname)
390        if query.count() == 0:
391            host = Host(hostname=hostname, invalid=True)
392            host.do_validate()
393        else:
394            host = query[0]
395            if not host.invalid:
396                raise model_logic.ValidationError({
397                    'hostname' : '%s already exists in the autotest DB.  '
398                        'Select it rather than entering it as a one time '
399                        'host.' % hostname
400                    })
401        host.protection = host_protections.Protection.DO_NOT_REPAIR
402        host.locked = False
403        host.save()
404        host.clean_object()
405        return host
406
407
408    def resurrect_object(self, old_object):
409        super(Host, self).resurrect_object(old_object)
410        # invalid hosts can be in use by the scheduler (as one-time hosts), so
411        # don't change the status
412        self.status = old_object.status
413
414
415    def clean_object(self):
416        self.aclgroup_set.clear()
417        self.labels.clear()
418
419
420    def save(self, *args, **kwargs):
421        # extra spaces in the hostname can be a sneaky source of errors
422        self.hostname = self.hostname.strip()
423        # is this a new object being saved for the first time?
424        first_time = (self.id is None)
425        if not first_time:
426            AclGroup.check_for_acl_violation_hosts([self])
427        if self.locked and not self.locked_by:
428            self.locked_by = User.current_user()
429            self.lock_time = datetime.now()
430            self.dirty = True
431        elif not self.locked and self.locked_by:
432            self.locked_by = None
433            self.lock_time = None
434        super(Host, self).save(*args, **kwargs)
435        if first_time:
436            everyone = AclGroup.objects.get(name='Everyone')
437            everyone.hosts.add(self)
438        self._check_for_updated_attributes()
439
440
441    def delete(self):
442        AclGroup.check_for_acl_violation_hosts([self])
443        for queue_entry in self.hostqueueentry_set.all():
444            queue_entry.deleted = True
445            queue_entry.abort()
446        super(Host, self).delete()
447
448
449    def on_attribute_changed(self, attribute, old_value):
450        assert attribute == 'status'
451        logging.info(self.hostname + ' -> ' + self.status)
452
453
454    def enqueue_job(self, job, atomic_group=None, is_template=False):
455        """Enqueue a job on this host.
456
457        @param job: A job to enqueue.
458        @param atomic_group: The associated atomic group.
459        @param is_template: Whether the status should be "Template".
460        """
461        queue_entry = HostQueueEntry.create(host=self, job=job,
462                                            is_template=is_template,
463                                            atomic_group=atomic_group)
464        # allow recovery of dead hosts from the frontend
465        if not self.active_queue_entry() and self.is_dead():
466            self.status = Host.Status.READY
467            self.save()
468        queue_entry.save()
469
470        block = IneligibleHostQueue(job=job, host=self)
471        block.save()
472
473
474    def platform(self):
475        """The platform of the host."""
476        # TODO(showard): slightly hacky?
477        platforms = self.labels.filter(platform=True)
478        if len(platforms) == 0:
479            return None
480        return platforms[0]
481    platform.short_description = 'Platform'
482
483
484    @classmethod
485    def check_no_platform(cls, hosts):
486        """Verify the specified hosts have no associated platforms.
487
488        @param cls: Implicit class object.
489        @param hosts: The hosts to verify.
490        @raises model_logic.ValidationError if any hosts already have a
491            platform.
492        """
493        Host.objects.populate_relationships(hosts, Label, 'label_list')
494        errors = []
495        for host in hosts:
496            platforms = [label.name for label in host.label_list
497                         if label.platform]
498            if platforms:
499                # join the platform names so that, if this host somehow has
500                # multiple platforms, we can report all of them
501                errors.append('Host %s already has a platform: %s' % (
502                              host.hostname, ', '.join(platforms)))
503        if errors:
504            raise model_logic.ValidationError({'labels': '; '.join(errors)})
505
506
507    def is_dead(self):
508        """Returns whether the host is dead (has status repair failed)."""
509        return self.status == Host.Status.REPAIR_FAILED
510
511
512    def active_queue_entry(self):
513        """Returns the active queue entry for this host, or None if none."""
514        active = list(self.hostqueueentry_set.filter(active=True))
515        if not active:
516            return None
517        assert len(active) == 1, ('More than one active entry for '
518                                  'host ' + self.hostname)
519        return active[0]
520
521
522    def _get_attribute_model_and_args(self, attribute):
523        return HostAttribute, dict(host=self, attribute=attribute)
524
525
526    class Meta:
527        """Metadata for the Host class."""
528        db_table = 'afe_hosts'
529
530    def __unicode__(self):
531        return unicode(self.hostname)
532
533
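# Illustrative sketch (not part of the original module) of the lock
# bookkeeping performed by Host.save(); the hostname is hypothetical and a
# matching row is assumed to exist.
def _example_lock_host():
    host = Host.objects.get(hostname='test-machine-1')
    host.locked = True
    # save() fills in locked_by (the current user) and lock_time and marks
    # the host dirty; clearing `locked` later resets both fields to None.
    host.save()
    return host.locked_by, host.lock_time

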
534class HostAttribute(dbmodels.Model):
535    """Arbitrary keyvals associated with hosts."""
536    host = dbmodels.ForeignKey(Host)
537    attribute = dbmodels.CharField(max_length=90)
538    value = dbmodels.CharField(max_length=300)
539
540    objects = model_logic.ExtendedManager()
541
542    class Meta:
543        """Metadata for the HostAttribute class."""
544        db_table = 'afe_host_attributes'
545
546
547class Test(dbmodels.Model, model_logic.ModelExtensions):
548    """\
549    Required:
550    author: author name
551    description: description of the test
552    name: test name
553    time: short, medium, long
554    test_class: This describes the class the test belongs in.
555    test_category: This describes the category the test belongs to.
556    test_type: Client or Server
557    path: path to pass to run_test()
558    sync_count: a number >= 1 (1 being the default). If it's 1, it's an
559                async job. If it's > 1, it's a sync job for that number of
560                machines, i.e. if sync_count = 2 it is a sync job that
561                requires two machines.
562    Optional:
563    dependencies: What the test requires to run. Comma-delimited list.
564    dependency_labels: many-to-many relationship with labels corresponding to
565                       test dependencies.
566    experimental: If this is set to True, production servers will ignore the test
567    run_verify: Whether or not the scheduler should run the verify stage
568    """
569    TestTime = enum.Enum('SHORT', 'MEDIUM', 'LONG', start_value=1)
570    TestTypes = model_attributes.TestTypes
571    # TODO(showard) - this should be merged with Job.ControlType (but right
572    # now they use opposite values)
573
574    name = dbmodels.CharField(max_length=255, unique=True)
575    author = dbmodels.CharField(max_length=255)
576    test_class = dbmodels.CharField(max_length=255)
577    test_category = dbmodels.CharField(max_length=255)
578    dependencies = dbmodels.CharField(max_length=255, blank=True)
579    description = dbmodels.TextField(blank=True)
580    experimental = dbmodels.BooleanField(default=True)
581    run_verify = dbmodels.BooleanField(default=True)
582    test_time = dbmodels.SmallIntegerField(choices=TestTime.choices(),
583                                           default=TestTime.MEDIUM)
584    test_type = dbmodels.SmallIntegerField(choices=TestTypes.choices())
585    sync_count = dbmodels.IntegerField(default=1)
586    path = dbmodels.CharField(max_length=255, unique=True)
587
588    dependency_labels = (
589        dbmodels.ManyToManyField(Label, blank=True,
590                                 db_table='afe_autotests_dependency_labels'))
591    name_field = 'name'
592    objects = model_logic.ExtendedManager()
593
594
595    def admin_description(self):
596        """Returns a string representing the admin description."""
597        escaped_description = saxutils.escape(self.description)
598        return '<span style="white-space:pre">%s</span>' % escaped_description
599    admin_description.allow_tags = True
600    admin_description.short_description = 'Description'
601
602
603    class Meta:
604        """Metadata for class Test."""
605        db_table = 'afe_autotests'
606
607    def __unicode__(self):
608        return unicode(self.name)
609
610
611class TestParameter(dbmodels.Model):
612    """
613    A declared parameter of a test
614    """
615    test = dbmodels.ForeignKey(Test)
616    name = dbmodels.CharField(max_length=255)
617
618    class Meta:
619        """Metadata for class TestParameter."""
620        db_table = 'afe_test_parameters'
621        unique_together = ('test', 'name')
622
623    def __unicode__(self):
624        return u'%s (%s)' % (self.name, self.test.name)
625
626
627class Profiler(dbmodels.Model, model_logic.ModelExtensions):
628    """\
629    Required:
630    name: profiler name
631    test_type: Client or Server
632
633    Optional:
634    description: arbitrary text description
635    """
636    name = dbmodels.CharField(max_length=255, unique=True)
637    description = dbmodels.TextField(blank=True)
638
639    name_field = 'name'
640    objects = model_logic.ExtendedManager()
641
642
643    class Meta:
644        """Metadata for class Profiler."""
645        db_table = 'afe_profilers'
646
647    def __unicode__(self):
648        return unicode(self.name)
649
650
651class AclGroup(dbmodels.Model, model_logic.ModelExtensions):
652    """\
653    Required:
654    name: name of ACL group
655
656    Optional:
657    description: arbitrary description of group
658    """
659    name = dbmodels.CharField(max_length=255, unique=True)
660    description = dbmodels.CharField(max_length=255, blank=True)
661    users = dbmodels.ManyToManyField(User, blank=False,
662                                     db_table='afe_acl_groups_users')
663    hosts = dbmodels.ManyToManyField(Host, blank=True,
664                                     db_table='afe_acl_groups_hosts')
665
666    name_field = 'name'
667    objects = model_logic.ExtendedManager()
668
669    @staticmethod
670    def check_for_acl_violation_hosts(hosts):
671        """Verify the current user has access to the specified hosts.
672
673        @param hosts: The hosts to verify against.
674        @raises AclAccessViolation if the current user doesn't have access
675            to a host.
676        """
677        user = User.current_user()
678        if user.is_superuser():
679            return
680        accessible_host_ids = set(
681            host.id for host in Host.objects.filter(aclgroup__users=user))
682        for host in hosts:
683            # Check if the user has access to this host,
684            # but only if it is not a metahost or a one-time-host.
685            no_access = (isinstance(host, Host)
686                         and not host.invalid
687                         and int(host.id) not in accessible_host_ids)
688            if no_access:
689                raise AclAccessViolation("%s does not have access to %s" %
690                                         (str(user), str(host)))
691
692
693    @staticmethod
694    def check_abort_permissions(queue_entries):
695        """Look for queue entries that aren't abortable by the current user.
696
697        An entry is not abortable if the job isn't owned by this user, and
698        either:
699           * the machine isn't ACL-accessible to the user, or
700           * the machine is in the "Everyone" ACL
701
702        @param queue_entries: The queue entries to check.
703        @raises AclAccessViolation if a queue entry is not abortable by the
704            current user.
705        """
706        user = User.current_user()
707        if user.is_superuser():
708            return
709        not_owned = queue_entries.exclude(job__owner=user.login)
710        # I do this using ID sets instead of just Django filters because
711        # filtering on M2M dbmodels is broken in Django 0.96. It's better in
712        # 1.0.
713        # TODO: Use Django filters, now that we're using 1.0.
714        accessible_ids = set(
715            entry.id for entry
716            in not_owned.filter(host__aclgroup__users__login=user.login))
717        public_ids = set(entry.id for entry
718                         in not_owned.filter(host__aclgroup__name='Everyone'))
719        cannot_abort = [entry for entry in not_owned.select_related()
720                        if entry.id not in accessible_ids
721                        or entry.id in public_ids]
722        if len(cannot_abort) == 0:
723            return
724        entry_names = ', '.join('%s-%s/%s' % (entry.job.id, entry.job.owner,
725                                              entry.host_or_metahost_name())
726                                for entry in cannot_abort)
727        raise AclAccessViolation('You cannot abort the following job entries: '
728                                 + entry_names)
729
730
731    def check_for_acl_violation_acl_group(self):
732        """Verifies the current user has acces to this ACL group.
733
734        @raises AclAccessViolation if the current user doesn't have access to
735            this ACL group.
736        """
737        user = User.current_user()
738        if user.is_superuser():
739            return
740        if self.name == 'Everyone':
741            raise AclAccessViolation("You cannot modify 'Everyone'!")
742        if not user in self.users.all():
743            raise AclAccessViolation("You do not have access to %s"
744                                     % self.name)
745
746    @staticmethod
747    def on_host_membership_change():
748        """Invoked when host membership changes."""
749        everyone = AclGroup.objects.get(name='Everyone')
750
751        # find hosts that aren't in any ACL group and add them to Everyone
752        # TODO(showard): this is a bit of a hack, since the fact that this query
753        # works is kind of a coincidence of Django internals.  This trick
754        # doesn't work in general (on all foreign key relationships).  I'll
755        # replace it with a better technique when the need arises.
756        orphaned_hosts = Host.valid_objects.filter(aclgroup__id__isnull=True)
757        everyone.hosts.add(*orphaned_hosts.distinct())
758
759        # find hosts in both Everyone and another ACL group, and remove them
760        # from Everyone
761        hosts_in_everyone = Host.valid_objects.filter(aclgroup__name='Everyone')
762        acled_hosts = set()
763        for host in hosts_in_everyone:
764            # Has an ACL group other than Everyone
765            if host.aclgroup_set.count() > 1:
766                acled_hosts.add(host)
767        everyone.hosts.remove(*acled_hosts)
768
769
770    def delete(self):
771        if (self.name == 'Everyone'):
772            raise AclAccessViolation("You cannot delete 'Everyone'!")
773        self.check_for_acl_violation_acl_group()
774        super(AclGroup, self).delete()
775        self.on_host_membership_change()
776
777
778    def add_current_user_if_empty(self):
779        """Adds the current user if the set of users is empty."""
780        if not self.users.count():
781            self.users.add(User.current_user())
782
783
784    def perform_after_save(self, change):
785        """Called after a save.
786
787        @param change: Whether there was a change.
788        """
789        if not change:
790            self.users.add(User.current_user())
791        self.add_current_user_if_empty()
792        self.on_host_membership_change()
793
794
795    def save(self, *args, **kwargs):
796        change = bool(self.id)
797        if change:
798            # Check the original object for an ACL violation
799            AclGroup.objects.get(id=self.id).check_for_acl_violation_acl_group()
800        super(AclGroup, self).save(*args, **kwargs)
801        self.perform_after_save(change)
802
803
804    class Meta:
805        """Metadata for class AclGroup."""
806        db_table = 'afe_acl_groups'
807
808    def __unicode__(self):
809        return unicode(self.name)
810
811
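# Illustrative sketch (not part of the original module) of the ACL guard
# that protects host modifications; the `hosts` argument is hypothetical.
def _example_acl_guard(hosts):
    # Raises AclAccessViolation unless the current user is a superuser or
    # every valid host in the list belongs to one of the user's ACL groups.
    AclGroup.check_for_acl_violation_hosts(hosts)

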
812class Kernel(dbmodels.Model):
813    """
814    A kernel configuration for a parameterized job
815    """
816    version = dbmodels.CharField(max_length=255)
817    cmdline = dbmodels.CharField(max_length=255, blank=True)
818
819    @classmethod
820    def create_kernels(cls, kernel_list):
821        """Creates all kernels in the kernel list.
822
823        @param cls: Implicit class object.
824        @param kernel_list: A list of dictionaries that describe the kernels,
825            in the same format as the 'kernel' argument to
826            rpc_interface.generate_control_file.
827        @return A list of the created kernels.
828        """
829        if not kernel_list:
830            return None
831        return [cls._create(kernel) for kernel in kernel_list]
832
833
834    @classmethod
835    def _create(cls, kernel_dict):
836        version = kernel_dict.pop('version')
837        cmdline = kernel_dict.pop('cmdline', '')
838
839        if kernel_dict:
840            raise Exception('Extraneous kernel arguments remain: %r'
841                            % kernel_dict)
842
843        kernel, _ = cls.objects.get_or_create(version=version,
844                                              cmdline=cmdline)
845        return kernel
846
847
848    class Meta:
849        """Metadata for class Kernel."""
850        db_table = 'afe_kernels'
851        unique_together = ('version', 'cmdline')
852
853    def __unicode__(self):
854        return u'%s %s' % (self.version, self.cmdline)
855
856
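# Illustrative sketch (not part of the original module) of the input format
# accepted by Kernel.create_kernels(); the version and cmdline values are
# hypothetical.
def _example_create_kernels():
    # Each dict must contain 'version' and may contain 'cmdline'; any other
    # key raises an exception.  Existing (version, cmdline) rows are reused.
    return Kernel.create_kernels([
            {'version': '3.8.0', 'cmdline': 'console=ttyS0'},
            {'version': '3.8.0'},
    ])

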
857class ParameterizedJob(dbmodels.Model):
858    """
859    Auxiliary configuration for a parameterized job.
860    """
861    test = dbmodels.ForeignKey(Test)
862    label = dbmodels.ForeignKey(Label, null=True)
863    use_container = dbmodels.BooleanField(default=False)
864    profile_only = dbmodels.BooleanField(default=False)
865    upload_kernel_config = dbmodels.BooleanField(default=False)
866
867    kernels = dbmodels.ManyToManyField(
868            Kernel, db_table='afe_parameterized_job_kernels')
869    profilers = dbmodels.ManyToManyField(
870            Profiler, through='ParameterizedJobProfiler')
871
872
873    @classmethod
874    def smart_get(cls, id_or_name, *args, **kwargs):
875        """For compatibility with Job.add_object.
876
877        @param cls: Implicit class object.
878        @param id_or_name: The ID or name to get.
879        @param args: Non-keyword arguments.
880        @param kwargs: Keyword arguments.
881        """
882        return cls.objects.get(pk=id_or_name)
883
884
885    def job(self):
886        """Returns the job if it exists, or else None."""
887        jobs = self.job_set.all()
888        assert jobs.count() <= 1
889        return jobs and jobs[0] or None
890
891
892    class Meta:
893        """Metadata for class ParameterizedJob."""
894        db_table = 'afe_parameterized_jobs'
895
896    def __unicode__(self):
897        return u'%s (parameterized) - %s' % (self.test.name, self.job())
898
899
900class ParameterizedJobProfiler(dbmodels.Model):
901    """
902    A profiler to run on a parameterized job
903    """
904    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
905    profiler = dbmodels.ForeignKey(Profiler)
906
907    class Meta:
908        """Metedata for class ParameterizedJobProfiler."""
909        db_table = 'afe_parameterized_jobs_profilers'
910        unique_together = ('parameterized_job', 'profiler')
911
912
913class ParameterizedJobProfilerParameter(dbmodels.Model):
914    """
915    A parameter for a profiler in a parameterized job
916    """
917    parameterized_job_profiler = dbmodels.ForeignKey(ParameterizedJobProfiler)
918    parameter_name = dbmodels.CharField(max_length=255)
919    parameter_value = dbmodels.TextField()
920    parameter_type = dbmodels.CharField(
921            max_length=8, choices=model_attributes.ParameterTypes.choices())
922
923    class Meta:
924        """Metadata for class ParameterizedJobProfilerParameter."""
925        db_table = 'afe_parameterized_job_profiler_parameters'
926        unique_together = ('parameterized_job_profiler', 'parameter_name')
927
928    def __unicode__(self):
929        return u'%s - %s' % (self.parameterized_job_profiler.profiler.name,
930                             self.parameter_name)
931
932
933class ParameterizedJobParameter(dbmodels.Model):
934    """
935    Parameters for a parameterized job
936    """
937    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
938    test_parameter = dbmodels.ForeignKey(TestParameter)
939    parameter_value = dbmodels.TextField()
940    parameter_type = dbmodels.CharField(
941            max_length=8, choices=model_attributes.ParameterTypes.choices())
942
943    class Meta:
944        """Metadata for class ParameterizedJobParameter."""
945        db_table = 'afe_parameterized_job_parameters'
946        unique_together = ('parameterized_job', 'test_parameter')
947
948    def __unicode__(self):
949        return u'%s - %s' % (self.parameterized_job.job().name,
950                             self.test_parameter.name)
951
952
953class JobManager(model_logic.ExtendedManager):
954    'Custom manager to provide efficient status counts querying.'
955    def get_status_counts(self, job_ids):
956        """Returns a dict mapping the given job IDs to their status count dicts.
957
958        @param job_ids: A list of job IDs.
959        """
960        if not job_ids:
961            return {}
962        id_list = '(%s)' % ','.join(str(job_id) for job_id in job_ids)
963        cursor = connection.cursor()
964        cursor.execute("""
965            SELECT job_id, status, aborted, complete, COUNT(*)
966            FROM afe_host_queue_entries
967            WHERE job_id IN %s
968            GROUP BY job_id, status, aborted, complete
969            """ % id_list)
970        all_job_counts = dict((job_id, {}) for job_id in job_ids)
971        for job_id, status, aborted, complete, count in cursor.fetchall():
972            job_dict = all_job_counts[job_id]
973            full_status = HostQueueEntry.compute_full_status(status, aborted,
974                                                             complete)
975            job_dict.setdefault(full_status, 0)
976            job_dict[full_status] += count
977        return all_job_counts
978
979
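# Illustrative sketch (not part of the original module) of
# JobManager.get_status_counts(); the job IDs and counts in the comment are
# hypothetical.
def _example_status_counts():
    # Maps each requested job ID to {full status: entry count}, e.g.
    # {42: {'Completed': 3, 'Aborted (Queued)': 1}, 43: {}}.
    return Job.objects.get_status_counts([42, 43])

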
980class Job(dbmodels.Model, model_logic.ModelExtensions):
981    """\
982    owner: username of job owner
983    name: job name (does not have to be unique)
984    priority: Low, Medium, High, Urgent (or 0-3)
985    control_file: contents of control file
986    control_type: Client or Server
987    created_on: date of job creation
988    submitted_on: date of job submission
989    synch_count: how many hosts should be used per autoserv execution
990    run_verify: Whether or not to run the verify phase
991    timeout: hours from queuing time until job times out
992    max_runtime_hrs: DEPRECATED - hours from job starting time until job
993                     times out
994    max_runtime_mins: minutes from job starting time until job times out
995    email_list: list of people to email on completion delimited by any of:
996                white space, ',', ':', ';'
997    dependency_labels: many-to-many relationship with labels corresponding to
998                       job dependencies
999    reboot_before: Never, If dirty, or Always
1000    reboot_after: Never, If all tests passed, or Always
1001    parse_failed_repair: if True, a failed repair launched by this job will have
1002                         its results parsed as part of the job.
1003    drone_set: The set of drones to run this job on
1004    """
1005    DEFAULT_TIMEOUT = global_config.global_config.get_config_value(
1006        'AUTOTEST_WEB', 'job_timeout_default', default=240)
1007    # MAX_RUNTIME_HRS is deprecated. Will be removed after switch to mins is
1008    # completed.
1009    DEFAULT_MAX_RUNTIME_HRS = global_config.global_config.get_config_value(
1010        'AUTOTEST_WEB', 'job_max_runtime_hrs_default', default=72)
1011    DEFAULT_MAX_RUNTIME_MINS = global_config.global_config.get_config_value(
1012        'AUTOTEST_WEB', 'job_max_runtime_mins_default', default=72*60)
1013    DEFAULT_PARSE_FAILED_REPAIR = global_config.global_config.get_config_value(
1014        'AUTOTEST_WEB', 'parse_failed_repair_default', type=bool,
1015        default=False)
1016
1017    Priority = enum.Enum('Low', 'Medium', 'High', 'Urgent')
1018    ControlType = enum.Enum('Server', 'Client', start_value=1)
1019
1020    owner = dbmodels.CharField(max_length=255)
1021    name = dbmodels.CharField(max_length=255)
1022    priority = dbmodels.SmallIntegerField(choices=Priority.choices(),
1023                                          blank=True, # to allow 0
1024                                          default=Priority.MEDIUM)
1025    control_file = dbmodels.TextField(null=True, blank=True)
1026    control_type = dbmodels.SmallIntegerField(choices=ControlType.choices(),
1027                                              blank=True, # to allow 0
1028                                              default=ControlType.CLIENT)
1029    created_on = dbmodels.DateTimeField()
1030    synch_count = dbmodels.IntegerField(null=True, default=1)
1031    timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
1032    run_verify = dbmodels.BooleanField(default=True)
1033    email_list = dbmodels.CharField(max_length=250, blank=True)
1034    dependency_labels = (
1035            dbmodels.ManyToManyField(Label, blank=True,
1036                                     db_table='afe_jobs_dependency_labels'))
1037    reboot_before = dbmodels.SmallIntegerField(
1038        choices=model_attributes.RebootBefore.choices(), blank=True,
1039        default=DEFAULT_REBOOT_BEFORE)
1040    reboot_after = dbmodels.SmallIntegerField(
1041        choices=model_attributes.RebootAfter.choices(), blank=True,
1042        default=DEFAULT_REBOOT_AFTER)
1043    parse_failed_repair = dbmodels.BooleanField(
1044        default=DEFAULT_PARSE_FAILED_REPAIR)
1045    # max_runtime_hrs is deprecated. Will be removed after switch to mins is
1046    # completed.
1047    max_runtime_hrs = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_HRS)
1048    max_runtime_mins = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_MINS)
1049    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)
1050
1051    parameterized_job = dbmodels.ForeignKey(ParameterizedJob, null=True,
1052                                            blank=True)
1053
1054
1055    # custom manager
1056    objects = JobManager()
1057
1058
1059    def is_server_job(self):
1060        """Returns whether this job is of type server."""
1061        return self.control_type == self.ControlType.SERVER
1062
1063
1064    @classmethod
1065    def parameterized_jobs_enabled(cls):
1066        """Returns whether parameterized jobs are enabled.
1067
1068        @param cls: Implicit class object.
1069        """
1070        return global_config.global_config.get_config_value(
1071                'AUTOTEST_WEB', 'parameterized_jobs', type=bool)
1072
1073
1074    @classmethod
1075    def check_parameterized_job(cls, control_file, parameterized_job):
1076        """Checks that the job is valid given the global config settings.
1077
1078        First, either control_file must be set, or parameterized_job must be
1079        set, but not both. Second, parameterized_job must be set if and only if
1080        the parameterized_jobs option in the global config is set to True.
1081
1082        @param cls: Implicit class object.
1083        @param control_file: A control file.
1084        @param parameterized_job: A parameterized job.
1085        """
1086        if not (bool(control_file) ^ bool(parameterized_job)):
1087            raise Exception('Job must have either control file or '
1088                            'parameterization, but not both')
1089
1090        parameterized_jobs_enabled = cls.parameterized_jobs_enabled()
1091        if control_file and parameterized_jobs_enabled:
1092            raise Exception('Control file specified, but parameterized jobs '
1093                            'are enabled')
1094        if parameterized_job and not parameterized_jobs_enabled:
1095            raise Exception('Parameterized job specified, but parameterized '
1096                            'jobs are not enabled')
1097
1098
1099    @classmethod
1100    def create(cls, owner, options, hosts):
1101        """Creates a job.
1102
1103        The job is created by taking some information (the listed args) and
1104        filling in the rest of the necessary information.
1105
1106        @param cls: Implicit class object.
1107        @param owner: The owner for the job.
1108        @param options: An options object.
1109        @param hosts: The hosts to use.
1110        """
1111        AclGroup.check_for_acl_violation_hosts(hosts)
1112
1113        control_file = options.get('control_file')
1114        parameterized_job = options.get('parameterized_job')
1115
1116        # The current implementation of parameterized jobs requires that only
1117        # control files or parameterized jobs are used. Using the image
1118        # parameter on autoupdate_ParameterizedJob doesn't mix pure
1119        # parameterized jobs with control-file jobs, but it mucks enough with
1120        # normal jobs (by adding a parameterized id to them) that this check
1121        # would fail. So for now we just skip this check.
1122        # cls.check_parameterized_job(control_file=control_file,
1123        #                             parameterized_job=parameterized_job)
1124        user = User.current_user()
1125        if options.get('reboot_before') is None:
1126            options['reboot_before'] = user.get_reboot_before_display()
1127        if options.get('reboot_after') is None:
1128            options['reboot_after'] = user.get_reboot_after_display()
1129
1130        drone_set = DroneSet.resolve_name(options.get('drone_set'))
1131
1132        job = cls.add_object(
1133            owner=owner,
1134            name=options['name'],
1135            priority=options['priority'],
1136            control_file=control_file,
1137            control_type=options['control_type'],
1138            synch_count=options.get('synch_count'),
1139            timeout=options.get('timeout'),
1140            max_runtime_mins=options.get('max_runtime_mins'),
1141            run_verify=options.get('run_verify'),
1142            email_list=options.get('email_list'),
1143            reboot_before=options.get('reboot_before'),
1144            reboot_after=options.get('reboot_after'),
1145            parse_failed_repair=options.get('parse_failed_repair'),
1146            created_on=datetime.now(),
1147            drone_set=drone_set,
1148            parameterized_job=parameterized_job)
1149
1150        job.dependency_labels = options['dependencies']
1151
1152        if options.get('keyvals'):
1153            for key, value in options['keyvals'].iteritems():
1154                JobKeyval.objects.create(job=job, key=key, value=value)
1155
1156        return job
1157
1158
1159    def save(self, *args, **kwargs):
1160        # The current implementation of parameterized jobs requires that only
1161        # control files or parameterized jobs are used. Using the image
1162        # parameter on autoupdate_ParameterizedJob doesn't mix pure
1163        # parameterized jobs with control-file jobs, but it mucks enough with
1164        # normal jobs (by adding a parameterized id to them) that this check
1165        # would fail. So for now we just skip this check.
1166        # cls.check_parameterized_job(control_file=self.control_file,
1167        #                             parameterized_job=self.parameterized_job)
1168        super(Job, self).save(*args, **kwargs)
1169
1170
1171    def queue(self, hosts, atomic_group=None, is_template=False):
1172        """Enqueue a job on the given hosts.
1173
1174        @param hosts: The hosts to use.
1175        @param atomic_group: The associated atomic group.
1176        @param is_template: Whether the status should be "Template".
1177        """
1178        if not hosts:
1179            if atomic_group:
1180                # No hosts or labels are required to queue an atomic group
1181                # Job.  However, if they are given, we respect them below.
1182                atomic_group.enqueue_job(self, is_template=is_template)
1183            else:
1184                # hostless job
1185                entry = HostQueueEntry.create(job=self, is_template=is_template)
1186                entry.save()
1187            return
1188
1189        for host in hosts:
1190            host.enqueue_job(self, atomic_group=atomic_group,
1191                             is_template=is_template)
1192
1193
1194    def create_recurring_job(self, start_date, loop_period, loop_count, owner):
1195        """Creates a recurring job.
1196
1197        @param start_date: The starting date of the job.
1198        @param loop_period: How often to re-run the job, in seconds.
1199        @param loop_count: The re-run count.
1200        @param owner: The owner of the job.
1201        """
1202        rec = RecurringRun(job=self, start_date=start_date,
1203                           loop_period=loop_period,
1204                           loop_count=loop_count,
1205                           owner=User.objects.get(login=owner))
1206        rec.save()
1207        return rec.id
1208
1209
1210    def user(self):
1211        """Gets the user of this job, or None if it doesn't exist."""
1212        try:
1213            return User.objects.get(login=self.owner)
1214        except User.DoesNotExist:
1215            return None
1216
1217
1218    def abort(self):
1219        """Aborts this job."""
1220        for queue_entry in self.hostqueueentry_set.all():
1221            queue_entry.abort()
1222
1223
1224    def tag(self):
1225        """Returns a string tag for this job."""
1226        return '%s-%s' % (self.id, self.owner)
1227
1228
1229    def keyval_dict(self):
1230        """Returns all keyvals for this job as a dictionary."""
1231        return dict((keyval.key, keyval.value)
1232                    for keyval in self.jobkeyval_set.all())
1233
1234
1235    class Meta:
1236        """Metadata for class Job."""
1237        db_table = 'afe_jobs'
1238
1239    def __unicode__(self):
1240        return u'%s (%s-%s)' % (self.name, self.id, self.owner)
1241
1242
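# Illustrative sketch (not part of the original module) of the options dict
# consumed by Job.create(); all values shown are hypothetical and a working
# AFE database is assumed.
def _example_create_and_queue_job(hosts):
    options = {
        'name': 'example-smoke',
        'priority': Job.Priority.MEDIUM,
        'control_file': 'job.run_test("sleeptest")',
        'control_type': Job.ControlType.CLIENT,
        'synch_count': 1,
        'dependencies': [],
        'keyvals': {'suite': 'smoke'},
    }
    job = Job.create(owner='debug_user', options=options, hosts=hosts)
    # queue() makes one HostQueueEntry per host, or a single hostless entry
    # when no hosts (and no atomic group) are given.
    job.queue(hosts)
    return job

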
1243class JobKeyval(dbmodels.Model, model_logic.ModelExtensions):
1244    """Keyvals associated with jobs"""
1245    job = dbmodels.ForeignKey(Job)
1246    key = dbmodels.CharField(max_length=90)
1247    value = dbmodels.CharField(max_length=300)
1248
1249    objects = model_logic.ExtendedManager()
1250
1251    class Meta:
1252        """Metadata for class JobKeyval."""
1253        db_table = 'afe_job_keyvals'
1254
1255
1256class IneligibleHostQueue(dbmodels.Model, model_logic.ModelExtensions):
1257    """Represents an ineligible host queue."""
1258    job = dbmodels.ForeignKey(Job)
1259    host = dbmodels.ForeignKey(Host)
1260
1261    objects = model_logic.ExtendedManager()
1262
1263    class Meta:
1264        """Metadata for class IneligibleHostQueue."""
1265        db_table = 'afe_ineligible_host_queues'
1266
1267
1268class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
1269    """Represents a host queue entry."""
1270    Status = host_queue_entry_states.Status
1271    ACTIVE_STATUSES = host_queue_entry_states.ACTIVE_STATUSES
1272    COMPLETE_STATUSES = host_queue_entry_states.COMPLETE_STATUSES
1273
1274    job = dbmodels.ForeignKey(Job)
1275    host = dbmodels.ForeignKey(Host, blank=True, null=True)
1276    status = dbmodels.CharField(max_length=255)
1277    meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
1278                                    db_column='meta_host')
1279    active = dbmodels.BooleanField(default=False)
1280    complete = dbmodels.BooleanField(default=False)
1281    deleted = dbmodels.BooleanField(default=False)
1282    execution_subdir = dbmodels.CharField(max_length=255, blank=True,
1283                                          default='')
1284    # If atomic_group is set, this is a virtual HostQueueEntry that will
1285    # be expanded into many actual hosts within the group at schedule time.
1286    atomic_group = dbmodels.ForeignKey(AtomicGroup, blank=True, null=True)
1287    aborted = dbmodels.BooleanField(default=False)
1288    started_on = dbmodels.DateTimeField(null=True, blank=True)
1289
1290    objects = model_logic.ExtendedManager()
1291
1292
1293    def __init__(self, *args, **kwargs):
1294        super(HostQueueEntry, self).__init__(*args, **kwargs)
1295        self._record_attributes(['status'])
1296
1297
1298    @classmethod
1299    def create(cls, job, host=None, meta_host=None, atomic_group=None,
1300                 is_template=False):
1301        """Creates a new host queue entry.
1302
1303        @param cls: Implicit class object.
1304        @param job: The associated job.
1305        @param host: The associated host.
1306        @param meta_host: The associated meta host.
1307        @param atomic_group: The associated atomic group.
1308        @param is_template: Whether the status should be "Template".
1309        """
1310        if is_template:
1311            status = cls.Status.TEMPLATE
1312        else:
1313            status = cls.Status.QUEUED
1314
1315        return cls(job=job, host=host, meta_host=meta_host,
1316                   atomic_group=atomic_group, status=status)
1317
1318
1319    def save(self, *args, **kwargs):
1320        self._set_active_and_complete()
1321        super(HostQueueEntry, self).save(*args, **kwargs)
1322        self._check_for_updated_attributes()
1323
1324
1325    def execution_path(self):
1326        """
1327        Path to this entry's results (relative to the base results directory).
1328        """
1329        return os.path.join(self.job.tag(), self.execution_subdir)
1330
1331
1332    def host_or_metahost_name(self):
1333        """Returns the first non-None name found in priority order.
1334
1335        The priority order checked is: (1) host name; (2) meta host name; and
1336        (3) atomic group name.
1337        """
1338        if self.host:
1339            return self.host.hostname
1340        elif self.meta_host:
1341            return self.meta_host.name
1342        else:
1343            assert self.atomic_group, "no host, meta_host or atomic group!"
1344            return self.atomic_group.name
1345
1346
1347    def _set_active_and_complete(self):
1348        if self.status in self.ACTIVE_STATUSES:
1349            self.active, self.complete = True, False
1350        elif self.status in self.COMPLETE_STATUSES:
1351            self.active, self.complete = False, True
1352        else:
1353            self.active, self.complete = False, False
1354
1355
1356    def on_attribute_changed(self, attribute, old_value):
1357        assert attribute == 'status'
1358        logging.info('%s/%d (%d) -> %s', self.host, self.job.id, self.id,
1359                     self.status)
1360
1361
1362    def is_meta_host_entry(self):
1363        'True if this entry has a meta_host instead of a host.'
1364        return self.host is None and self.meta_host is not None
1365
1366
1367    def log_abort(self, user):
1368        """Logs an abort.
1369
1370        @param user: The user performing the abort.
1371        """
1372        abort_log = AbortedHostQueueEntry(queue_entry=self, aborted_by=user)
1373        abort_log.save()
1374
1375
1376    def abort(self):
1377        """Aborts this host queue entry."""
1378        # This isn't completely immune to race conditions since it's not atomic,
1379        # but it should be safe given the scheduler's behavior.
1380        if not self.complete and not self.aborted:
1381            self.log_abort(User.current_user())
1382            self.aborted = True
1383            self.save()
1384
1385
1386    @classmethod
1387    def compute_full_status(cls, status, aborted, complete):
1388        """Returns a modified status msg if the host queue entry was aborted.
1389
1390        @param cls: Implicit class object.
1391        @param status: The original status message.
1392        @param aborted: Whether the host queue entry was aborted.
1393        @param complete: Whether the host queue entry was completed.
1394        """
1395        if aborted and not complete:
1396            return 'Aborted (%s)' % status
1397        return status
1398
1399
1400    def full_status(self):
1401        """Returns the full status of this host queue entry, as a string."""
1402        return self.compute_full_status(self.status, self.aborted,
1403                                        self.complete)
1404
1405
1406    def _postprocess_object_dict(self, object_dict):
1407        object_dict['full_status'] = self.full_status()
1408
1409
1410    class Meta:
1411        """Metadata for class HostQueueEntry."""
1412        db_table = 'afe_host_queue_entries'
1413
1414
1415
1416    def __unicode__(self):
1417        hostname = None
1418        if self.host:
1419            hostname = self.host.hostname
1420        return u"%s/%d (%d)" % (hostname, self.job.id, self.id)
1421
1422
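# Illustrative sketch (not part of the original module) of how an
# aborted-but-incomplete entry is reported by compute_full_status() and
# full_status().
def _example_full_status():
    # Wraps the status, e.g. 'Aborted (Queued)'; once the entry is complete
    # the underlying status string is returned unchanged.
    return HostQueueEntry.compute_full_status(HostQueueEntry.Status.QUEUED,
                                              aborted=True, complete=False)

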
1423class AbortedHostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
1424    """Represents an aborted host queue entry."""
1425    queue_entry = dbmodels.OneToOneField(HostQueueEntry, primary_key=True)
1426    aborted_by = dbmodels.ForeignKey(User)
1427    aborted_on = dbmodels.DateTimeField()
1428
1429    objects = model_logic.ExtendedManager()
1430
1431
1432    def save(self, *args, **kwargs):
1433        self.aborted_on = datetime.now()
1434        super(AbortedHostQueueEntry, self).save(*args, **kwargs)
1435
1436    class Meta:
1437        """Metadata for class AbortedHostQueueEntry."""
1438        db_table = 'afe_aborted_host_queue_entries'
1439
1440
1441class RecurringRun(dbmodels.Model, model_logic.ModelExtensions):
1442    """\
1443    job: job to use as a template
1444    owner: owner of the instantiated template
1445    start_date: Run the job at scheduled date
1446    loop_period: Re-run (loop) the job periodically
1447                 (in every loop_period seconds)
1448    loop_count: Re-run (loop) count
1449    """
1450
1451    job = dbmodels.ForeignKey(Job)
1452    owner = dbmodels.ForeignKey(User)
1453    start_date = dbmodels.DateTimeField()
1454    loop_period = dbmodels.IntegerField(blank=True)
1455    loop_count = dbmodels.IntegerField(blank=True)
1456
1457    objects = model_logic.ExtendedManager()
1458
1459    class Meta:
1460        """Metadata for class RecurringRun."""
1461        db_table = 'afe_recurring_run'
1462
1463    def __unicode__(self):
1464        return u'RecurringRun(job %s, start %s, period %s, count %s)' % (
1465            self.job.id, self.start_date, self.loop_period, self.loop_count)
1466
1467
1468class SpecialTask(dbmodels.Model, model_logic.ModelExtensions):
1469    """\
1470    Tasks to run on hosts at the next time they are in the Ready state. Use this
1471    for high-priority tasks, such as forced repair or forced reinstall.
1472
1473    host: host to run this task on
1474    task: special task to run
1475    time_requested: date and time the request for this task was made
1476    is_active: task is currently running
1477    is_complete: task has finished running
1478    time_started: date and time the task started
1479    queue_entry: Host queue entry waiting on this task (or None, if task was not
1480                 started in preparation of a job)
1481    """
1482    Task = enum.Enum('Verify', 'Cleanup', 'Repair', string_values=True)
1483
1484    host = dbmodels.ForeignKey(Host, blank=False, null=False)
1485    task = dbmodels.CharField(max_length=64, choices=Task.choices(),
1486                              blank=False, null=False)
1487    requested_by = dbmodels.ForeignKey(User)
1488    time_requested = dbmodels.DateTimeField(auto_now_add=True, blank=False,
1489                                            null=False)
1490    is_active = dbmodels.BooleanField(default=False, blank=False, null=False)
1491    is_complete = dbmodels.BooleanField(default=False, blank=False, null=False)
1492    time_started = dbmodels.DateTimeField(null=True, blank=True)
1493    queue_entry = dbmodels.ForeignKey(HostQueueEntry, blank=True, null=True)
1494    success = dbmodels.BooleanField(default=False, blank=False, null=False)
1495
1496    objects = model_logic.ExtendedManager()
1497
1498
1499    def save(self, **kwargs):
1500        if self.queue_entry:
1501            self.requested_by = User.objects.get(
1502                    login=self.queue_entry.job.owner)
1503        super(SpecialTask, self).save(**kwargs)
1504
1505
1506    def execution_path(self):
1507        """@see HostQueueEntry.execution_path()"""
1508        return 'hosts/%s/%s-%s' % (self.host.hostname, self.id,
1509                                   self.task.lower())
1510
1511
1512    # property to emulate HostQueueEntry.status
1513    @property
1514    def status(self):
1515        """
1516        Return a host queue entry status appropriate for this task.  Although
1517        SpecialTasks are not HostQueueEntries, it is helpful to the user to
1518        present similar statuses.
1519        """
1520        if self.is_complete:
1521            if self.success:
1522                return HostQueueEntry.Status.COMPLETED
1523            return HostQueueEntry.Status.FAILED
1524        if self.is_active:
1525            return HostQueueEntry.Status.RUNNING
1526        return HostQueueEntry.Status.QUEUED
1527
1528
1529    # property to emulate HostQueueEntry.started_on
1530    @property
1531    def started_on(self):
1532        """Returns the time at which this special task started."""
1533        return self.time_started
1534
1535
1536    @classmethod
1537    def schedule_special_task(cls, host, task):
1538        """Schedules a special task on a host if not already scheduled.
1539
1540        @param cls: Implicit class object.
1541        @param host: The host to use.
1542        @param task: The task to schedule.
1543        """
1544        existing_tasks = SpecialTask.objects.filter(host__id=host.id, task=task,
1545                                                    is_active=False,
1546                                                    is_complete=False)
1547        if existing_tasks:
1548            return existing_tasks[0]
1549
1550        special_task = SpecialTask(host=host, task=task,
1551                                   requested_by=User.current_user())
1552        special_task.save()
1553        return special_task
1554
1555
1556    def activate(self):
1557        """
1558        Sets a task as active and sets the time started to the current time.
1559        """
1560        logging.info('Starting: %s', self)
1561        self.is_active = True
1562        self.time_started = datetime.now()
1563        self.save()
1564
1565
1566    def finish(self, success):
1567        """Sets a task as completed.
1568
1569        @param success: Whether or not the task was successful.
1570        """
1571        logging.info('Finished: %s', self)
1572        self.is_active = False
1573        self.is_complete = True
1574        self.success = success
1575        self.save()
1576
1577
1578    class Meta:
1579        """Metadata for class SpecialTask."""
1580        db_table = 'afe_special_tasks'
1581
1582
1583    def __unicode__(self):
1584        result = u'Special Task %s (host %s, task %s, time %s)' % (
1585            self.id, self.host, self.task, self.time_requested)
1586        if self.is_complete:
1587            result += u' (completed)'
1588        elif self.is_active:
1589            result += u' (active)'
1590
1591        return result
1592
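
# Illustrative sketch (not part of the original module) of the SpecialTask
# lifecycle as the scheduler drives it; the `host` argument is hypothetical.
def _example_special_task_lifecycle(host):
    task = SpecialTask.schedule_special_task(host, SpecialTask.Task.VERIFY)
    task.activate()            # task.status now reports HostQueueEntry.Status.RUNNING
    task.finish(success=True)  # task.status now reports HostQueueEntry.Status.COMPLETED
    return task.status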