# models.py revision 51599038f08395067097dc265127cfbcf77c427d
1# pylint: disable-msg=C0111
2
3import logging, os
4from datetime import datetime
5import django.core
6try:
7    from django.db import models as dbmodels, connection
8except django.core.exceptions.ImproperlyConfigured:
9    raise ImportError('Django database not yet configured. Import either '
10                       'setup_django_environment or '
11                       'setup_django_lite_environment from '
12                       'autotest_lib.frontend before any imports that '
13                       'depend on django models.')
14from xml.sax import saxutils
15import common
16from autotest_lib.frontend.afe import model_logic, model_attributes
17from autotest_lib.frontend.afe import rdb_model_extensions
18from autotest_lib.frontend import settings, thread_local
19from autotest_lib.client.common_lib import enum, host_protections, global_config
20from autotest_lib.client.common_lib import host_queue_entry_states
21from autotest_lib.client.common_lib import control_data, priorities
22from autotest_lib.client.common_lib import decorators
23
# Job options and user preferences.
# NOTE: this previously read RebootBefore.NEVER for DEFAULT_REBOOT_AFTER;
# the two enums share the value for NEVER, so using the correct RebootAfter
# enum here is a pure consistency fix with identical runtime behavior.
DEFAULT_REBOOT_BEFORE = model_attributes.RebootBefore.IF_DIRTY
DEFAULT_REBOOT_AFTER = model_attributes.RebootAfter.NEVER
27
28
class AclAccessViolation(Exception):
    """\
    Raised when an operation is attempted without proper permissions as
    dictated by ACLs.
    """
34
35
class AtomicGroup(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    An atomic group defines a collection of hosts which must only be scheduled
    all at once.  Any host with a label having an atomic group will only be
    scheduled for a job at the same time as other hosts sharing that label.

    Required:
      name: A name for this atomic group, e.g. 'rack23' or 'funky_net'.
      max_number_of_machines: The maximum number of machines that will be
              scheduled at once when scheduling jobs to this atomic group.
              The job.synch_count is considered the minimum.

    Optional:
      description: Arbitrary text description of this group's purpose.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)
    # This magic value is the default to simplify the scheduler logic.
    # It must be "large".  The common use of atomic groups is to want all
    # machines in the group to be used, limits on which subset used are
    # often chosen via dependency labels.
    # TODO(dennisjeffrey): Revisit this so we don't have to assume that
    # "infinity" is around 3.3 million.
    INFINITE_MACHINES = 333333333
    max_number_of_machines = dbmodels.IntegerField(default=INFINITE_MACHINES)
    # Soft-delete flag; only editable in the admin for full admins.
    invalid = dbmodels.BooleanField(default=False,
                                  editable=settings.FULL_ADMIN)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()


    def enqueue_job(self, job, is_template=False):
        """Enqueue a job on an associated atomic group of hosts.

        @param job: A job to enqueue.
        @param is_template: Whether the status should be "Template".
        """
        queue_entry = HostQueueEntry.create(atomic_group=self, job=job,
                                            is_template=is_template)
        queue_entry.save()


    def clean_object(self):
        """Clears this group's label associations."""
        self.label_set.clear()


    class Meta:
        """Metadata for class AtomicGroup."""
        db_table = 'afe_atomic_groups'


    def __unicode__(self):
        return unicode(self.name)
91
92
class Label(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    Required:
      name: label name

    Optional:
      kernel_config: URL/path to kernel config for jobs run on this label.
      platform: If True, this is a platform label (defaults to False).
      only_if_needed: If True, a Host with this label can only be used if that
              label is requested by the job/test (either as the meta_host or
              in the job_dependencies).
      atomic_group: The atomic group associated with this label.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    kernel_config = dbmodels.CharField(max_length=255, blank=True)
    platform = dbmodels.BooleanField(default=False)
    # Soft-delete flag; only editable in the admin for full admins.
    invalid = dbmodels.BooleanField(default=False,
                                    editable=settings.FULL_ADMIN)
    only_if_needed = dbmodels.BooleanField(default=False)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    atomic_group = dbmodels.ForeignKey(AtomicGroup, null=True, blank=True)


    def clean_object(self):
        """Clears this label's host and test associations."""
        self.host_set.clear()
        self.test_set.clear()


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job on any host of this label.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        queue_entry = HostQueueEntry.create(meta_host=self, job=job,
                                            is_template=is_template,
                                            atomic_group=atomic_group)
        queue_entry.save()


    class Meta:
        """Metadata for class Label."""
        db_table = 'afe_labels'

    def __unicode__(self):
        return unicode(self.name)
143
144
class Drone(dbmodels.Model, model_logic.ModelExtensions):
    """
    A scheduler drone.

    hostname: the drone's hostname
    """
    hostname = dbmodels.CharField(max_length=255, unique=True)

    name_field = 'hostname'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Saves the drone; restricted to superusers."""
        requester = User.current_user()
        if not requester.is_superuser():
            raise Exception('Only superusers may edit drones')
        super(Drone, self).save(*args, **kwargs)


    def delete(self):
        """Deletes the drone; restricted to superusers."""
        requester = User.current_user()
        if not requester.is_superuser():
            raise Exception('Only superusers may delete drones')
        super(Drone, self).delete()


    class Meta:
        """Metadata for class Drone."""
        db_table = 'afe_drones'

    def __unicode__(self):
        return unicode(self.hostname)
175
176
class DroneSet(dbmodels.Model, model_logic.ModelExtensions):
    """
    A set of scheduler drones

    These will be used by the scheduler to decide what drones a job is allowed
    to run on.

    name: the drone set's name
    drones: the drones that are part of the set
    """
    DRONE_SETS_ENABLED = global_config.global_config.get_config_value(
            'SCHEDULER', 'drone_sets_enabled', type=bool, default=False)
    DEFAULT_DRONE_SET_NAME = global_config.global_config.get_config_value(
            'SCHEDULER', 'default_drone_set_name', default=None)

    name = dbmodels.CharField(max_length=255, unique=True)
    drones = dbmodels.ManyToManyField(Drone, db_table='afe_drone_sets_drones')

    name_field = 'name'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Saves the drone set; restricted to superusers."""
        requester = User.current_user()
        if not requester.is_superuser():
            raise Exception('Only superusers may edit drone sets')
        super(DroneSet, self).save(*args, **kwargs)


    def delete(self):
        """Deletes the drone set; restricted to superusers."""
        requester = User.current_user()
        if not requester.is_superuser():
            raise Exception('Only superusers may delete drone sets')
        super(DroneSet, self).delete()


    @classmethod
    def drone_sets_enabled(cls):
        """Returns whether the drone-sets feature is enabled in the config."""
        return cls.DRONE_SETS_ENABLED


    @classmethod
    def default_drone_set_name(cls):
        """Returns the globally configured default drone set name (or None)."""
        return cls.DEFAULT_DRONE_SET_NAME


    @classmethod
    def get_default(cls):
        """Gets the default drone set, compatible with Job.add_object."""
        return cls.smart_get(cls.DEFAULT_DRONE_SET_NAME)


    @classmethod
    def resolve_name(cls, drone_set_name):
        """
        Returns the name of one of these, if not None, in order of preference:
        1) the drone set given,
        2) the current user's default drone set, or
        3) the global default drone set

        or returns None if drone sets are disabled

        @param drone_set_name: A drone set name.
        """
        if not cls.drone_sets_enabled():
            return None

        requester = User.current_user()
        personal_default = requester.drone_set and requester.drone_set.name

        return (drone_set_name
                or personal_default
                or cls.get_default().name)


    def get_drone_hostnames(self):
        """Returns the set of hostnames of all drones in this drone set."""
        hostnames = self.drones.all().values_list('hostname', flat=True)
        return set(hostnames)


    class Meta:
        """Metadata for class DroneSet."""
        db_table = 'afe_drone_sets'

    def __unicode__(self):
        return unicode(self.name)
273
274
class User(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    login :user login name

    Optional:
    access_level: 0=User (default), 1=Admin, 100=Root
    """
    ACCESS_ROOT = 100
    ACCESS_ADMIN = 1
    ACCESS_USER = 0

    AUTOTEST_SYSTEM = 'autotest_system'

    login = dbmodels.CharField(max_length=255, unique=True)
    access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)

    # user preferences
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)
    show_experimental = dbmodels.BooleanField(default=False)

    name_field = 'login'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Saves the user.

        Non-superusers may only modify their own record.  On the first save,
        the user is added to the 'Everyone' ACL group.
        """
        # A missing id means this object has never been saved before.
        is_new = self.id is None
        requester = thread_local.get_user()
        if (requester and not requester.is_superuser()
                and requester.login != self.login):
            raise AclAccessViolation("You cannot modify user " + self.login)
        super(User, self).save(*args, **kwargs)
        if is_new:
            AclGroup.objects.get(name='Everyone').users.add(self)


    def is_superuser(self):
        """Returns whether the user has superuser access."""
        return self.access_level >= self.ACCESS_ROOT


    @classmethod
    def current_user(cls):
        """Returns the current thread-local user.

        When no user is attached to the current thread, falls back to (and
        lazily creates) the autotest_system superuser.
        """
        user = thread_local.get_user()
        if user is not None:
            return user
        user, _ = cls.objects.get_or_create(login=cls.AUTOTEST_SYSTEM)
        user.access_level = cls.ACCESS_ROOT
        user.save()
        return user


    class Meta:
        """Metadata for class User."""
        db_table = 'afe_users'

    def __unicode__(self):
        return unicode(self.login)
343
344
class Host(model_logic.ModelWithInvalid, rdb_model_extensions.AbstractHostModel,
           model_logic.ModelWithAttributes):
    """\
    Required:
    hostname

    optional:
    locked: if true, host is locked and will not be queued

    Internal:
    From AbstractHostModel:
        synch_id: currently unused
        status: string describing status of host
        invalid: true if the host has been deleted
        protection: indicates what can be done to this host during repair
        lock_time: DateTime at which the host was locked
        dirty: true if the host has been used without being rebooted
    Local:
        locked_by: user that locked the host, or null if the host is unlocked
    """

    # Note: Only specify foreign keys here, specify all native host columns in
    # rdb_model_extensions instead.
    Protection = host_protections.Protection
    labels = dbmodels.ManyToManyField(Label, blank=True,
                                      db_table='afe_hosts_labels')
    locked_by = dbmodels.ForeignKey(User, null=True, blank=True, editable=False)
    name_field = 'hostname'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    leased_objects = model_logic.LeasedHostManager()


    def __init__(self, *args, **kwargs):
        super(Host, self).__init__(*args, **kwargs)
        # Track 'status' so on_attribute_changed() can log transitions
        # when the host is saved.
        self._record_attributes(['status'])


    @staticmethod
    def create_one_time_host(hostname):
        """Creates a one-time host.

        A one-time host is stored as invalid and protected from repair, so
        it does not appear as a regular host.  If a host with this hostname
        already exists, it is reused only if it is invalid.

        @param hostname: The name for the host.
        @raises model_logic.ValidationError if a valid host with this
            hostname already exists in the database.
        @return The created (or reused) Host object.
        """
        query = Host.objects.filter(hostname=hostname)
        if query.count() == 0:
            host = Host(hostname=hostname, invalid=True)
            host.do_validate()
        else:
            host = query[0]
            if not host.invalid:
                raise model_logic.ValidationError({
                    'hostname' : '%s already exists in the autotest DB.  '
                        'Select it rather than entering it as a one time '
                        'host.' % hostname
                    })
        host.protection = host_protections.Protection.DO_NOT_REPAIR
        host.locked = False
        host.save()
        host.clean_object()
        return host


    def resurrect_object(self, old_object):
        """Restores state when re-adding a previously invalidated host.

        @param old_object: The invalid Host record being resurrected.
        """
        super(Host, self).resurrect_object(old_object)
        # invalid hosts can be in use by the scheduler (as one-time hosts), so
        # don't change the status
        self.status = old_object.status


    def clean_object(self):
        """Clears this host's ACL group and label associations."""
        self.aclgroup_set.clear()
        self.labels.clear()


    def save(self, *args, **kwargs):
        """Saves the host.

        Strips whitespace from the hostname, checks ACLs for pre-existing
        hosts, maintains the lock bookkeeping fields (locked_by, lock_time,
        dirty), and adds newly created hosts to the 'Everyone' ACL group.
        """
        # extra spaces in the hostname can be a sneaky source of errors
        self.hostname = self.hostname.strip()
        # is this a new object being saved for the first time?
        first_time = (self.id is None)
        if not first_time:
            AclGroup.check_for_acl_violation_hosts([self])
        if self.locked and not self.locked_by:
            self.locked_by = User.current_user()
            self.lock_time = datetime.now()
            self.dirty = True
        elif not self.locked and self.locked_by:
            self.locked_by = None
            self.lock_time = None
        super(Host, self).save(*args, **kwargs)
        if first_time:
            everyone = AclGroup.objects.get(name='Everyone')
            everyone.hosts.add(self)
        self._check_for_updated_attributes()


    def delete(self):
        """Deletes the host after aborting all of its queue entries.

        @raises AclAccessViolation if the current user lacks access.
        """
        AclGroup.check_for_acl_violation_hosts([self])
        for queue_entry in self.hostqueueentry_set.all():
            queue_entry.deleted = True
            queue_entry.abort()
        super(Host, self).delete()


    def on_attribute_changed(self, attribute, old_value):
        """Logs status transitions; only 'status' is tracked (see __init__)."""
        assert attribute == 'status'
        logging.info(self.hostname + ' -> ' + self.status)


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job on this host.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        queue_entry = HostQueueEntry.create(host=self, job=job,
                                            is_template=is_template,
                                            atomic_group=atomic_group)
        # allow recovery of dead hosts from the frontend
        if not self.active_queue_entry() and self.is_dead():
            self.status = Host.Status.READY
            self.save()
        queue_entry.save()

        # Prevent the scheduler from re-picking this host for the same job.
        block = IneligibleHostQueue(job=job, host=self)
        block.save()


    def platform(self):
        """The platform of the host."""
        # TODO(showard): slightly hacky?
        platforms = self.labels.filter(platform=True)
        if len(platforms) == 0:
            return None
        return platforms[0]
    platform.short_description = 'Platform'


    @classmethod
    def check_no_platform(cls, hosts):
        """Verify the specified hosts have no associated platforms.

        @param cls: Implicit class object.
        @param hosts: The hosts to verify.
        @raises model_logic.ValidationError if any hosts already have a
            platform.
        """
        Host.objects.populate_relationships(hosts, Label, 'label_list')
        errors = []
        for host in hosts:
            platforms = [label.name for label in host.label_list
                         if label.platform]
            if platforms:
                # do a join, just in case this host has multiple platforms,
                # we'll be able to see it
                errors.append('Host %s already has a platform: %s' % (
                              host.hostname, ', '.join(platforms)))
        if errors:
            raise model_logic.ValidationError({'labels': '; '.join(errors)})


    def is_dead(self):
        """Returns whether the host is dead (has status repair failed)."""
        return self.status == Host.Status.REPAIR_FAILED


    def active_queue_entry(self):
        """Returns the active queue entry for this host, or None if none."""
        active = list(self.hostqueueentry_set.filter(active=True))
        if not active:
            return None
        assert len(active) == 1, ('More than one active entry for '
                                  'host ' + self.hostname)
        return active[0]


    def _get_attribute_model_and_args(self, attribute):
        """ModelWithAttributes hook: stores keyvals in HostAttribute rows."""
        return HostAttribute, dict(host=self, attribute=attribute)


    class Meta:
        """Metadata for the Host class."""
        db_table = 'afe_hosts'

    def __unicode__(self):
        return unicode(self.hostname)
532
533
class HostAttribute(dbmodels.Model):
    """Arbitrary keyvals associated with hosts."""
    # Each row maps one (host, attribute) pair to a string value.
    host = dbmodels.ForeignKey(Host)
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for the HostAttribute class."""
        db_table = 'afe_host_attributes'
545
546
class Test(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    author: author name
    description: description of the test
    name: test name
    time: short, medium, long
    test_class: This describes the class the test belongs in.
    test_category: This describes the category for your tests
    test_type: Client or Server
    path: path to pass to run_test()
    sync_count:  is a number >=1 (1 being the default). If it's 1, then it's an
                 async job. If it's >1 it's sync job for that number of machines
                 i.e. if sync_count = 2 it is a sync job that requires two
                 machines.
    Optional:
    dependencies: What the test requires to run. Comma delimited list
    dependency_labels: many-to-many relationship with labels corresponding to
                       test dependencies.
    experimental: If this is set to True production servers will ignore the test
    run_verify: Whether or not the scheduler should run the verify stage
    run_reset: Whether or not the scheduler should run the reset stage
    test_retry: Number of times to retry test if the test did not complete
                successfully. (optional, default: 0)
    """
    TestTime = enum.Enum('SHORT', 'MEDIUM', 'LONG', start_value=1)

    name = dbmodels.CharField(max_length=255, unique=True)
    author = dbmodels.CharField(max_length=255)
    test_class = dbmodels.CharField(max_length=255)
    test_category = dbmodels.CharField(max_length=255)
    dependencies = dbmodels.CharField(max_length=255, blank=True)
    description = dbmodels.TextField(blank=True)
    experimental = dbmodels.BooleanField(default=True)
    run_verify = dbmodels.BooleanField(default=False)
    test_time = dbmodels.SmallIntegerField(choices=TestTime.choices(),
                                           default=TestTime.MEDIUM)
    test_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices())
    sync_count = dbmodels.IntegerField(default=1)
    path = dbmodels.CharField(max_length=255, unique=True)
    test_retry = dbmodels.IntegerField(blank=True, default=0)
    run_reset = dbmodels.BooleanField(default=True)

    dependency_labels = (
        dbmodels.ManyToManyField(Label, blank=True,
                                 db_table='afe_autotests_dependency_labels'))
    name_field = 'name'
    objects = model_logic.ExtendedManager()


    def admin_description(self):
        """Returns an HTML-escaped description for the admin interface."""
        escaped_description = saxutils.escape(self.description)
        return '<span style="white-space:pre">%s</span>' % escaped_description
    admin_description.allow_tags = True
    admin_description.short_description = 'Description'


    class Meta:
        """Metadata for class Test."""
        db_table = 'afe_autotests'

    def __unicode__(self):
        return unicode(self.name)
612
613
class TestParameter(dbmodels.Model):
    """
    A declared parameter of a test
    """
    test = dbmodels.ForeignKey(Test)
    name = dbmodels.CharField(max_length=255)

    class Meta:
        """Metadata for class TestParameter."""
        db_table = 'afe_test_parameters'
        # A parameter name may appear at most once per test.
        unique_together = ('test', 'name')

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.test.name)
628
629
class Profiler(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    name: profiler name
    test_type: Client or Server

    Optional:
    description: arbitrary text description
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)

    name_field = 'name'
    objects = model_logic.ExtendedManager()


    class Meta:
        """Metadata for class Profiler."""
        db_table = 'afe_profilers'

    def __unicode__(self):
        return unicode(self.name)
652
653
class AclGroup(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    name: name of ACL group

    Optional:
    description: arbitrary description of group
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.CharField(max_length=255, blank=True)
    users = dbmodels.ManyToManyField(User, blank=False,
                                     db_table='afe_acl_groups_users')
    hosts = dbmodels.ManyToManyField(Host, blank=True,
                                     db_table='afe_acl_groups_hosts')

    name_field = 'name'
    objects = model_logic.ExtendedManager()

    @staticmethod
    def check_for_acl_violation_hosts(hosts):
        """Verify the current user has access to the specified hosts.

        @param hosts: The hosts to verify against.
        @raises AclAccessViolation if the current user doesn't have access
            to a host.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        accessible_host_ids = set(
            host.id for host in Host.objects.filter(aclgroup__users=user))
        for host in hosts:
            # Check if the user has access to this host,
            # but only if it is not a metahost or a one-time-host.
            no_access = (isinstance(host, Host)
                         and not host.invalid
                         and int(host.id) not in accessible_host_ids)
            if no_access:
                raise AclAccessViolation("%s does not have access to %s" %
                                         (str(user), str(host)))


    @staticmethod
    def check_abort_permissions(queue_entries):
        """Look for queue entries that aren't abortable by the current user.

        An entry is not abortable if:
           * the job isn't owned by this user, and
           * the machine isn't ACL-accessible, or
           * the machine is in the "Everyone" ACL

        @param queue_entries: The queue entries to check.
        @raises AclAccessViolation if a queue entry is not abortable by the
            current user.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        not_owned = queue_entries.exclude(job__owner=user.login)
        # I do this using ID sets instead of just Django filters because
        # filtering on M2M dbmodels is broken in Django 0.96. It's better in
        # 1.0.
        # TODO: Use Django filters, now that we're using 1.0.
        accessible_ids = set(
            entry.id for entry
            in not_owned.filter(host__aclgroup__users__login=user.login))
        public_ids = set(entry.id for entry
                         in not_owned.filter(host__aclgroup__name='Everyone'))
        cannot_abort = [entry for entry in not_owned.select_related()
                        if entry.id not in accessible_ids
                        or entry.id in public_ids]
        if len(cannot_abort) == 0:
            return
        entry_names = ', '.join('%s-%s/%s' % (entry.job.id, entry.job.owner,
                                              entry.host_or_metahost_name())
                                for entry in cannot_abort)
        raise AclAccessViolation('You cannot abort the following job entries: '
                                 + entry_names)


    def check_for_acl_violation_acl_group(self):
        """Verifies the current user has access to this ACL group.

        @raises AclAccessViolation if the current user doesn't have access to
            this ACL group.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        if self.name == 'Everyone':
            raise AclAccessViolation("You cannot modify 'Everyone'!")
        if not user in self.users.all():
            raise AclAccessViolation("You do not have access to %s"
                                     % self.name)

    @staticmethod
    def on_host_membership_change():
        """Invoked when host membership changes.

        Keeps the 'Everyone' group consistent: hosts with no ACL group are
        added to it, and hosts that belong to another group are removed.
        """
        everyone = AclGroup.objects.get(name='Everyone')

        # find hosts that aren't in any ACL group and add them to Everyone
        # TODO(showard): this is a bit of a hack, since the fact that this query
        # works is kind of a coincidence of Django internals.  This trick
        # doesn't work in general (on all foreign key relationships).  I'll
        # replace it with a better technique when the need arises.
        orphaned_hosts = Host.valid_objects.filter(aclgroup__id__isnull=True)
        everyone.hosts.add(*orphaned_hosts.distinct())

        # find hosts in both Everyone and another ACL group, and remove them
        # from Everyone
        hosts_in_everyone = Host.valid_objects.filter(aclgroup__name='Everyone')
        acled_hosts = set()
        for host in hosts_in_everyone:
            # Has an ACL group other than Everyone
            if host.aclgroup_set.count() > 1:
                acled_hosts.add(host)
        everyone.hosts.remove(*acled_hosts)


    def delete(self):
        """Deletes the group; the 'Everyone' group may never be deleted.

        @raises AclAccessViolation if deleting 'Everyone' or if the current
            user lacks access to this group.
        """
        if (self.name == 'Everyone'):
            raise AclAccessViolation("You cannot delete 'Everyone'!")
        self.check_for_acl_violation_acl_group()
        super(AclGroup, self).delete()
        self.on_host_membership_change()


    def add_current_user_if_empty(self):
        """Adds the current user if the set of users is empty."""
        if not self.users.count():
            self.users.add(User.current_user())


    def perform_after_save(self, change):
        """Called after a save.

        @param change: Whether there was a change.
        """
        if not change:
            self.users.add(User.current_user())
        self.add_current_user_if_empty()
        self.on_host_membership_change()


    def save(self, *args, **kwargs):
        """Saves the group after checking ACL access on the stored original."""
        change = bool(self.id)
        if change:
            # Check the original object for an ACL violation
            AclGroup.objects.get(id=self.id).check_for_acl_violation_acl_group()
        super(AclGroup, self).save(*args, **kwargs)
        self.perform_after_save(change)


    class Meta:
        """Metadata for class AclGroup."""
        db_table = 'afe_acl_groups'

    def __unicode__(self):
        return unicode(self.name)
813
814
class Kernel(dbmodels.Model):
    """
    A kernel configuration for a parameterized job
    """
    version = dbmodels.CharField(max_length=255)
    cmdline = dbmodels.CharField(max_length=255, blank=True)

    @classmethod
    def create_kernels(cls, kernel_list):
        """Creates every kernel described in kernel_list.

        @param kernel_list: A list of dictionaries that describe the kernels,
            in the same format as the 'kernel' argument to
            rpc_interface.generate_control_file.
        @return A list of the created kernels, or None when kernel_list is
            empty or None.
        """
        if not kernel_list:
            return None
        return [cls._create(info) for info in kernel_list]


    @classmethod
    def _create(cls, kernel_dict):
        # Consume the recognized keys; anything left over is an error.
        # Note: this intentionally mutates the caller's dict via pop().
        version = kernel_dict.pop('version')
        cmdline = kernel_dict.pop('cmdline', '')

        if kernel_dict:
            raise Exception('Extraneous kernel arguments remain: %r'
                            % kernel_dict)

        kernel, _ = cls.objects.get_or_create(version=version,
                                              cmdline=cmdline)
        return kernel


    class Meta:
        """Metadata for class Kernel."""
        db_table = 'afe_kernels'
        # Each (version, cmdline) pair is stored at most once.
        unique_together = ('version', 'cmdline')

    def __unicode__(self):
        return u'%s %s' % (self.version, self.cmdline)
858
859
class ParameterizedJob(dbmodels.Model):
    """
    Auxiliary configuration for a parameterized job.
    """
    test = dbmodels.ForeignKey(Test)
    label = dbmodels.ForeignKey(Label, null=True)
    use_container = dbmodels.BooleanField(default=False)
    profile_only = dbmodels.BooleanField(default=False)
    upload_kernel_config = dbmodels.BooleanField(default=False)

    kernels = dbmodels.ManyToManyField(
            Kernel, db_table='afe_parameterized_job_kernels')
    profilers = dbmodels.ManyToManyField(
            Profiler, through='ParameterizedJobProfiler')


    @classmethod
    def smart_get(cls, id_or_name, *args, **kwargs):
        """For compatibility with Job.add_object.

        @param cls: Implicit class object.
        @param id_or_name: The ID or name to get.
        @param args: Non-keyword arguments.
        @param kwargs: Keyword arguments.
        """
        return cls.objects.get(pk=id_or_name)


    def job(self):
        """Returns the job if it exists, or else None."""
        # Fetch the related rows once instead of issuing separate queries
        # for count(), truthiness and indexing, and avoid the error-prone
        # 'x and y or z' idiom.  The slice keeps the query small while
        # still letting the assertion detect multiple associated jobs.
        jobs = list(self.job_set.all()[:2])
        assert len(jobs) <= 1
        return jobs[0] if jobs else None


    class Meta:
        """Metadata for class ParameterizedJob."""
        db_table = 'afe_parameterized_jobs'

    def __unicode__(self):
        """Returns '<test name> (parameterized) - <job>'."""
        return u'%s (parameterized) - %s' % (self.test.name, self.job())
901
902
class ParameterizedJobProfiler(dbmodels.Model):
    """
    A profiler to run on a parameterized job
    """
    # The parameterized job the profiler is attached to.
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    # The profiler to run; each profiler may appear at most once per job
    # (see unique_together below).
    profiler = dbmodels.ForeignKey(Profiler)

    class Meta:
        """Metadata for class ParameterizedJobProfiler."""
        db_table = 'afe_parameterized_jobs_profilers'
        unique_together = ('parameterized_job', 'profiler')
914
915
class ParameterizedJobProfilerParameter(dbmodels.Model):
    """
    A parameter for a profiler in a parameterized job
    """
    parameterized_job_profiler = dbmodels.ForeignKey(ParameterizedJobProfiler)
    # Parameter names are unique within one profiler attachment
    # (see unique_together below).
    parameter_name = dbmodels.CharField(max_length=255)
    # Value is stored as text; parameter_type records how to interpret it.
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobProfilerParameter."""
        db_table = 'afe_parameterized_job_profiler_parameters'
        unique_together = ('parameterized_job_profiler', 'parameter_name')

    def __unicode__(self):
        """Returns '<profiler name> - <parameter name>'."""
        return u'%s - %s' % (self.parameterized_job_profiler.profiler.name,
                             self.parameter_name)
934
935
class ParameterizedJobParameter(dbmodels.Model):
    """
    Parameters for a parameterized job
    """
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    test_parameter = dbmodels.ForeignKey(TestParameter)
    # Value is stored as text; parameter_type records how to interpret it.
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobParameter."""
        db_table = 'afe_parameterized_job_parameters'
        unique_together = ('parameterized_job', 'test_parameter')

    def __unicode__(self):
        """Returns '<job name> - <test parameter name>'."""
        # NOTE(review): assumes parameterized_job.job() is not None here;
        # job() can return None if no Job references this row -- confirm.
        return u'%s - %s' % (self.parameterized_job.job().name,
                             self.test_parameter.name)
954
955
class JobManager(model_logic.ExtendedManager):
    'Custom manager to provide efficient status counts querying.'
    def get_status_counts(self, job_ids):
        """Returns a dict mapping the given job IDs to their status count dicts.

        @param job_ids: A list of job IDs.
        @return A dict of the form {job_id: {full_status: count, ...}, ...}.
            Every requested job ID is present as a key, possibly mapping to
            an empty dict if it has no host queue entries.
        """
        if not job_ids:
            return {}
        # Use query placeholders so the database driver escapes the values,
        # rather than interpolating them into the SQL string ourselves.
        placeholders = '(%s)' % ','.join(['%s'] * len(job_ids))
        cursor = connection.cursor()
        cursor.execute("""
            SELECT job_id, status, aborted, complete, COUNT(*)
            FROM afe_host_queue_entries
            WHERE job_id IN %s
            GROUP BY job_id, status, aborted, complete
            """ % placeholders, job_ids)
        all_job_counts = dict((job_id, {}) for job_id in job_ids)
        for job_id, status, aborted, complete, count in cursor.fetchall():
            job_dict = all_job_counts[job_id]
            full_status = HostQueueEntry.compute_full_status(status, aborted,
                                                             complete)
            job_dict.setdefault(full_status, 0)
            job_dict[full_status] += count
        return all_job_counts
981
982
class Job(dbmodels.Model, model_logic.ModelExtensions):
    """\
    owner: username of job owner
    name: job name (does not have to be unique)
    priority: Integer priority value.  Higher is more important.
    control_file: contents of control file
    control_type: Client or Server
    created_on: date of job creation
    submitted_on: date of job submission
    synch_count: how many hosts should be used per autoserv execution
    run_verify: Whether or not to run the verify phase
    run_reset: Whether or not to run the reset phase
    timeout: DEPRECATED - hours from queuing time until job times out
    timeout_mins: minutes from job queuing time until the job times out
    max_runtime_hrs: DEPRECATED - hours from job starting time until job
                     times out
    max_runtime_mins: minutes from job starting time until job times out
    email_list: list of people to email on completion delimited by any of:
                white space, ',', ':', ';'
    dependency_labels: many-to-many relationship with labels corresponding to
                       job dependencies
    reboot_before: Never, If dirty, or Always
    reboot_after: Never, If all tests passed, or Always
    parse_failed_repair: if True, a failed repair launched by this job will have
    its results parsed as part of the job.
    drone_set: The set of drones to run this job on
    parent_job: Parent job (optional)
    test_retry: Number of times to retry test if the test did not complete
                successfully. (optional, default: 0)
    """
    # TIMEOUT is deprecated.
    DEFAULT_TIMEOUT = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_default', default=24)
    DEFAULT_TIMEOUT_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_mins_default', default=24*60)
    # MAX_RUNTIME_HRS is deprecated. Will be removed after switch to mins is
    # completed.
    DEFAULT_MAX_RUNTIME_HRS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_hrs_default', default=72)
    DEFAULT_MAX_RUNTIME_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_mins_default', default=72*60)
    DEFAULT_PARSE_FAILED_REPAIR = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'parse_failed_repair_default', type=bool,
        default=False)

    owner = dbmodels.CharField(max_length=255)
    name = dbmodels.CharField(max_length=255)
    priority = dbmodels.SmallIntegerField(default=priorities.Priority.DEFAULT)
    control_file = dbmodels.TextField(null=True, blank=True)
    control_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices(),
        blank=True, # to allow 0
        default=control_data.CONTROL_TYPE.CLIENT)
    created_on = dbmodels.DateTimeField()
    synch_count = dbmodels.IntegerField(blank=True, default=0)
    timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
    run_verify = dbmodels.BooleanField(default=False)
    email_list = dbmodels.CharField(max_length=250, blank=True)
    dependency_labels = (
            dbmodels.ManyToManyField(Label, blank=True,
                                     db_table='afe_jobs_dependency_labels'))
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    parse_failed_repair = dbmodels.BooleanField(
        default=DEFAULT_PARSE_FAILED_REPAIR)
    # max_runtime_hrs is deprecated. Will be removed after switch to mins is
    # completed.
    max_runtime_hrs = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_HRS)
    max_runtime_mins = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_MINS)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)

    parameterized_job = dbmodels.ForeignKey(ParameterizedJob, null=True,
                                            blank=True)

    parent_job = dbmodels.ForeignKey('self', blank=True, null=True)

    test_retry = dbmodels.IntegerField(blank=True, default=0)

    run_reset = dbmodels.BooleanField(default=True)

    timeout_mins = dbmodels.IntegerField(default=DEFAULT_TIMEOUT_MINS)

    # custom manager
    objects = JobManager()


    @decorators.cached_property
    def labels(self):
        """All the labels of this job"""
        # We need to convert dependency_labels to a list, because all() gives us
        # back an iterator, and storing/caching an iterator means we'd only be
        # able to read from it once.
        return list(self.dependency_labels.all())


    def is_server_job(self):
        """Returns whether this job is of type server."""
        return self.control_type == control_data.CONTROL_TYPE.SERVER


    @classmethod
    def parameterized_jobs_enabled(cls):
        """Returns whether parameterized jobs are enabled.

        @param cls: Implicit class object.
        """
        return global_config.global_config.get_config_value(
                'AUTOTEST_WEB', 'parameterized_jobs', type=bool)


    @classmethod
    def check_parameterized_job(cls, control_file, parameterized_job):
        """Checks that the job is valid given the global config settings.

        First, either control_file must be set, or parameterized_job must be
        set, but not both. Second, parameterized_job must be set if and only if
        the parameterized_jobs option in the global config is set to True.

        @param cls: Implict class object.
        @param control_file: A control file.
        @param parameterized_job: A parameterized job.
        """
        if not (bool(control_file) ^ bool(parameterized_job)):
            raise Exception('Job must have either control file or '
                            'parameterization, but not both')

        parameterized_jobs_enabled = cls.parameterized_jobs_enabled()
        if control_file and parameterized_jobs_enabled:
            raise Exception('Control file specified, but parameterized jobs '
                            'are enabled')
        if parameterized_job and not parameterized_jobs_enabled:
            raise Exception('Parameterized job specified, but parameterized '
                            'jobs are not enabled')


    @classmethod
    def create(cls, owner, options, hosts):
        """Creates a job.

        The job is created by taking some information (the listed args) and
        filling in the rest of the necessary information.

        @param cls: Implicit class object.
        @param owner: The owner for the job.
        @param options: An options object.
        @param hosts: The hosts to use.
        """
        AclGroup.check_for_acl_violation_hosts(hosts)

        control_file = options.get('control_file')
        parameterized_job = options.get('parameterized_job')

        # The current implementation of parameterized jobs requires that only
        # control files or parameterized jobs are used. Using the image
        # parameter on autoupdate_ParameterizedJob doesn't mix pure
        # parameterized jobs and control files jobs, it does muck enough with
        # normal jobs by adding a parameterized id to them that this check will
        # fail. So for now we just skip this check.
        # cls.check_parameterized_job(control_file=control_file,
        #                             parameterized_job=parameterized_job)
        user = User.current_user()
        if options.get('reboot_before') is None:
            options['reboot_before'] = user.get_reboot_before_display()
        if options.get('reboot_after') is None:
            options['reboot_after'] = user.get_reboot_after_display()

        drone_set = DroneSet.resolve_name(options.get('drone_set'))

        # 'timeout' is in hours; derive timeout_mins from it when only the
        # deprecated option was supplied.
        if options.get('timeout_mins') is None and options.get('timeout'):
            options['timeout_mins'] = options['timeout'] * 60

        job = cls.add_object(
            owner=owner,
            name=options['name'],
            priority=options['priority'],
            control_file=control_file,
            control_type=options['control_type'],
            synch_count=options.get('synch_count'),
            # timeout needs to be deleted in the future.
            timeout=options.get('timeout'),
            timeout_mins=options.get('timeout_mins'),
            max_runtime_mins=options.get('max_runtime_mins'),
            run_verify=options.get('run_verify'),
            email_list=options.get('email_list'),
            reboot_before=options.get('reboot_before'),
            reboot_after=options.get('reboot_after'),
            parse_failed_repair=options.get('parse_failed_repair'),
            created_on=datetime.now(),
            drone_set=drone_set,
            parameterized_job=parameterized_job,
            parent_job=options.get('parent_job_id'),
            test_retry=options.get('test_retry'),
            run_reset=options.get('run_reset'))

        job.dependency_labels = options['dependencies']

        if options.get('keyvals'):
            for key, value in options['keyvals'].iteritems():
                JobKeyval.objects.create(job=job, key=key, value=value)

        return job


    def save(self, *args, **kwargs):
        """Saves the job to the database."""
        # The current implementation of parameterized jobs requires that only
        # control files or parameterized jobs are used. Using the image
        # parameter on autoupdate_ParameterizedJob doesn't mix pure
        # parameterized jobs and control files jobs, it does muck enough with
        # normal jobs by adding a parameterized id to them that this check will
        # fail. So for now we just skip this check.
        # cls.check_parameterized_job(control_file=self.control_file,
        #                             parameterized_job=self.parameterized_job)
        super(Job, self).save(*args, **kwargs)


    def queue(self, hosts, atomic_group=None, is_template=False):
        """Enqueue a job on the given hosts.

        @param hosts: The hosts to use.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        if not hosts:
            if atomic_group:
                # No hosts or labels are required to queue an atomic group
                # Job.  However, if they are given, we respect them below.
                atomic_group.enqueue_job(self, is_template=is_template)
            else:
                # hostless job
                entry = HostQueueEntry.create(job=self, is_template=is_template)
                entry.save()
            return

        for host in hosts:
            host.enqueue_job(self, atomic_group=atomic_group,
                             is_template=is_template)


    def create_recurring_job(self, start_date, loop_period, loop_count, owner):
        """Creates a recurring job.

        @param start_date: The starting date of the job.
        @param loop_period: How often to re-run the job, in seconds.
        @param loop_count: The re-run count.
        @param owner: The owner of the job.
        @return The ID of the created RecurringRun.
        """
        rec = RecurringRun(job=self, start_date=start_date,
                           loop_period=loop_period,
                           loop_count=loop_count,
                           owner=User.objects.get(login=owner))
        rec.save()
        return rec.id


    def user(self):
        """Gets the user of this job, or None if it doesn't exist."""
        try:
            return User.objects.get(login=self.owner)
        # User.objects.get() raises User.DoesNotExist; the previous
        # 'except self.DoesNotExist' (i.e. Job.DoesNotExist) could never
        # catch it, so a missing owner escaped as an unhandled exception.
        except User.DoesNotExist:
            return None


    def abort(self):
        """Aborts this job."""
        for queue_entry in self.hostqueueentry_set.all():
            queue_entry.abort()


    def tag(self):
        """Returns a string tag for this job."""
        return '%s-%s' % (self.id, self.owner)


    def keyval_dict(self):
        """Returns all keyvals for this job as a dictionary."""
        return dict((keyval.key, keyval.value)
                    for keyval in self.jobkeyval_set.all())


    class Meta:
        """Metadata for class Job."""
        db_table = 'afe_jobs'

    def __unicode__(self):
        return u'%s (%s-%s)' % (self.name, self.id, self.owner)
1272
1273
class JobKeyval(dbmodels.Model, model_logic.ModelExtensions):
    """Keyvals associated with jobs"""
    job = dbmodels.ForeignKey(Job)
    key = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class JobKeyval."""
        db_table = 'afe_job_keyvals'
1285
1286
class IneligibleHostQueue(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an ineligible host queue."""
    # Marks a (job, host) pair; presumably the host is excluded from
    # scheduling for that job -- semantics enforced elsewhere, confirm
    # against the scheduler.
    job = dbmodels.ForeignKey(Job)
    host = dbmodels.ForeignKey(Host)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class IneligibleHostQueue."""
        db_table = 'afe_ineligible_host_queues'
1297
1298
class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents a host queue entry."""
    # Convenience aliases so callers can write HostQueueEntry.Status.QUEUED,
    # HostQueueEntry.ACTIVE_STATUSES, etc.
    Status = host_queue_entry_states.Status
    ACTIVE_STATUSES = host_queue_entry_states.ACTIVE_STATUSES
    COMPLETE_STATUSES = host_queue_entry_states.COMPLETE_STATUSES

    job = dbmodels.ForeignKey(Job)
    host = dbmodels.ForeignKey(Host, blank=True, null=True)
    status = dbmodels.CharField(max_length=255)
    meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
                                    db_column='meta_host')
    active = dbmodels.BooleanField(default=False)
    complete = dbmodels.BooleanField(default=False)
    deleted = dbmodels.BooleanField(default=False)
    execution_subdir = dbmodels.CharField(max_length=255, blank=True,
                                          default='')
    # If atomic_group is set, this is a virtual HostQueueEntry that will
    # be expanded into many actual hosts within the group at schedule time.
    atomic_group = dbmodels.ForeignKey(AtomicGroup, blank=True, null=True)
    aborted = dbmodels.BooleanField(default=False)
    started_on = dbmodels.DateTimeField(null=True, blank=True)
    finished_on = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()


    def __init__(self, *args, **kwargs):
        """Initializes the entry and starts tracking 'status' changes."""
        super(HostQueueEntry, self).__init__(*args, **kwargs)
        # Record 'status' so save() can detect and log transitions via
        # on_attribute_changed().
        self._record_attributes(['status'])


    @classmethod
    def create(cls, job, host=None, meta_host=None, atomic_group=None,
                 is_template=False):
        """Creates a new host queue entry.

        @param cls: Implicit class object.
        @param job: The associated job.
        @param host: The associated host.
        @param meta_host: The associated meta host.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        @return The new, unsaved HostQueueEntry.
        """
        if is_template:
            status = cls.Status.TEMPLATE
        else:
            status = cls.Status.QUEUED

        return cls(job=job, host=host, meta_host=meta_host,
                   atomic_group=atomic_group, status=status)


    def save(self, *args, **kwargs):
        """Saves the entry, syncing active/complete flags with status."""
        self._set_active_and_complete()
        super(HostQueueEntry, self).save(*args, **kwargs)
        # Logs any status transition that happened since the last save.
        self._check_for_updated_attributes()


    def execution_path(self):
        """
        Path to this entry's results (relative to the base results directory).
        """
        return os.path.join(self.job.tag(), self.execution_subdir)


    def host_or_metahost_name(self):
        """Returns the first non-None name found in priority order.

        The priority order checked is: (1) host name; (2) meta host name; and
        (3) atomic group name.
        """
        if self.host:
            return self.host.hostname
        elif self.meta_host:
            return self.meta_host.name
        else:
            assert self.atomic_group, "no host, meta_host or atomic group!"
            return self.atomic_group.name


    def _set_active_and_complete(self):
        """Derives the active/complete booleans from the current status."""
        if self.status in self.ACTIVE_STATUSES:
            self.active, self.complete = True, False
        elif self.status in self.COMPLETE_STATUSES:
            self.active, self.complete = False, True
        else:
            self.active, self.complete = False, False


    def on_attribute_changed(self, attribute, old_value):
        """Logs status transitions; 'status' is the only tracked attribute."""
        assert attribute == 'status'
        logging.info('%s/%d (%d) -> %s', self.host, self.job.id, self.id,
                     self.status)


    def is_meta_host_entry(self):
        'True if this is a entry has a meta_host instead of a host.'
        return self.host is None and self.meta_host is not None


    # This code is shared between rpc_interface and models.HostQueueEntry.
    # Sadly due to circular imports between the 2 (crbug.com/230100) making it
    # a class method was the best way to refactor it. Attempting to put it in
    # rpc_utils or a new utils module failed as that would require us to import
    # models.py but to call it from here we would have to import the utils.py
    # thus creating a cycle.
    @classmethod
    def abort_host_queue_entries(cls, host_queue_entries):
        """Aborts a collection of host_queue_entries.

        Abort these host queue entry and all host queue entries of jobs created
        by them.

        @param host_queue_entries: List of host queue entries we want to abort.
        """
        # This isn't completely immune to race conditions since it's not atomic,
        # but it should be safe given the scheduler's behavior.

        # TODO(milleral): crbug.com/230100
        # The |abort_host_queue_entries| rpc does nearly exactly this,
        # however, trying to re-use the code generates some horrible
        # circular import error.  I'd be nice to refactor things around
        # sometime so the code could be reused.

        # Fixpoint algorithm to find the whole tree of HQEs to abort to
        # minimize the total number of database queries:
        children = set()
        new_children = set(host_queue_entries)
        while new_children:
            children.update(new_children)
            new_child_ids = [hqe.job_id for hqe in new_children]
            new_children = HostQueueEntry.objects.filter(
                    job__parent_job__in=new_child_ids,
                    complete=False, aborted=False).all()
            # To handle circular parental relationships
            new_children = set(new_children) - children

        # Associate a user with the host queue entries that we're about
        # to abort so that we can look up who to blame for the aborts.
        now = datetime.now()
        user = User.current_user()
        aborted_hqes = [AbortedHostQueueEntry(queue_entry=hqe,
                aborted_by=user, aborted_on=now) for hqe in children]
        AbortedHostQueueEntry.objects.bulk_create(aborted_hqes)
        # Bulk update all of the HQEs to set the abort bit.
        child_ids = [hqe.id for hqe in children]
        HostQueueEntry.objects.filter(id__in=child_ids).update(aborted=True)


    def abort(self):
        """ Aborts this host queue entry.

        Abort this host queue entry and all host queue entries of jobs created by
        this one.

        """
        if not self.complete and not self.aborted:
            HostQueueEntry.abort_host_queue_entries([self])


    @classmethod
    def compute_full_status(cls, status, aborted, complete):
        """Returns a modified status msg if the host queue entry was aborted.

        @param cls: Implicit class object.
        @param status: The original status message.
        @param aborted: Whether the host queue entry was aborted.
        @param complete: Whether the host queue entry was completed.
        """
        if aborted and not complete:
            return 'Aborted (%s)' % status
        return status


    def full_status(self):
        """Returns the full status of this host queue entry, as a string."""
        return self.compute_full_status(self.status, self.aborted,
                                        self.complete)


    def _postprocess_object_dict(self, object_dict):
        """Adds the computed 'full_status' field to serialized dicts."""
        object_dict['full_status'] = self.full_status()


    class Meta:
        """Metadata for class HostQueueEntry."""
        db_table = 'afe_host_queue_entries'



    def __unicode__(self):
        """Returns '<hostname>/<job id> (<entry id>)'."""
        hostname = None
        if self.host:
            hostname = self.host.hostname
        return u"%s/%d (%d)" % (hostname, self.job.id, self.id)
1494
1495
class AbortedHostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an aborted host queue entry."""
    queue_entry = dbmodels.OneToOneField(HostQueueEntry, primary_key=True)
    aborted_by = dbmodels.ForeignKey(User)
    aborted_on = dbmodels.DateTimeField()

    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Saves the entry, stamping aborted_on with the current time.

        Note: any caller-supplied aborted_on value is overwritten here.
        """
        self.aborted_on = datetime.now()
        super(AbortedHostQueueEntry, self).save(*args, **kwargs)

    class Meta:
        """Metadata for class AbortedHostQueueEntry."""
        db_table = 'afe_aborted_host_queue_entries'
1512
1513
class RecurringRun(dbmodels.Model, model_logic.ModelExtensions):
    """\
    job: job to use as a template
    owner: owner of the instantiated template
    start_date: Run the job at scheduled date
    loop_period: Re-run (loop) the job periodically
                 (in every loop_period seconds)
    loop_count: Re-run (loop) count
    """

    job = dbmodels.ForeignKey(Job)
    owner = dbmodels.ForeignKey(User)
    start_date = dbmodels.DateTimeField()
    loop_period = dbmodels.IntegerField(blank=True)
    loop_count = dbmodels.IntegerField(blank=True)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class RecurringRun."""
        db_table = 'afe_recurring_run'

    def __unicode__(self):
        """Returns a human-readable summary of the recurring run."""
        return u'RecurringRun(job %s, start %s, period %s, count %s)' % (
            self.job.id, self.start_date, self.loop_period, self.loop_count)
1539
1540
1541class SpecialTask(dbmodels.Model, model_logic.ModelExtensions):
1542    """\
1543    Tasks to run on hosts at the next time they are in the Ready state. Use this
1544    for high-priority tasks, such as forced repair or forced reinstall.
1545
1546    host: host to run this task on
1547    task: special task to run
1548    time_requested: date and time the request for this task was made
1549    is_active: task is currently running
1550    is_complete: task has finished running
1551    is_aborted: task was aborted
1552    time_started: date and time the task started
1553    time_finished: date and time the task finished
1554    queue_entry: Host queue entry waiting on this task (or None, if task was not
1555                 started in preparation of a job)
1556    """
1557    Task = enum.Enum('Verify', 'Cleanup', 'Repair', 'Reset', 'Provision',
1558                     string_values=True)
1559
1560    host = dbmodels.ForeignKey(Host, blank=False, null=False)
1561    task = dbmodels.CharField(max_length=64, choices=Task.choices(),
1562                              blank=False, null=False)
1563    requested_by = dbmodels.ForeignKey(User)
1564    time_requested = dbmodels.DateTimeField(auto_now_add=True, blank=False,
1565                                            null=False)
1566    is_active = dbmodels.BooleanField(default=False, blank=False, null=False)
1567    is_complete = dbmodels.BooleanField(default=False, blank=False, null=False)
1568    is_aborted = dbmodels.BooleanField(default=False, blank=False, null=False)
1569    time_started = dbmodels.DateTimeField(null=True, blank=True)
1570    queue_entry = dbmodels.ForeignKey(HostQueueEntry, blank=True, null=True)
1571    success = dbmodels.BooleanField(default=False, blank=False, null=False)
1572    time_finished = dbmodels.DateTimeField(null=True, blank=True)
1573
1574    objects = model_logic.ExtendedManager()
1575
1576
1577    def save(self, **kwargs):
1578        if self.queue_entry:
1579            self.requested_by = User.objects.get(
1580                    login=self.queue_entry.job.owner)
1581        super(SpecialTask, self).save(**kwargs)
1582
1583
1584    def execution_path(self):
1585        """@see HostQueueEntry.execution_path()"""
1586        return 'hosts/%s/%s-%s' % (self.host.hostname, self.id,
1587                                   self.task.lower())
1588
1589
1590    # property to emulate HostQueueEntry.status
1591    @property
1592    def status(self):
1593        """
1594        Return a host queue entry status appropriate for this task.  Although
1595        SpecialTasks are not HostQueueEntries, it is helpful to the user to
1596        present similar statuses.
1597        """
1598        if self.is_complete:
1599            if self.success:
1600                return HostQueueEntry.Status.COMPLETED
1601            return HostQueueEntry.Status.FAILED
1602        if self.is_active:
1603            return HostQueueEntry.Status.RUNNING
1604        return HostQueueEntry.Status.QUEUED
1605
1606
1607    # property to emulate HostQueueEntry.started_on
1608    @property
1609    def started_on(self):
1610        """Returns the time at which this special task started."""
1611        return self.time_started
1612
1613
1614    @classmethod
1615    def schedule_special_task(cls, host, task):
1616        """Schedules a special task on a host if not already scheduled.
1617
1618        @param cls: Implicit class object.
1619        @param host: The host to use.
1620        @param task: The task to schedule.
1621        """
1622        existing_tasks = SpecialTask.objects.filter(host__id=host.id, task=task,
1623                                                    is_active=False,
1624                                                    is_complete=False)
1625        if existing_tasks:
1626            return existing_tasks[0]
1627
1628        special_task = SpecialTask(host=host, task=task,
1629                                   requested_by=User.current_user())
1630        special_task.save()
1631        return special_task
1632
1633
    def abort(self):
        """Abort this special task by flagging it aborted and saving."""
        self.is_aborted = True
        self.save()
1637        self.save()
1638
1639
1640    def activate(self):
1641        """
1642        Sets a task as active and sets the time started to the current time.
1643        """
1644        logging.info('Starting: %s', self)
1645        self.is_active = True
1646        self.time_started = datetime.now()
1647        self.save()
1648
1649
1650    def finish(self, success):
1651        """Sets a task as completed.
1652
1653        @param success: Whether or not the task was successful.
1654        """
1655        logging.info('Finished: %s', self)
1656        self.is_active = False
1657        self.is_complete = True
1658        self.success = success
1659        self.time_finished = datetime.now()
1660        self.save()
1661
1662
    class Meta:
        """Metadata for class SpecialTask."""
        # Explicit table name following the file's 'afe_' schema convention.
        db_table = 'afe_special_tasks'
1666
1667
1668    def __unicode__(self):
1669        result = u'Special Task %s (host %s, task %s, time %s)' % (
1670            self.id, self.host, self.task, self.time_requested)
1671        if self.is_complete:
1672            result += u' (completed)'
1673        elif self.is_active:
1674            result += u' (active)'
1675
1676        return result
1677