models.py revision 962e3413dce0a6ea2fb0bed87323655f1641e57c
1# pylint: disable-msg=C0111
2
3import logging, os
4from datetime import datetime
5import django.core
6try:
7    from django.db import models as dbmodels, connection
8except django.core.exceptions.ImproperlyConfigured:
9    raise ImportError('Django database not yet configured. Import either '
10                       'setup_django_environment or '
11                       'setup_django_lite_environment from '
12                       'autotest_lib.frontend before any imports that '
13                       'depend on django models.')
14from xml.sax import saxutils
15import common
16from autotest_lib.frontend.afe import model_logic, model_attributes
17from autotest_lib.frontend.afe import rdb_model_extensions
18from autotest_lib.frontend import settings, thread_local
19from autotest_lib.client.common_lib import enum, host_protections, global_config
20from autotest_lib.client.common_lib import host_queue_entry_states
21from autotest_lib.client.common_lib import control_data, priorities
22from autotest_lib.client.common_lib import decorators
23
# job options and user preferences
DEFAULT_REBOOT_BEFORE = model_attributes.RebootBefore.IF_DIRTY
# Bug fix: the "after" default must come from the RebootAfter enum, not
# RebootBefore.  The two enums only happen to share the numeric value of
# NEVER because it is declared first in both; using the wrong enum class
# is semantically incorrect and breaks if either enum is reordered.
DEFAULT_REBOOT_AFTER = model_attributes.RebootAfter.NEVER
27
28
class AclAccessViolation(Exception):
    """\
    Raised when an operation is attempted without proper permissions as
    dictated by ACLs.
    """
34
35
class AtomicGroup(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    A collection of hosts which must only be scheduled all at once.

    Any host whose label carries an atomic group will only ever be
    scheduled for a job together with the other hosts sharing that label.

    Required:
      name: A name for this atomic group, e.g. 'rack23' or 'funky_net'.
      max_number_of_machines: The maximum number of machines that will be
              scheduled at once when scheduling jobs to this atomic group.
              The job.synch_count is considered the minimum.

    Optional:
      description: Arbitrary text description of this group's purpose.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)
    # This magic value is the default to simplify the scheduler logic.
    # It must be "large".  The common use of atomic groups is to want all
    # machines in the group to be used, limits on which subset used are
    # often chosen via dependency labels.
    # TODO(dennisjeffrey): Revisit this so we don't have to assume that
    # "infinity" is around 3.3 million.
    INFINITE_MACHINES = 333333333
    max_number_of_machines = dbmodels.IntegerField(default=INFINITE_MACHINES)
    invalid = dbmodels.BooleanField(default=False,
                                    editable=settings.FULL_ADMIN)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()


    def enqueue_job(self, job, is_template=False):
        """Enqueue a job on an associated atomic group of hosts.

        @param job: A job to enqueue.
        @param is_template: Whether the status should be "Template".
        """
        entry = HostQueueEntry.create(atomic_group=self, job=job,
                                      is_template=is_template)
        entry.save()


    def clean_object(self):
        """Drops all label associations from this group."""
        self.label_set.clear()


    class Meta:
        """Metadata for class AtomicGroup."""
        db_table = 'afe_atomic_groups'


    def __unicode__(self):
        return unicode(self.name)
91
92
class Label(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    Required:
      name: label name

    Optional:
      kernel_config: URL/path to the kernel config used by jobs run on this
              label.
      platform: If True, this label is a platform label (defaults to False).
      only_if_needed: If True, hosts carrying this label are used only when
              the label is explicitly requested by the job/test (either as
              the meta_host or in the job_dependencies).
      atomic_group: The atomic group associated with this label.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    kernel_config = dbmodels.CharField(max_length=255, blank=True)
    platform = dbmodels.BooleanField(default=False)
    invalid = dbmodels.BooleanField(default=False,
                                    editable=settings.FULL_ADMIN)
    only_if_needed = dbmodels.BooleanField(default=False)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    atomic_group = dbmodels.ForeignKey(AtomicGroup, null=True, blank=True)


    def clean_object(self):
        """Drops host and test associations from this label."""
        self.host_set.clear()
        self.test_set.clear()


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job on any host of this label.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        entry = HostQueueEntry.create(meta_host=self, job=job,
                                      is_template=is_template,
                                      atomic_group=atomic_group)
        entry.save()


    class Meta:
        """Metadata for class Label."""
        db_table = 'afe_labels'

    def __unicode__(self):
        return unicode(self.name)
143
144
class Shard(dbmodels.Model, model_logic.ModelExtensions):
    """A scheduler shard.

    labels: labels associated with this shard.  NOTE(review): presumably the
            set of labels whose jobs/hosts this shard is responsible for --
            confirm against the shard scheduler logic.
    """

    labels = dbmodels.ManyToManyField(Label, blank=True,
                                      db_table='afe_shards_labels')

    class Meta:
        """Metadata for class Shard."""
        db_table = 'afe_shards'
153
154
class Drone(dbmodels.Model, model_logic.ModelExtensions):
    """
    A scheduler drone

    hostname: the drone's hostname
    """
    hostname = dbmodels.CharField(max_length=255, unique=True)

    name_field = 'hostname'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Saves the drone; only superusers may do this."""
        current = User.current_user()
        if not current.is_superuser():
            raise Exception('Only superusers may edit drones')
        super(Drone, self).save(*args, **kwargs)


    def delete(self):
        """Deletes the drone; only superusers may do this."""
        current = User.current_user()
        if not current.is_superuser():
            raise Exception('Only superusers may delete drones')
        super(Drone, self).delete()


    class Meta:
        """Metadata for class Drone."""
        db_table = 'afe_drones'

    def __unicode__(self):
        return unicode(self.hostname)
185
186
class DroneSet(dbmodels.Model, model_logic.ModelExtensions):
    """
    A set of scheduler drones

    These will be used by the scheduler to decide what drones a job is allowed
    to run on.

    name: the drone set's name
    drones: the drones that are part of the set
    """
    DRONE_SETS_ENABLED = global_config.global_config.get_config_value(
            'SCHEDULER', 'drone_sets_enabled', type=bool, default=False)
    DEFAULT_DRONE_SET_NAME = global_config.global_config.get_config_value(
            'SCHEDULER', 'default_drone_set_name', default=None)

    name = dbmodels.CharField(max_length=255, unique=True)
    drones = dbmodels.ManyToManyField(Drone, db_table='afe_drone_sets_drones')

    name_field = 'name'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Saves the drone set; only superusers may do this."""
        current = User.current_user()
        if not current.is_superuser():
            raise Exception('Only superusers may edit drone sets')
        super(DroneSet, self).save(*args, **kwargs)


    def delete(self):
        """Deletes the drone set; only superusers may do this."""
        current = User.current_user()
        if not current.is_superuser():
            raise Exception('Only superusers may delete drone sets')
        super(DroneSet, self).delete()


    @classmethod
    def drone_sets_enabled(cls):
        """Returns whether the drone set feature is enabled in config.

        @param cls: Implicit class object.
        """
        return cls.DRONE_SETS_ENABLED


    @classmethod
    def default_drone_set_name(cls):
        """Returns the globally configured default drone set name.

        @param cls: Implicit class object.
        """
        return cls.DEFAULT_DRONE_SET_NAME


    @classmethod
    def get_default(cls):
        """Gets the default drone set, compatible with Job.add_object.

        @param cls: Implicit class object.
        """
        return cls.smart_get(cls.DEFAULT_DRONE_SET_NAME)


    @classmethod
    def resolve_name(cls, drone_set_name):
        """
        Returns the name of one of these, if not None, in order of preference:
        1) the drone set given,
        2) the current user's default drone set, or
        3) the global default drone set

        or returns None if drone sets are disabled

        @param cls: Implicit class object.
        @param drone_set_name: A drone set name.
        """
        if not cls.drone_sets_enabled():
            return None

        current = User.current_user()
        user_default = current.drone_set and current.drone_set.name

        return drone_set_name or user_default or cls.get_default().name


    def get_drone_hostnames(self):
        """Gets the hostnames of all drones in this drone set."""
        hostnames = self.drones.all().values_list('hostname', flat=True)
        return set(hostnames)


    class Meta:
        """Metadata for class DroneSet."""
        db_table = 'afe_drone_sets'

    def __unicode__(self):
        return unicode(self.name)
283
284
class User(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    login :user login name

    Optional:
    access_level: 0=User (default), 1=Admin, 100=Root
    """
    ACCESS_ROOT = 100
    ACCESS_ADMIN = 1
    ACCESS_USER = 0

    AUTOTEST_SYSTEM = 'autotest_system'

    login = dbmodels.CharField(max_length=255, unique=True)
    access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)

    # user preferences
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)
    show_experimental = dbmodels.BooleanField(default=False)

    name_field = 'login'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Saves the user.

        Only a superuser or the user themselves may modify an account;
        newly created users are added to the 'Everyone' ACL group.
        """
        is_new = self.id is None
        actor = thread_local.get_user()
        if actor and not actor.is_superuser() and actor.login != self.login:
            raise AclAccessViolation("You cannot modify user " + self.login)
        super(User, self).save(*args, **kwargs)
        if is_new:
            AclGroup.objects.get(name='Everyone').users.add(self)


    def is_superuser(self):
        """Returns whether the user has superuser (root) access."""
        return self.ACCESS_ROOT <= self.access_level


    @classmethod
    def current_user(cls):
        """Returns the current user.

        Falls back to the root-level system account when no user is bound
        to the current thread.

        @param cls: Implicit class object.
        """
        user = thread_local.get_user()
        if user is not None:
            return user
        user, _ = cls.objects.get_or_create(login=cls.AUTOTEST_SYSTEM)
        user.access_level = cls.ACCESS_ROOT
        user.save()
        return user


    class Meta:
        """Metadata for class User."""
        db_table = 'afe_users'

    def __unicode__(self):
        return unicode(self.login)
353
354
class Host(model_logic.ModelWithInvalid, rdb_model_extensions.AbstractHostModel,
           model_logic.ModelWithAttributes):
    """\
    Required:
    hostname

    optional:
    locked: if true, host is locked and will not be queued

    Internal:
    From AbstractHostModel:
        synch_id: currently unused
        status: string describing status of host
        invalid: true if the host has been deleted
        protection: indicates what can be done to this host during repair
        lock_time: DateTime at which the host was locked
        dirty: true if the host has been used without being rebooted
    Local:
        locked_by: user that locked the host, or null if the host is unlocked
    """

    # Note: Only specify foreign keys here, specify all native host columns in
    # rdb_model_extensions instead.
    Protection = host_protections.Protection
    labels = dbmodels.ManyToManyField(Label, blank=True,
                                      db_table='afe_hosts_labels')
    locked_by = dbmodels.ForeignKey(User, null=True, blank=True, editable=False)
    name_field = 'hostname'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    leased_objects = model_logic.LeasedHostManager()


    def __init__(self, *args, **kwargs):
        # Track the 'status' attribute so changes are reported to
        # on_attribute_changed() (which logs status transitions).
        super(Host, self).__init__(*args, **kwargs)
        self._record_attributes(['status'])


    @staticmethod
    def create_one_time_host(hostname):
        """Creates a one-time host.

        The host is created (or reused, if an invalid host with this name
        already exists) as invalid, unlocked and with protection
        DO_NOT_REPAIR.

        @param hostname: The name for the host.
        @return The Host instance.
        @raises model_logic.ValidationError if a valid host with this
            hostname already exists.
        """
        query = Host.objects.filter(hostname=hostname)
        if query.count() == 0:
            host = Host(hostname=hostname, invalid=True)
            host.do_validate()
        else:
            host = query[0]
            if not host.invalid:
                raise model_logic.ValidationError({
                    'hostname' : '%s already exists in the autotest DB.  '
                        'Select it rather than entering it as a one time '
                        'host.' % hostname
                    })
        host.protection = host_protections.Protection.DO_NOT_REPAIR
        host.locked = False
        host.save()
        host.clean_object()
        return host


    def resurrect_object(self, old_object):
        """Restores state from a previous invalid instance of this host."""
        super(Host, self).resurrect_object(old_object)
        # invalid hosts can be in use by the scheduler (as one-time hosts), so
        # don't change the status
        self.status = old_object.status


    def clean_object(self):
        """Clears ACL group and label memberships for this host."""
        self.aclgroup_set.clear()
        self.labels.clear()


    def save(self, *args, **kwargs):
        """Saves the host, maintaining lock metadata and ACL membership.

        Locking records the locking user and time and marks the host dirty;
        unlocking clears them.  Newly created hosts are added to the
        'Everyone' ACL group.

        @raises AclAccessViolation if the current user may not modify this
            host.
        """
        # extra spaces in the hostname can be a sneaky source of errors
        self.hostname = self.hostname.strip()
        # is this a new object being saved for the first time?
        first_time = (self.id is None)
        if not first_time:
            AclGroup.check_for_acl_violation_hosts([self])
        if self.locked and not self.locked_by:
            self.locked_by = User.current_user()
            self.lock_time = datetime.now()
            self.dirty = True
        elif not self.locked and self.locked_by:
            self.locked_by = None
            self.lock_time = None
        super(Host, self).save(*args, **kwargs)
        if first_time:
            everyone = AclGroup.objects.get(name='Everyone')
            everyone.hosts.add(self)
        self._check_for_updated_attributes()


    def delete(self):
        """Deletes the host, marking its queue entries deleted and aborted.

        @raises AclAccessViolation if the current user may not modify this
            host.
        """
        AclGroup.check_for_acl_violation_hosts([self])
        for queue_entry in self.hostqueueentry_set.all():
            queue_entry.deleted = True
            queue_entry.abort()
        super(Host, self).delete()


    def on_attribute_changed(self, attribute, old_value):
        """Logs status transitions; only 'status' is tracked (see __init__)."""
        assert attribute == 'status'
        logging.info(self.hostname + ' -> ' + self.status)


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job on this host.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        queue_entry = HostQueueEntry.create(host=self, job=job,
                                            is_template=is_template,
                                            atomic_group=atomic_group)
        # allow recovery of dead hosts from the frontend
        if not self.active_queue_entry() and self.is_dead():
            self.status = Host.Status.READY
            self.save()
        queue_entry.save()

        # Record an IneligibleHostQueue row for this (job, host) pair --
        # presumably to keep the scheduler from picking this host again for
        # the same job; confirm against the scheduler.
        block = IneligibleHostQueue(job=job, host=self)
        block.save()


    def platform(self):
        """The platform of the host."""
        # TODO(showard): slightly hacky?
        platforms = self.labels.filter(platform=True)
        if len(platforms) == 0:
            return None
        return platforms[0]
    platform.short_description = 'Platform'


    @classmethod
    def check_no_platform(cls, hosts):
        """Verify the specified hosts have no associated platforms.

        @param cls: Implicit class object.
        @param hosts: The hosts to verify.
        @raises model_logic.ValidationError if any hosts already have a
            platform.
        """
        Host.objects.populate_relationships(hosts, Label, 'label_list')
        errors = []
        for host in hosts:
            platforms = [label.name for label in host.label_list
                         if label.platform]
            if platforms:
                # do a join, just in case this host has multiple platforms,
                # we'll be able to see it
                errors.append('Host %s already has a platform: %s' % (
                              host.hostname, ', '.join(platforms)))
        if errors:
            raise model_logic.ValidationError({'labels': '; '.join(errors)})


    def is_dead(self):
        """Returns whether the host is dead (has status repair failed)."""
        return self.status == Host.Status.REPAIR_FAILED


    def active_queue_entry(self):
        """Returns the active queue entry for this host, or None if none."""
        active = list(self.hostqueueentry_set.filter(active=True))
        if not active:
            return None
        assert len(active) == 1, ('More than one active entry for '
                                  'host ' + self.hostname)
        return active[0]


    def _get_attribute_model_and_args(self, attribute):
        # Hook for ModelWithAttributes: host keyvals are stored in
        # HostAttribute rows keyed on (host, attribute).
        return HostAttribute, dict(host=self, attribute=attribute)


    class Meta:
        """Metadata for the Host class."""
        db_table = 'afe_hosts'

    def __unicode__(self):
        return unicode(self.hostname)
542
543
class HostAttribute(dbmodels.Model):
    """Arbitrary keyvals associated with hosts."""
    # The host this keyval belongs to.
    host = dbmodels.ForeignKey(Host)
    # Attribute name (the "key" of the keyval).
    attribute = dbmodels.CharField(max_length=90)
    # Attribute value, stored as a string.
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for the HostAttribute class."""
        db_table = 'afe_host_attributes'
555
556
class Test(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    author: author name
    description: description of the test
    name: test name
    time: short, medium, long
    test_class: the class the test belongs in
    test_category: the category for the test
    test_type: Client or Server
    path: path to pass to run_test()
    sync_count:  is a number >=1 (1 being the default). If it's 1, then it's an
                 async job. If it's >1 it's sync job for that number of machines
                 i.e. if sync_count = 2 it is a sync job that requires two
                 machines.
    Optional:
    dependencies: What the test requires to run. Comma deliminated list
    dependency_labels: many-to-many relationship with labels corresponding to
                       test dependencies.
    experimental: If this is set to True production servers will ignore the test
    run_verify: Whether or not the scheduler should run the verify stage
    run_reset: Whether or not the scheduler should run the reset stage
    test_retry: Number of times to retry test if the test did not complete
                successfully. (optional, default: 0)
    """
    TestTime = enum.Enum('SHORT', 'MEDIUM', 'LONG', start_value=1)

    name = dbmodels.CharField(max_length=255, unique=True)
    author = dbmodels.CharField(max_length=255)
    test_class = dbmodels.CharField(max_length=255)
    test_category = dbmodels.CharField(max_length=255)
    dependencies = dbmodels.CharField(max_length=255, blank=True)
    description = dbmodels.TextField(blank=True)
    experimental = dbmodels.BooleanField(default=True)
    run_verify = dbmodels.BooleanField(default=False)
    test_time = dbmodels.SmallIntegerField(choices=TestTime.choices(),
                                           default=TestTime.MEDIUM)
    test_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices())
    sync_count = dbmodels.IntegerField(default=1)
    path = dbmodels.CharField(max_length=255, unique=True)
    test_retry = dbmodels.IntegerField(blank=True, default=0)
    run_reset = dbmodels.BooleanField(default=True)

    dependency_labels = (
        dbmodels.ManyToManyField(Label, blank=True,
                                 db_table='afe_autotests_dependency_labels'))
    name_field = 'name'
    objects = model_logic.ExtendedManager()


    def admin_description(self):
        """Returns the description as an HTML snippet for the admin UI."""
        escaped = saxutils.escape(self.description)
        return '<span style="white-space:pre">%s</span>' % escaped
    admin_description.allow_tags = True
    admin_description.short_description = 'Description'


    class Meta:
        """Metadata for class Test."""
        db_table = 'afe_autotests'

    def __unicode__(self):
        return unicode(self.name)
622
623
class TestParameter(dbmodels.Model):
    """
    A declared parameter of a test
    """
    # The test that declares this parameter.
    test = dbmodels.ForeignKey(Test)
    # Parameter name; unique per test (see Meta.unique_together).
    name = dbmodels.CharField(max_length=255)

    class Meta:
        """Metadata for class TestParameter."""
        db_table = 'afe_test_parameters'
        unique_together = ('test', 'name')

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.test.name)
638
639
class Profiler(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    name: profiler name
    test_type: Client or Server

    Optional:
    description: arbitrary text description
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)

    name_field = 'name'
    objects = model_logic.ExtendedManager()


    class Meta:
        """Metadata for class Profiler."""
        db_table = 'afe_profilers'

    def __unicode__(self):
        return unicode(self.name)
662
663
class AclGroup(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    name: name of ACL group

    Optional:
    description: arbitrary description of group
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.CharField(max_length=255, blank=True)
    users = dbmodels.ManyToManyField(User, blank=False,
                                     db_table='afe_acl_groups_users')
    hosts = dbmodels.ManyToManyField(Host, blank=True,
                                     db_table='afe_acl_groups_hosts')

    name_field = 'name'
    objects = model_logic.ExtendedManager()

    @staticmethod
    def check_for_acl_violation_hosts(hosts):
        """Verify the current user has access to the specified hosts.

        Superusers always pass.  Invalid (one-time) hosts and non-Host
        objects (metahosts) are skipped.

        @param hosts: The hosts to verify against.
        @raises AclAccessViolation if the current user doesn't have access
            to a host.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        accessible_host_ids = set(
            host.id for host in Host.objects.filter(aclgroup__users=user))
        for host in hosts:
            # Check if the user has access to this host,
            # but only if it is not a metahost or a one-time-host.
            no_access = (isinstance(host, Host)
                         and not host.invalid
                         and int(host.id) not in accessible_host_ids)
            if no_access:
                raise AclAccessViolation("%s does not have access to %s" %
                                         (str(user), str(host)))


    @staticmethod
    def check_abort_permissions(queue_entries):
        """Look for queue entries that aren't abortable by the current user.

        An entry is not abortable if:
           * the job isn't owned by this user, and
           * the machine isn't ACL-accessible, or
           * the machine is in the "Everyone" ACL

        @param queue_entries: The queue entries to check.
        @raises AclAccessViolation if a queue entry is not abortable by the
            current user.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        not_owned = queue_entries.exclude(job__owner=user.login)
        # I do this using ID sets instead of just Django filters because
        # filtering on M2M dbmodels is broken in Django 0.96. It's better in
        # 1.0.
        # TODO: Use Django filters, now that we're using 1.0.
        accessible_ids = set(
            entry.id for entry
            in not_owned.filter(host__aclgroup__users__login=user.login))
        public_ids = set(entry.id for entry
                         in not_owned.filter(host__aclgroup__name='Everyone'))
        cannot_abort = [entry for entry in not_owned.select_related()
                        if entry.id not in accessible_ids
                        or entry.id in public_ids]
        if len(cannot_abort) == 0:
            return
        entry_names = ', '.join('%s-%s/%s' % (entry.job.id, entry.job.owner,
                                              entry.host_or_metahost_name())
                                for entry in cannot_abort)
        raise AclAccessViolation('You cannot abort the following job entries: '
                                 + entry_names)


    def check_for_acl_violation_acl_group(self):
        """Verifies the current user has access to this ACL group.

        The special 'Everyone' group may never be modified.

        @raises AclAccessViolation if the current user doesn't have access to
            this ACL group.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        if self.name == 'Everyone':
            raise AclAccessViolation("You cannot modify 'Everyone'!")
        if not user in self.users.all():
            raise AclAccessViolation("You do not have access to %s"
                                     % self.name)

    @staticmethod
    def on_host_membership_change():
        """Invoked when host membership changes.

        Keeps the 'Everyone' group consistent: hosts in no ACL group are
        added to it; hosts that joined another group are removed from it.
        """
        everyone = AclGroup.objects.get(name='Everyone')

        # find hosts that aren't in any ACL group and add them to Everyone
        # TODO(showard): this is a bit of a hack, since the fact that this query
        # works is kind of a coincidence of Django internals.  This trick
        # doesn't work in general (on all foreign key relationships).  I'll
        # replace it with a better technique when the need arises.
        orphaned_hosts = Host.valid_objects.filter(aclgroup__id__isnull=True)
        everyone.hosts.add(*orphaned_hosts.distinct())

        # find hosts in both Everyone and another ACL group, and remove them
        # from Everyone
        hosts_in_everyone = Host.valid_objects.filter(aclgroup__name='Everyone')
        acled_hosts = set()
        for host in hosts_in_everyone:
            # Has an ACL group other than Everyone
            if host.aclgroup_set.count() > 1:
                acled_hosts.add(host)
        everyone.hosts.remove(*acled_hosts)


    def delete(self):
        """Deletes the group after ACL checks; 'Everyone' cannot be deleted."""
        if (self.name == 'Everyone'):
            raise AclAccessViolation("You cannot delete 'Everyone'!")
        self.check_for_acl_violation_acl_group()
        super(AclGroup, self).delete()
        self.on_host_membership_change()


    def add_current_user_if_empty(self):
        """Adds the current user if the set of users is empty."""
        if not self.users.count():
            self.users.add(User.current_user())


    def perform_after_save(self, change):
        """Called after a save.

        @param change: Whether the save was an update to an existing group
            (as opposed to a creation).
        """
        if not change:
            self.users.add(User.current_user())
        self.add_current_user_if_empty()
        self.on_host_membership_change()


    def save(self, *args, **kwargs):
        """Saves the group, checking ACLs and fixing up memberships after."""
        change = bool(self.id)
        if change:
            # Check the original object for an ACL violation
            AclGroup.objects.get(id=self.id).check_for_acl_violation_acl_group()
        super(AclGroup, self).save(*args, **kwargs)
        self.perform_after_save(change)


    class Meta:
        """Metadata for class AclGroup."""
        db_table = 'afe_acl_groups'

    def __unicode__(self):
        return unicode(self.name)
823
824
class Kernel(dbmodels.Model):
    """
    A kernel configuration for a parameterized job
    """
    version = dbmodels.CharField(max_length=255)
    cmdline = dbmodels.CharField(max_length=255, blank=True)

    @classmethod
    def create_kernels(cls, kernel_list):
        """Creates all kernels in the kernel list.

        @param cls: Implicit class object.
        @param kernel_list: A list of dictionaries that describe the kernels,
            in the same format as the 'kernel' argument to
            rpc_interface.generate_control_file.
        @return A list of the created kernels, or None if kernel_list is
            empty.
        """
        if not kernel_list:
            return None
        return [cls._create(kernel_info) for kernel_info in kernel_list]


    @classmethod
    def _create(cls, kernel_dict):
        # Consume the recognized keys; anything left over is an error.
        version = kernel_dict.pop('version')
        cmdline = kernel_dict.pop('cmdline', '')
        if kernel_dict:
            raise Exception('Extraneous kernel arguments remain: %r'
                            % kernel_dict)

        kernel, _ = cls.objects.get_or_create(version=version,
                                              cmdline=cmdline)
        return kernel


    class Meta:
        """Metadata for class Kernel."""
        db_table = 'afe_kernels'
        unique_together = ('version', 'cmdline')

    def __unicode__(self):
        return u'%s %s' % (self.version, self.cmdline)
868
869
class ParameterizedJob(dbmodels.Model):
    """
    Auxiliary configuration for a parameterized job.
    """
    test = dbmodels.ForeignKey(Test)
    label = dbmodels.ForeignKey(Label, null=True)
    use_container = dbmodels.BooleanField(default=False)
    profile_only = dbmodels.BooleanField(default=False)
    upload_kernel_config = dbmodels.BooleanField(default=False)

    kernels = dbmodels.ManyToManyField(
            Kernel, db_table='afe_parameterized_job_kernels')
    profilers = dbmodels.ManyToManyField(
            Profiler, through='ParameterizedJobProfiler')


    @classmethod
    def smart_get(cls, id_or_name, *args, **kwargs):
        """For compatibility with Job.add_object.

        @param cls: Implicit class object.
        @param id_or_name: The ID to get; used directly as the primary key
                (lookup by name is not supported here).
        @param args: Non-keyword arguments (unused).
        @param kwargs: Keyword arguments (unused).
        """
        return cls.objects.get(pk=id_or_name)


    def job(self):
        """Returns the job if it exists, or else None."""
        jobs = self.job_set.all()
        assert jobs.count() <= 1
        # Explicit conditional instead of the error-prone
        # 'jobs and jobs[0] or None' idiom, which would silently return None
        # if jobs[0] were ever falsy.
        return jobs[0] if jobs else None


    class Meta:
        """Metadata for class ParameterizedJob."""
        db_table = 'afe_parameterized_jobs'

    def __unicode__(self):
        return u'%s (parameterized) - %s' % (self.test.name, self.job())
911
912
class ParameterizedJobProfiler(dbmodels.Model):
    """
    A profiler to run on a parameterized job
    """
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    profiler = dbmodels.ForeignKey(Profiler)

    class Meta:
        """Metedata for class ParameterizedJobProfiler."""
        db_table = 'afe_parameterized_jobs_profilers'
        # Each profiler may be attached at most once per parameterized job.
        unique_together = ('parameterized_job', 'profiler')
923        unique_together = ('parameterized_job', 'profiler')
924
925
class ParameterizedJobProfilerParameter(dbmodels.Model):
    """
    A parameter for a profiler in a parameterized job
    """
    parameterized_job_profiler = dbmodels.ForeignKey(ParameterizedJobProfiler)
    parameter_name = dbmodels.CharField(max_length=255)
    # Value is stored as text; parameter_type records how to interpret it.
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobProfilerParameter."""
        db_table = 'afe_parameterized_job_profiler_parameters'
        # Parameter names are unique within a given job profiler.
        unique_together = ('parameterized_job_profiler', 'parameter_name')

    def __unicode__(self):
        return u'%s - %s' % (self.parameterized_job_profiler.profiler.name,
                             self.parameter_name)
943                             self.parameter_name)
944
945
class ParameterizedJobParameter(dbmodels.Model):
    """
    Parameters for a parameterized job
    """
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    test_parameter = dbmodels.ForeignKey(TestParameter)
    # Value is stored as text; parameter_type records how to interpret it.
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobParameter."""
        db_table = 'afe_parameterized_job_parameters'
        # Each test parameter may be set at most once per parameterized job.
        unique_together = ('parameterized_job', 'test_parameter')

    def __unicode__(self):
        return u'%s - %s' % (self.parameterized_job.job().name,
                             self.test_parameter.name)
963                             self.test_parameter.name)
964
965
class JobManager(model_logic.ExtendedManager):
    'Custom manager to provide efficient status counts querying.'
    def get_status_counts(self, job_ids):
        """Returns a dict mapping the given job IDs to their status count dicts.

        Uses a single aggregate query over afe_host_queue_entries rather than
        one query per job.

        @param job_ids: A list of job IDs (integers).
        @return A dict {job_id: {full_status: count}}; a job with no host
                queue entries maps to an empty dict.
        """
        if not job_ids:
            return {}
        # Coerce each ID through int() before interpolating it into the SQL
        # below, so a malformed or untrusted value cannot inject SQL.  (The
        # IDs cannot be bound as ordinary query parameters because the IN
        # list has variable length.)
        id_list = '(%s)' % ','.join(str(int(job_id)) for job_id in job_ids)
        cursor = connection.cursor()
        cursor.execute("""
            SELECT job_id, status, aborted, complete, COUNT(*)
            FROM afe_host_queue_entries
            WHERE job_id IN %s
            GROUP BY job_id, status, aborted, complete
            """ % id_list)
        all_job_counts = dict((job_id, {}) for job_id in job_ids)
        for job_id, status, aborted, complete, count in cursor.fetchall():
            job_dict = all_job_counts[job_id]
            full_status = HostQueueEntry.compute_full_status(status, aborted,
                                                             complete)
            job_dict.setdefault(full_status, 0)
            job_dict[full_status] += count
        return all_job_counts
991
992
class Job(dbmodels.Model, model_logic.ModelExtensions):
    """\
    owner: username of job owner
    name: job name (does not have to be unique)
    priority: Integer priority value.  Higher is more important.
    control_file: contents of control file
    control_type: Client or Server
    created_on: date of job creation
    submitted_on: date of job submission
    synch_count: how many hosts should be used per autoserv execution
    run_verify: Whether or not to run the verify phase
    run_reset: Whether or not to run the reset phase
    timeout: DEPRECATED - hours from queuing time until job times out
    timeout_mins: minutes from job queuing time until the job times out
    max_runtime_hrs: DEPRECATED - hours from job starting time until job
                     times out
    max_runtime_mins: minutes from job starting time until job times out
    email_list: list of people to email on completion delimited by any of:
                white space, ',', ':', ';'
    dependency_labels: many-to-many relationship with labels corresponding to
                       job dependencies
    reboot_before: Never, If dirty, or Always
    reboot_after: Never, If all tests passed, or Always
    parse_failed_repair: if True, a failed repair launched by this job will have
    its results parsed as part of the job.
    drone_set: The set of drones to run this job on
    parent_job: Parent job (optional)
    test_retry: Number of times to retry test if the test did not complete
                successfully. (optional, default: 0)
    """
    # TIMEOUT is deprecated.
    DEFAULT_TIMEOUT = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_default', default=24)
    DEFAULT_TIMEOUT_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_mins_default', default=24*60)
    # MAX_RUNTIME_HRS is deprecated. Will be removed after switch to mins is
    # completed.
    DEFAULT_MAX_RUNTIME_HRS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_hrs_default', default=72)
    DEFAULT_MAX_RUNTIME_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_mins_default', default=72*60)
    DEFAULT_PARSE_FAILED_REPAIR = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'parse_failed_repair_default', type=bool,
        default=False)

    owner = dbmodels.CharField(max_length=255)
    name = dbmodels.CharField(max_length=255)
    priority = dbmodels.SmallIntegerField(default=priorities.Priority.DEFAULT)
    control_file = dbmodels.TextField(null=True, blank=True)
    control_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices(),
        blank=True, # to allow 0
        default=control_data.CONTROL_TYPE.CLIENT)
    created_on = dbmodels.DateTimeField()
    synch_count = dbmodels.IntegerField(blank=True, default=0)
    timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
    run_verify = dbmodels.BooleanField(default=False)
    email_list = dbmodels.CharField(max_length=250, blank=True)
    dependency_labels = (
            dbmodels.ManyToManyField(Label, blank=True,
                                     db_table='afe_jobs_dependency_labels'))
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    parse_failed_repair = dbmodels.BooleanField(
        default=DEFAULT_PARSE_FAILED_REPAIR)
    # max_runtime_hrs is deprecated. Will be removed after switch to mins is
    # completed.
    max_runtime_hrs = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_HRS)
    max_runtime_mins = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_MINS)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)

    parameterized_job = dbmodels.ForeignKey(ParameterizedJob, null=True,
                                            blank=True)

    parent_job = dbmodels.ForeignKey('self', blank=True, null=True)

    test_retry = dbmodels.IntegerField(blank=True, default=0)

    run_reset = dbmodels.BooleanField(default=True)

    timeout_mins = dbmodels.IntegerField(default=DEFAULT_TIMEOUT_MINS)

    shard = dbmodels.ForeignKey(Shard, blank=True, null=True)

    # custom manager
    objects = JobManager()


    @decorators.cached_property
    def labels(self):
        """All the labels of this job"""
        # We need to convert dependency_labels to a list, because all() gives us
        # back an iterator, and storing/caching an iterator means we'd only be
        # able to read from it once.
        return list(self.dependency_labels.all())


    def is_server_job(self):
        """Returns whether this job is of type server."""
        return self.control_type == control_data.CONTROL_TYPE.SERVER


    @classmethod
    def parameterized_jobs_enabled(cls):
        """Returns whether parameterized jobs are enabled.

        @param cls: Implicit class object.
        """
        return global_config.global_config.get_config_value(
                'AUTOTEST_WEB', 'parameterized_jobs', type=bool)


    @classmethod
    def check_parameterized_job(cls, control_file, parameterized_job):
        """Checks that the job is valid given the global config settings.

        First, either control_file must be set, or parameterized_job must be
        set, but not both. Second, parameterized_job must be set if and only if
        the parameterized_jobs option in the global config is set to True.

        @param cls: Implict class object.
        @param control_file: A control file.
        @param parameterized_job: A parameterized job.
        """
        if not (bool(control_file) ^ bool(parameterized_job)):
            raise Exception('Job must have either control file or '
                            'parameterization, but not both')

        parameterized_jobs_enabled = cls.parameterized_jobs_enabled()
        if control_file and parameterized_jobs_enabled:
            raise Exception('Control file specified, but parameterized jobs '
                            'are enabled')
        if parameterized_job and not parameterized_jobs_enabled:
            raise Exception('Parameterized job specified, but parameterized '
                            'jobs are not enabled')


    @classmethod
    def create(cls, owner, options, hosts):
        """Creates a job.

        The job is created by taking some information (the listed args) and
        filling in the rest of the necessary information.

        @param cls: Implicit class object.
        @param owner: The owner for the job.
        @param options: An options object.
        @param hosts: The hosts to use.
        """
        AclGroup.check_for_acl_violation_hosts(hosts)

        control_file = options.get('control_file')
        parameterized_job = options.get('parameterized_job')

        # The current implementation of parameterized jobs requires that only
        # control files or parameterized jobs are used. Using the image
        # parameter on autoupdate_ParameterizedJob doesn't mix pure
        # parameterized jobs and control files jobs, it does muck enough with
        # normal jobs by adding a parameterized id to them that this check will
        # fail. So for now we just skip this check.
        # cls.check_parameterized_job(control_file=control_file,
        #                             parameterized_job=parameterized_job)
        user = User.current_user()
        if options.get('reboot_before') is None:
            options['reboot_before'] = user.get_reboot_before_display()
        if options.get('reboot_after') is None:
            options['reboot_after'] = user.get_reboot_after_display()

        drone_set = DroneSet.resolve_name(options.get('drone_set'))

        if options.get('timeout_mins') is None and options.get('timeout'):
            options['timeout_mins'] = options['timeout'] * 60

        job = cls.add_object(
            owner=owner,
            name=options['name'],
            priority=options['priority'],
            control_file=control_file,
            control_type=options['control_type'],
            synch_count=options.get('synch_count'),
            # timeout needs to be deleted in the future.
            timeout=options.get('timeout'),
            timeout_mins=options.get('timeout_mins'),
            max_runtime_mins=options.get('max_runtime_mins'),
            run_verify=options.get('run_verify'),
            email_list=options.get('email_list'),
            reboot_before=options.get('reboot_before'),
            reboot_after=options.get('reboot_after'),
            parse_failed_repair=options.get('parse_failed_repair'),
            created_on=datetime.now(),
            drone_set=drone_set,
            parameterized_job=parameterized_job,
            # NOTE(review): parent_job is passed as an ID here; presumably
            # add_object resolves it -- confirm against model_logic.
            parent_job=options.get('parent_job_id'),
            test_retry=options.get('test_retry'),
            run_reset=options.get('run_reset'))

        job.dependency_labels = options['dependencies']

        if options.get('keyvals'):
            for key, value in options['keyvals'].iteritems():
                JobKeyval.objects.create(job=job, key=key, value=value)

        return job


    def save(self, *args, **kwargs):
        """Saves the job.  Parameterized-job validation is currently skipped."""
        # The current implementation of parameterized jobs requires that only
        # control files or parameterized jobs are used. Using the image
        # parameter on autoupdate_ParameterizedJob doesn't mix pure
        # parameterized jobs and control files jobs, it does muck enough with
        # normal jobs by adding a parameterized id to them that this check will
        # fail. So for now we just skip this check.
        # cls.check_parameterized_job(control_file=self.control_file,
        #                             parameterized_job=self.parameterized_job)
        super(Job, self).save(*args, **kwargs)


    def queue(self, hosts, atomic_group=None, is_template=False):
        """Enqueue a job on the given hosts.

        @param hosts: The hosts to use.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        if not hosts:
            if atomic_group:
                # No hosts or labels are required to queue an atomic group
                # Job.  However, if they are given, we respect them below.
                atomic_group.enqueue_job(self, is_template=is_template)
            else:
                # hostless job
                entry = HostQueueEntry.create(job=self, is_template=is_template)
                entry.save()
            return

        for host in hosts:
            host.enqueue_job(self, atomic_group=atomic_group,
                             is_template=is_template)


    def create_recurring_job(self, start_date, loop_period, loop_count, owner):
        """Creates a recurring job.

        @param start_date: The starting date of the job.
        @param loop_period: How often to re-run the job, in seconds.
        @param loop_count: The re-run count.
        @param owner: The owner of the job.
        @return The ID of the created RecurringRun.
        """
        rec = RecurringRun(job=self, start_date=start_date,
                           loop_period=loop_period,
                           loop_count=loop_count,
                           owner=User.objects.get(login=owner))
        rec.save()
        return rec.id


    def user(self):
        """Gets the user of this job, or None if it doesn't exist."""
        try:
            return User.objects.get(login=self.owner)
        except User.DoesNotExist:
            # Each Django model raises its own DoesNotExist subclass;
            # self.DoesNotExist (i.e. Job.DoesNotExist) would never catch the
            # exception raised by User.objects.get.
            return None


    def abort(self):
        """Aborts this job."""
        for queue_entry in self.hostqueueentry_set.all():
            queue_entry.abort()


    def tag(self):
        """Returns a string tag for this job."""
        return '%s-%s' % (self.id, self.owner)


    def keyval_dict(self):
        """Returns all keyvals for this job as a dictionary."""
        return dict((keyval.key, keyval.value)
                    for keyval in self.jobkeyval_set.all())


    class Meta:
        """Metadata for class Job."""
        db_table = 'afe_jobs'

    def __unicode__(self):
        return u'%s (%s-%s)' % (self.name, self.id, self.owner)
1284
1285
class JobKeyval(dbmodels.Model, model_logic.ModelExtensions):
    """Keyvals associated with jobs"""
    # Free-form key/value pairs attached to a job; see Job.keyval_dict().
    job = dbmodels.ForeignKey(Job)
    key = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class JobKeyval."""
        db_table = 'afe_job_keyvals'
1297
1298
class IneligibleHostQueue(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an ineligible host queue."""
    # A (job, host) pairing; consumers of this table live outside this file.
    job = dbmodels.ForeignKey(Job)
    host = dbmodels.ForeignKey(Host)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class IneligibleHostQueue."""
        db_table = 'afe_ineligible_host_queues'
1309
1310
class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents a host queue entry."""
    # Status constants shared with the scheduler.
    Status = host_queue_entry_states.Status
    ACTIVE_STATUSES = host_queue_entry_states.ACTIVE_STATUSES
    COMPLETE_STATUSES = host_queue_entry_states.COMPLETE_STATUSES

    job = dbmodels.ForeignKey(Job)
    host = dbmodels.ForeignKey(Host, blank=True, null=True)
    status = dbmodels.CharField(max_length=255)
    meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
                                    db_column='meta_host')
    # active/complete are derived from status on every save; see
    # _set_active_and_complete().
    active = dbmodels.BooleanField(default=False)
    complete = dbmodels.BooleanField(default=False)
    deleted = dbmodels.BooleanField(default=False)
    execution_subdir = dbmodels.CharField(max_length=255, blank=True,
                                          default='')
    # If atomic_group is set, this is a virtual HostQueueEntry that will
    # be expanded into many actual hosts within the group at schedule time.
    atomic_group = dbmodels.ForeignKey(AtomicGroup, blank=True, null=True)
    aborted = dbmodels.BooleanField(default=False)
    started_on = dbmodels.DateTimeField(null=True, blank=True)
    finished_on = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()


    def __init__(self, *args, **kwargs):
        """Initializes the entry and starts change-tracking for 'status'."""
        super(HostQueueEntry, self).__init__(*args, **kwargs)
        # Record the initial status so save() can log transitions via
        # on_attribute_changed().
        self._record_attributes(['status'])


    @classmethod
    def create(cls, job, host=None, meta_host=None, atomic_group=None,
                 is_template=False):
        """Creates a new host queue entry.

        @param cls: Implicit class object.
        @param job: The associated job.
        @param host: The associated host.
        @param meta_host: The associated meta host.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        @return An unsaved HostQueueEntry; the caller must save() it.
        """
        if is_template:
            status = cls.Status.TEMPLATE
        else:
            status = cls.Status.QUEUED

        return cls(job=job, host=host, meta_host=meta_host,
                   atomic_group=atomic_group, status=status)


    def save(self, *args, **kwargs):
        """Saves the entry, deriving active/complete from the status."""
        self._set_active_and_complete()
        super(HostQueueEntry, self).save(*args, **kwargs)
        # Log any status transition since the last recorded state.
        self._check_for_updated_attributes()


    def execution_path(self):
        """
        Path to this entry's results (relative to the base results directory).
        """
        return os.path.join(self.job.tag(), self.execution_subdir)


    def host_or_metahost_name(self):
        """Returns the first non-None name found in priority order.

        The priority order checked is: (1) host name; (2) meta host name; and
        (3) atomic group name.
        """
        if self.host:
            return self.host.hostname
        elif self.meta_host:
            return self.meta_host.name
        else:
            assert self.atomic_group, "no host, meta_host or atomic group!"
            return self.atomic_group.name


    def _set_active_and_complete(self):
        """Syncs the active/complete flags with the current status."""
        if self.status in self.ACTIVE_STATUSES:
            self.active, self.complete = True, False
        elif self.status in self.COMPLETE_STATUSES:
            self.active, self.complete = False, True
        else:
            self.active, self.complete = False, False


    def on_attribute_changed(self, attribute, old_value):
        """Logs status transitions.

        Only 'status' is recorded (see __init__), so that is the only
        attribute expected here.
        """
        assert attribute == 'status'
        logging.info('%s/%d (%d) -> %s', self.host, self.job.id, self.id,
                     self.status)


    def is_meta_host_entry(self):
        'True if this is a entry has a meta_host instead of a host.'
        return self.host is None and self.meta_host is not None


    # This code is shared between rpc_interface and models.HostQueueEntry.
    # Sadly due to circular imports between the 2 (crbug.com/230100) making it
    # a class method was the best way to refactor it. Attempting to put it in
    # rpc_utils or a new utils module failed as that would require us to import
    # models.py but to call it from here we would have to import the utils.py
    # thus creating a cycle.
    @classmethod
    def abort_host_queue_entries(cls, host_queue_entries):
        """Aborts a collection of host_queue_entries.

        Abort these host queue entry and all host queue entries of jobs created
        by them.

        @param host_queue_entries: List of host queue entries we want to abort.
        """
        # This isn't completely immune to race conditions since it's not atomic,
        # but it should be safe given the scheduler's behavior.

        # TODO(milleral): crbug.com/230100
        # The |abort_host_queue_entries| rpc does nearly exactly this,
        # however, trying to re-use the code generates some horrible
        # circular import error.  I'd be nice to refactor things around
        # sometime so the code could be reused.

        # Fixpoint algorithm to find the whole tree of HQEs to abort to
        # minimize the total number of database queries:
        children = set()
        new_children = set(host_queue_entries)
        while new_children:
            children.update(new_children)
            new_child_ids = [hqe.job_id for hqe in new_children]
            new_children = HostQueueEntry.objects.filter(
                    job__parent_job__in=new_child_ids,
                    complete=False, aborted=False).all()
            # To handle circular parental relationships
            new_children = set(new_children) - children

        # Associate a user with the host queue entries that we're about
        # to abort so that we can look up who to blame for the aborts.
        now = datetime.now()
        user = User.current_user()
        aborted_hqes = [AbortedHostQueueEntry(queue_entry=hqe,
                aborted_by=user, aborted_on=now) for hqe in children]
        AbortedHostQueueEntry.objects.bulk_create(aborted_hqes)
        # Bulk update all of the HQEs to set the abort bit.
        child_ids = [hqe.id for hqe in children]
        HostQueueEntry.objects.filter(id__in=child_ids).update(aborted=True)


    def abort(self):
        """ Aborts this host queue entry.

        Abort this host queue entry and all host queue entries of jobs created by
        this one.

        """
        if not self.complete and not self.aborted:
            HostQueueEntry.abort_host_queue_entries([self])


    @classmethod
    def compute_full_status(cls, status, aborted, complete):
        """Returns a modified status msg if the host queue entry was aborted.

        @param cls: Implicit class object.
        @param status: The original status message.
        @param aborted: Whether the host queue entry was aborted.
        @param complete: Whether the host queue entry was completed.
        """
        if aborted and not complete:
            return 'Aborted (%s)' % status
        return status


    def full_status(self):
        """Returns the full status of this host queue entry, as a string."""
        return self.compute_full_status(self.status, self.aborted,
                                        self.complete)


    def _postprocess_object_dict(self, object_dict):
        """Adds the computed 'full_status' to this entry's serialized dict."""
        object_dict['full_status'] = self.full_status()


    class Meta:
        """Metadata for class HostQueueEntry."""
        db_table = 'afe_host_queue_entries'



    def __unicode__(self):
        hostname = None
        if self.host:
            hostname = self.host.hostname
        return u"%s/%d (%d)" % (hostname, self.job.id, self.id)
1506
1507
class AbortedHostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an aborted host queue entry."""
    queue_entry = dbmodels.OneToOneField(HostQueueEntry, primary_key=True)
    aborted_by = dbmodels.ForeignKey(User)
    aborted_on = dbmodels.DateTimeField()

    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Saves the record, stamping aborted_on with the current time.

        Note: aborted_on is overwritten on every save, including re-saves of
        existing rows.
        """
        self.aborted_on = datetime.now()
        super(AbortedHostQueueEntry, self).save(*args, **kwargs)

    class Meta:
        """Metadata for class AbortedHostQueueEntry."""
        db_table = 'afe_aborted_host_queue_entries'
1524
1525
class RecurringRun(dbmodels.Model, model_logic.ModelExtensions):
    """\
    job: job to use as a template
    owner: owner of the instantiated template
    start_date: Run the job at scheduled date
    loop_period: Re-run (loop) the job periodically
                 (in every loop_period seconds)
    loop_count: Re-run (loop) count
    """

    job = dbmodels.ForeignKey(Job)
    owner = dbmodels.ForeignKey(User)
    start_date = dbmodels.DateTimeField()
    loop_period = dbmodels.IntegerField(blank=True)
    loop_count = dbmodels.IntegerField(blank=True)

    # Manager with extended query functionality from model_logic.
    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class RecurringRun."""
        db_table = 'afe_recurring_run'

    def __unicode__(self):
        return u'RecurringRun(job %s, start %s, period %s, count %s)' % (
            self.job.id, self.start_date, self.loop_period, self.loop_count)
1551
1552
class SpecialTask(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Tasks to run on hosts at the next time they are in the Ready state. Use this
    for high-priority tasks, such as forced repair or forced reinstall.

    host: host to run this task on
    task: special task to run
    requested_by: user to attribute this task to (derived from the queue
                  entry's job owner on save, when queue_entry is set)
    time_requested: date and time the request for this task was made
    is_active: task is currently running
    is_complete: task has finished running
    is_aborted: task was aborted
    success: task finished successfully
    time_started: date and time the task started
    time_finished: date and time the task finished
    queue_entry: Host queue entry waiting on this task (or None, if task was not
                 started in preparation of a job)
    """
    # Task type choices; stored in the 'task' CharField below, so the choice
    # values are the task names themselves (string_values=True).
    Task = enum.Enum('Verify', 'Cleanup', 'Repair', 'Reset', 'Provision',
                     string_values=True)

    host = dbmodels.ForeignKey(Host, blank=False, null=False)
    task = dbmodels.CharField(max_length=64, choices=Task.choices(),
                              blank=False, null=False)
    requested_by = dbmodels.ForeignKey(User)
    # Stamped automatically when the row is first created (auto_now_add).
    time_requested = dbmodels.DateTimeField(auto_now_add=True, blank=False,
                                            null=False)
    is_active = dbmodels.BooleanField(default=False, blank=False, null=False)
    is_complete = dbmodels.BooleanField(default=False, blank=False, null=False)
    is_aborted = dbmodels.BooleanField(default=False, blank=False, null=False)
    time_started = dbmodels.DateTimeField(null=True, blank=True)
    queue_entry = dbmodels.ForeignKey(HostQueueEntry, blank=True, null=True)
    success = dbmodels.BooleanField(default=False, blank=False, null=False)
    time_finished = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()
1587
1588
1589    def save(self, **kwargs):
1590        if self.queue_entry:
1591            self.requested_by = User.objects.get(
1592                    login=self.queue_entry.job.owner)
1593        super(SpecialTask, self).save(**kwargs)
1594
1595
1596    def execution_path(self):
1597        """@see HostQueueEntry.execution_path()"""
1598        return 'hosts/%s/%s-%s' % (self.host.hostname, self.id,
1599                                   self.task.lower())
1600
1601
1602    # property to emulate HostQueueEntry.status
1603    @property
1604    def status(self):
1605        """
1606        Return a host queue entry status appropriate for this task.  Although
1607        SpecialTasks are not HostQueueEntries, it is helpful to the user to
1608        present similar statuses.
1609        """
1610        if self.is_complete:
1611            if self.success:
1612                return HostQueueEntry.Status.COMPLETED
1613            return HostQueueEntry.Status.FAILED
1614        if self.is_active:
1615            return HostQueueEntry.Status.RUNNING
1616        return HostQueueEntry.Status.QUEUED
1617
1618
    # property to emulate HostQueueEntry.started_on
    @property
    def started_on(self):
        """Returns the time at which this special task started.

        None until activate() has been called.
        """
        return self.time_started
1624
1625
1626    @classmethod
1627    def schedule_special_task(cls, host, task):
1628        """Schedules a special task on a host if not already scheduled.
1629
1630        @param cls: Implicit class object.
1631        @param host: The host to use.
1632        @param task: The task to schedule.
1633        """
1634        existing_tasks = SpecialTask.objects.filter(host__id=host.id, task=task,
1635                                                    is_active=False,
1636                                                    is_complete=False)
1637        if existing_tasks:
1638            return existing_tasks[0]
1639
1640        special_task = SpecialTask(host=host, task=task,
1641                                   requested_by=User.current_user())
1642        special_task.save()
1643        return special_task
1644
1645
1646    def abort(self):
1647        """ Abort this special task."""
1648        self.is_aborted = True
1649        self.save()
1650
1651
1652    def activate(self):
1653        """
1654        Sets a task as active and sets the time started to the current time.
1655        """
1656        logging.info('Starting: %s', self)
1657        self.is_active = True
1658        self.time_started = datetime.now()
1659        self.save()
1660
1661
1662    def finish(self, success):
1663        """Sets a task as completed.
1664
1665        @param success: Whether or not the task was successful.
1666        """
1667        logging.info('Finished: %s', self)
1668        self.is_active = False
1669        self.is_complete = True
1670        self.success = success
1671        if self.time_started:
1672            self.time_finished = datetime.now()
1673        self.save()
1674
1675
    class Meta:
        """Metadata for class SpecialTask."""
        # Maps this model onto the existing AFE table.
        db_table = 'afe_special_tasks'
1679
1680
1681    def __unicode__(self):
1682        result = u'Special Task %s (host %s, task %s, time %s)' % (
1683            self.id, self.host, self.task, self.time_requested)
1684        if self.is_complete:
1685            result += u' (completed)'
1686        elif self.is_active:
1687            result += u' (active)'
1688
1689        return result
1690