models.py revision 6b89f1c14c65c2a3a130cc1a063143b38ebc3b94
1# pylint: disable-msg=C0111
2
3import logging
4from datetime import datetime
5import django.core
6try:
7    from django.db import models as dbmodels, connection
8except django.core.exceptions.ImproperlyConfigured:
9    raise ImportError('Django database not yet configured. Import either '
10                       'setup_django_environment or '
11                       'setup_django_lite_environment from '
12                       'autotest_lib.frontend before any imports that '
13                       'depend on django models.')
14from xml.sax import saxutils
15import common
16from autotest_lib.frontend.afe import model_logic, model_attributes
17from autotest_lib.frontend.afe import rdb_model_extensions
18from autotest_lib.frontend import settings, thread_local
19from autotest_lib.client.common_lib import enum, error, host_protections
20from autotest_lib.client.common_lib import global_config
21from autotest_lib.client.common_lib import host_queue_entry_states
22from autotest_lib.client.common_lib import control_data, priorities, decorators
23from autotest_lib.client.common_lib import site_utils
24from autotest_lib.client.common_lib.cros.graphite import autotest_es
25from autotest_lib.server import utils as server_utils
26
# Job options and user preferences.
DEFAULT_REBOOT_BEFORE = model_attributes.RebootBefore.IF_DIRTY
# The "after" default belongs to the RebootAfter enum.  The previous code
# referenced RebootBefore.NEVER, which only worked because NEVER happens to
# have the same value in both enums.
DEFAULT_REBOOT_AFTER = model_attributes.RebootAfter.NEVER
30
31
class AclAccessViolation(Exception):
    """\
    Raised when an operation is attempted without proper permissions as
    dictated by ACLs.
    """
37
38
class AtomicGroup(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    A collection of hosts that must be scheduled as a single unit.

    Any host carrying a label tied to an atomic group is only ever scheduled
    for a job together with the other hosts sharing that label.

    Required:
      name: A name for this atomic group, e.g. 'rack23' or 'funky_net'.
      max_number_of_machines: Upper bound on how many machines get scheduled
              at once for this atomic group.  job.synch_count acts as the
              minimum.

    Optional:
      description: Arbitrary text description of this group's purpose.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)
    # Default chosen so the scheduler can treat "no limit" uniformly: the
    # common use of an atomic group wants every machine in the group, with
    # any narrowing done through dependency labels instead.
    # TODO(dennisjeffrey): Revisit this so we don't have to assume that
    # "infinity" is around 3.3 million.
    INFINITE_MACHINES = 333333333
    max_number_of_machines = dbmodels.IntegerField(default=INFINITE_MACHINES)
    invalid = dbmodels.BooleanField(default=False,
                                    editable=settings.FULL_ADMIN)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()


    def enqueue_job(self, job, is_template=False):
        """Enqueue a job against this atomic group of hosts.

        @param job: A job to enqueue.
        @param is_template: Whether the status should be "Template".
        """
        HostQueueEntry.create(atomic_group=self, job=job,
                              is_template=is_template).save()


    def clean_object(self):
        """Detach all labels when this group is invalidated."""
        self.label_set.clear()


    class Meta:
        """Metadata for class AtomicGroup."""
        db_table = 'afe_atomic_groups'


    def __unicode__(self):
        return unicode(self.name)
94
95
class Label(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    Required:
      name: label name

    Optional:
      kernel_config: URL/path to kernel config for jobs run on this label.
      platform: If True, this is a platform label (defaults to False).
      only_if_needed: If True, a Host carrying this label is only eligible
              when the job/test explicitly requests the label (either as
              the meta_host or in the job_dependencies).
      atomic_group: The atomic group associated with this label.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    kernel_config = dbmodels.CharField(max_length=255, blank=True)
    platform = dbmodels.BooleanField(default=False)
    invalid = dbmodels.BooleanField(default=False,
                                    editable=settings.FULL_ADMIN)
    only_if_needed = dbmodels.BooleanField(default=False)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    atomic_group = dbmodels.ForeignKey(AtomicGroup, null=True, blank=True)


    def clean_object(self):
        """Detach this label from hosts and tests when it is invalidated."""
        self.host_set.clear()
        self.test_set.clear()


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job against any host carrying this label.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        entry = HostQueueEntry.create(meta_host=self, job=job,
                                      is_template=is_template,
                                      atomic_group=atomic_group)
        entry.save()


    class Meta:
        """Metadata for class Label."""
        db_table = 'afe_labels'


    def __unicode__(self):
        return unicode(self.name)
148
149
class Shard(dbmodels.Model, model_logic.ModelExtensions):
    """A shard of the AFE; hosts and jobs are assigned to shards via labels.

    hostname: the shard's hostname
    labels: labels whose hosts this shard is responsible for (see
            Host.assign_to_shard)
    """

    hostname = dbmodels.CharField(max_length=255, unique=True)

    name_field = 'hostname'

    labels = dbmodels.ManyToManyField(Label, blank=True,
                                      db_table='afe_shards_labels')

    class Meta:
        """Metadata for class Shard."""
        db_table = 'afe_shards'


    def rpc_hostname(self):
        """Get the rpc hostname of the shard.

        @return: Just the shard hostname for all non-testing environments.
                 The address of the default gateway for vm testing environments.
        """
        # TODO: Figure out a better solution for testing. Since no 2 shards
        # can run on the same host, if the shard hostname is localhost we
        # conclude that it must be a vm in a test cluster. In such situations
        # a name of localhost:<port> is necessary to achieve the correct
        # afe links/redirection from the frontend (this happens through the
        # host), but for rpcs that are performed *on* the shard, they need to
        # use the address of the gateway.
        # In the virtual machine testing environment (i.e., puppylab), each
        # shard VM has a hostname like localhost:<port>. In the real cluster
        # environment, a shard node does not have 'localhost' for its hostname.
        # The following hostname substitution is needed only for the VM
        # in puppylab.
        # The 'hostname' should not be replaced in the case of real cluster.
        if site_utils.is_puppylab_vm(self.hostname):
            hostname = self.hostname.split(':')[0]
            return self.hostname.replace(
                    hostname, site_utils.DEFAULT_VM_GATEWAY)
        return self.hostname
188
189
class Drone(dbmodels.Model, model_logic.ModelExtensions):
    """
    A scheduler drone

    hostname: the drone's hostname
    """
    hostname = dbmodels.CharField(max_length=255, unique=True)

    name_field = 'hostname'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Persist the drone; restricted to superusers."""
        current = User.current_user()
        if not current.is_superuser():
            raise Exception('Only superusers may edit drones')
        super(Drone, self).save(*args, **kwargs)


    def delete(self):
        """Delete the drone; restricted to superusers."""
        current = User.current_user()
        if not current.is_superuser():
            raise Exception('Only superusers may delete drones')
        super(Drone, self).delete()


    class Meta:
        """Metadata for class Drone."""
        db_table = 'afe_drones'

    def __unicode__(self):
        return unicode(self.hostname)
220
221
class DroneSet(dbmodels.Model, model_logic.ModelExtensions):
    """
    A named collection of scheduler drones.

    The scheduler consults these sets to decide which drones a given job is
    allowed to run on.

    name: the drone set's name
    drones: the drones that are part of the set
    """
    DRONE_SETS_ENABLED = global_config.global_config.get_config_value(
            'SCHEDULER', 'drone_sets_enabled', type=bool, default=False)
    DEFAULT_DRONE_SET_NAME = global_config.global_config.get_config_value(
            'SCHEDULER', 'default_drone_set_name', default=None)

    name = dbmodels.CharField(max_length=255, unique=True)
    drones = dbmodels.ManyToManyField(Drone, db_table='afe_drone_sets_drones')

    name_field = 'name'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Persist the drone set; restricted to superusers."""
        current = User.current_user()
        if not current.is_superuser():
            raise Exception('Only superusers may edit drone sets')
        super(DroneSet, self).save(*args, **kwargs)


    def delete(self):
        """Delete the drone set; restricted to superusers."""
        current = User.current_user()
        if not current.is_superuser():
            raise Exception('Only superusers may delete drone sets')
        super(DroneSet, self).delete()


    @classmethod
    def drone_sets_enabled(cls):
        """Report whether the drone-sets feature is turned on in the config.

        @param cls: Implicit class object.
        """
        return cls.DRONE_SETS_ENABLED


    @classmethod
    def default_drone_set_name(cls):
        """Report the globally configured default drone set name.

        @param cls: Implicit class object.
        """
        return cls.DEFAULT_DRONE_SET_NAME


    @classmethod
    def get_default(cls):
        """Fetch the default drone set, compatible with Job.add_object.

        @param cls: Implicit class object.
        """
        return cls.smart_get(cls.DEFAULT_DRONE_SET_NAME)


    @classmethod
    def resolve_name(cls, drone_set_name):
        """
        Returns the name of one of these, if not None, in order of preference:
        1) the drone set given,
        2) the current user's default drone set, or
        3) the global default drone set

        or returns None if drone sets are disabled

        @param cls: Implicit class object.
        @param drone_set_name: A drone set name.
        """
        if not cls.drone_sets_enabled():
            return None

        user = User.current_user()
        user_default = user.drone_set.name if user.drone_set else None

        return drone_set_name or user_default or cls.get_default().name


    def get_drone_hostnames(self):
        """
        Gets the hostnames of all drones in this drone set
        """
        hostnames = self.drones.all().values_list('hostname', flat=True)
        return set(hostnames)


    class Meta:
        """Metadata for class DroneSet."""
        db_table = 'afe_drone_sets'

    def __unicode__(self):
        return unicode(self.name)
318
319
class User(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    login: user login name

    Optional:
    access_level: 0=User (default), 1=Admin, 100=Root
    """
    ACCESS_ROOT = 100
    ACCESS_ADMIN = 1
    ACCESS_USER = 0

    # Login used when no real user is bound to the current thread.
    AUTOTEST_SYSTEM = 'autotest_system'

    login = dbmodels.CharField(max_length=255, unique=True)
    access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)

    # user preferences
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)
    show_experimental = dbmodels.BooleanField(default=False)

    name_field = 'login'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Save the user, enforcing ACLs and default group membership.

        Only a superuser, or the user themselves, may modify a user record.
        Newly created users are added to the 'Everyone' ACL group.

        @raises AclAccessViolation: if the current user may not modify this
                record.
        """
        # is this a new object being saved for the first time?
        first_time = (self.id is None)
        user = thread_local.get_user()
        if user and not user.is_superuser() and user.login != self.login:
            raise AclAccessViolation("You cannot modify user " + self.login)
        super(User, self).save(*args, **kwargs)
        if first_time:
            everyone = AclGroup.objects.get(name='Everyone')
            everyone.users.add(self)


    def is_superuser(self):
        """Returns whether the user has superuser access."""
        return self.access_level >= self.ACCESS_ROOT


    @classmethod
    def current_user(cls):
        """Returns the current user.

        Falls back to (and lazily creates) the AUTOTEST_SYSTEM root user when
        no user is bound to the current thread.

        @param cls: Implicit class object.
        """
        user = thread_local.get_user()
        if user is None:
            user, _ = cls.objects.get_or_create(login=cls.AUTOTEST_SYSTEM)
            user.access_level = cls.ACCESS_ROOT
            user.save()
        return user


    @classmethod
    def get_record(cls, data):
        """Check the database for an identical record.

        Check for a record with matching id and login. If one exists,
        return it. If one does not exist there is a possibility that
        the following cases have happened:
        1. Same id, different login
            We received: "1 chromeos-test"
            And we have: "1 debug-user"
        In this case we need to delete "1 debug-user" and insert
        "1 chromeos-test".

        2. Same login, different id:
            We received: "1 chromeos-test"
            And we have: "2 chromeos-test"
        In this case we need to delete "2 chromeos-test" and insert
        "1 chromeos-test".

        As long as this method deletes bad records and raises the
        DoesNotExist exception the caller will handle creating the
        new record.

        @raises: DoesNotExist, if a record with the matching login and id
                does not exist.
        """

        # Both the id and login should be unique but there are cases when
        # we might already have a user with the same login/id because
        # current_user will proactively create a user record if it doesn't
        # exist. Since we want to avoid conflict between the master and
        # shard, just delete any existing user records that don't match
        # what we're about to deserialize from the master.
        try:
            return cls.objects.get(login=data['login'], id=data['id'])
        except cls.DoesNotExist:
            cls.delete_matching_record(login=data['login'])
            cls.delete_matching_record(id=data['id'])
            raise


    class Meta:
        """Metadata for class User."""
        db_table = 'afe_users'

    def __unicode__(self):
        return unicode(self.login)
429
430
class Host(model_logic.ModelWithInvalid, rdb_model_extensions.AbstractHostModel,
           model_logic.ModelWithAttributes):
    """\
    Required:
    hostname

    optional:
    locked: if true, host is locked and will not be queued

    Internal:
    From AbstractHostModel:
        synch_id: currently unused
        status: string describing status of host
        invalid: true if the host has been deleted
        protection: indicates what can be done to this host during repair
        lock_time: DateTime at which the host was locked
        dirty: true if the host has been used without being rebooted
    Local:
        locked_by: user that locked the host, or null if the host is unlocked
    """

    SERIALIZATION_LINKS_TO_FOLLOW = set(['aclgroup_set',
                                         'hostattribute_set',
                                         'labels',
                                         'shard'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['invalid'])


    def custom_deserialize_relation(self, link, data):
        """Deserialize the 'shard' relation; no other link is supported.

        @param link: Name of the link being deserialized; must be 'shard'.
        @param data: Serialized representation of the linked Shard.
        """
        assert link == 'shard', 'Link %s should not be deserialized' % link
        self.shard = Shard.deserialize(data)


    # Note: Only specify foreign keys here, specify all native host columns in
    # rdb_model_extensions instead.
    Protection = host_protections.Protection
    labels = dbmodels.ManyToManyField(Label, blank=True,
                                      db_table='afe_hosts_labels')
    locked_by = dbmodels.ForeignKey(User, null=True, blank=True, editable=False)
    name_field = 'hostname'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    leased_objects = model_logic.LeasedHostManager()

    shard = dbmodels.ForeignKey(Shard, blank=True, null=True)

    def __init__(self, *args, **kwargs):
        super(Host, self).__init__(*args, **kwargs)
        # Track 'status' so on_attribute_changed fires whenever it changes.
        self._record_attributes(['status'])


    @staticmethod
    def create_one_time_host(hostname):
        """Creates a one-time host.

        The host is created (or reused, if an invalid record already exists)
        with protection DO_NOT_REPAIR and is left unlocked and detached from
        ACL groups and labels.

        @param hostname: The name for the host.
        @raises model_logic.ValidationError: if a valid host with this
                hostname already exists.
        @return: The one-time Host object.
        """
        query = Host.objects.filter(hostname=hostname)
        if query.count() == 0:
            host = Host(hostname=hostname, invalid=True)
            host.do_validate()
        else:
            host = query[0]
            if not host.invalid:
                raise model_logic.ValidationError({
                    'hostname' : '%s already exists in the autotest DB.  '
                        'Select it rather than entering it as a one time '
                        'host.' % hostname
                    })
        host.protection = host_protections.Protection.DO_NOT_REPAIR
        host.locked = False
        host.save()
        host.clean_object()
        return host


    @classmethod
    def assign_to_shard(cls, shard, known_ids):
        """Assigns hosts to a shard.

        For all labels that have been assigned to a shard, all hosts that
        have at least one of the shard's labels are assigned to the shard.
        Hosts that are assigned to the shard but aren't already present on the
        shard are returned.

        Board to shard mapping is many-to-one. Many different boards can be
        hosted in a shard. However, DUTs of a single board cannot be distributed
        into more than one shard.

        @param shard: The shard object to assign labels/hosts for.
        @param known_ids: List of all host-ids the shard already knows.
                          This is used to figure out which hosts should be sent
                          to the shard. If shard_ids were used instead, hosts
                          would only be transferred once, even if the client
                          failed persisting them.
                          The number of hosts usually lies in O(100), so the
                          overhead is acceptable.

        @returns the hosts objects that should be sent to the shard.
        """

        # Disclaimer: concurrent heartbeats should theoretically not occur in
        # the current setup. As they may be introduced in the near future,
        # this comment will be left here.

        # Sending stuff twice is acceptable, but forgetting something isn't.
        # Detecting duplicates on the client is easy, but here it's harder. The
        # following options were considered:
        # - SELECT ... WHERE and then UPDATE ... WHERE: Update might update more
        #   than select returned, as concurrently more hosts might have been
        #   inserted
        # - UPDATE and then SELECT WHERE shard=shard: select always returns all
        #   hosts for the shard, this is overhead
        # - SELECT and then UPDATE only selected without requerying afterwards:
        #   returns the old state of the records.
        host_ids = set(Host.objects.filter(
            labels__in=shard.labels.all(),
            leased=False
            ).exclude(
            id__in=known_ids,
            ).values_list('pk', flat=True))

        if host_ids:
            Host.objects.filter(pk__in=host_ids).update(shard=shard)
            return list(Host.objects.filter(pk__in=host_ids).all())
        return []

    def resurrect_object(self, old_object):
        """Restore fields from a previously invalidated host record.

        @param old_object: The invalid Host record being resurrected.
        """
        super(Host, self).resurrect_object(old_object)
        # invalid hosts can be in use by the scheduler (as one-time hosts), so
        # don't change the status
        self.status = old_object.status


    def clean_object(self):
        """Detach this host from ACL groups and labels on invalidation."""
        self.aclgroup_set.clear()
        self.labels.clear()


    def record_state(self, type_str, state, value, other_metadata=None):
        """Record metadata in elasticsearch.

        @param type_str: sets the _type field in elasticsearch db.
        @param state: string representing what state we are recording,
                      e.g. 'locked'
        @param value: value of the state, e.g. True
        @param other_metadata: Other metadata to store in metaDB.
        """
        metadata = {
            state: value,
            'hostname': self.hostname,
        }
        if other_metadata:
            metadata = dict(metadata.items() + other_metadata.items())
        autotest_es.post(use_http=True, type_str=type_str, metadata=metadata)


    def save(self, *args, **kwargs):
        """Save the host, maintaining lock bookkeeping and ACL membership.

        Lock transitions are mirrored to the metadata DB via record_state,
        and newly created hosts are added to the 'Everyone' ACL group.
        """
        # extra spaces in the hostname can be a sneaky source of errors
        self.hostname = self.hostname.strip()
        # is this a new object being saved for the first time?
        first_time = (self.id is None)
        if not first_time:
            AclGroup.check_for_acl_violation_hosts([self])
        # If locked is changed, send its status and user made the change to
        # metaDB. Locks are important in host history because if a device is
        # locked then we don't really care what state it is in.
        if self.locked and not self.locked_by:
            self.locked_by = User.current_user()
            if not self.lock_time:
                self.lock_time = datetime.now()
            self.record_state('lock_history', 'locked', self.locked,
                              {'changed_by': self.locked_by.login,
                               'lock_reason': self.lock_reason})
            self.dirty = True
        elif not self.locked and self.locked_by:
            self.record_state('lock_history', 'locked', self.locked,
                              {'changed_by': self.locked_by.login})
            self.locked_by = None
            self.lock_time = None
        super(Host, self).save(*args, **kwargs)
        if first_time:
            everyone = AclGroup.objects.get(name='Everyone')
            everyone.hosts.add(self)
        self._check_for_updated_attributes()


    def delete(self):
        """Delete the host, aborting any queue entries that reference it."""
        AclGroup.check_for_acl_violation_hosts([self])
        for queue_entry in self.hostqueueentry_set.all():
            queue_entry.deleted = True
            queue_entry.abort()
        super(Host, self).delete()


    def on_attribute_changed(self, attribute, old_value):
        """Log status transitions (the only attribute recorded in __init__)."""
        assert attribute == 'status'
        logging.info(self.hostname + ' -> ' + self.status)


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job on this host.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        queue_entry = HostQueueEntry.create(host=self, job=job,
                                            is_template=is_template,
                                            atomic_group=atomic_group)
        # allow recovery of dead hosts from the frontend
        if not self.active_queue_entry() and self.is_dead():
            self.status = Host.Status.READY
            self.save()
        queue_entry.save()

        block = IneligibleHostQueue(job=job, host=self)
        block.save()


    def platform(self):
        """The platform of the host."""
        # TODO(showard): slightly hacky?
        platforms = self.labels.filter(platform=True)
        if len(platforms) == 0:
            return None
        return platforms[0]
    platform.short_description = 'Platform'


    @classmethod
    def check_no_platform(cls, hosts):
        """Verify the specified hosts have no associated platforms.

        @param cls: Implicit class object.
        @param hosts: The hosts to verify.
        @raises model_logic.ValidationError if any hosts already have a
            platform.
        """
        Host.objects.populate_relationships(hosts, Label, 'label_list')
        errors = []
        for host in hosts:
            platforms = [label.name for label in host.label_list
                         if label.platform]
            if platforms:
                # do a join, just in case this host has multiple platforms,
                # we'll be able to see it
                errors.append('Host %s already has a platform: %s' % (
                              host.hostname, ', '.join(platforms)))
        if errors:
            raise model_logic.ValidationError({'labels': '; '.join(errors)})


    def is_dead(self):
        """Returns whether the host is dead (has status repair failed)."""
        return self.status == Host.Status.REPAIR_FAILED


    def active_queue_entry(self):
        """Returns the active queue entry for this host, or None if none."""
        active = list(self.hostqueueentry_set.filter(active=True))
        if not active:
            return None
        assert len(active) == 1, ('More than one active entry for '
                                  'host ' + self.hostname)
        return active[0]


    def _get_attribute_model_and_args(self, attribute):
        """Return the model and lookup kwargs used to store host attributes."""
        return HostAttribute, dict(host=self, attribute=attribute)


    @classmethod
    def get_attribute_model(cls):
        """Return the attribute model.

        Override method in parent class. See ModelExtensions for details.
        @returns: The attribute model of Host.
        """
        return HostAttribute


    class Meta:
        """Metadata for the Host class."""
        db_table = 'afe_hosts'


    def __unicode__(self):
        return unicode(self.hostname)
720
721
class HostAttribute(dbmodels.Model, model_logic.ModelExtensions):
    """Arbitrary keyvals associated with hosts."""

    SERIALIZATION_LINKS_TO_KEEP = set(['host'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['value'])
    host = dbmodels.ForeignKey(Host)
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for the HostAttribute class."""
        db_table = 'afe_host_attributes'


    @classmethod
    def get_record(cls, data):
        """Check the database for an identical record.

        Use host_id and attribute to search for a existing record.

        @raises: DoesNotExist, if no record found
        @raises: MultipleObjectsReturned if multiple records found.
        """
        # TODO(fdeng): We should use host_id and attribute together as
        #              a primary key in the db.
        return cls.objects.get(host_id=data['host_id'],
                               attribute=data['attribute'])


    @classmethod
    def deserialize(cls, data):
        """Override deserialize in parent class.

        Do not deserialize id as id is not kept consistent on master and shards.

        @param data: A dictionary of data to deserialize.

        @returns: A HostAttribute object.
        """
        if data:
            # Pop with a default so a payload that already lacks 'id' does
            # not raise KeyError; the intent is only "never deserialize id".
            data.pop('id', None)
        return super(HostAttribute, cls).deserialize(data)
766
767
768class Test(dbmodels.Model, model_logic.ModelExtensions):
769    """\
770    Required:
771    author: author name
772    description: description of the test
773    name: test name
774    time: short, medium, long
775    test_class: This describes the class for your the test belongs in.
776    test_category: This describes the category for your tests
777    test_type: Client or Server
778    path: path to pass to run_test()
779    sync_count:  is a number >=1 (1 being the default). If it's 1, then it's an
780                 async job. If it's >1 it's sync job for that number of machines
781                 i.e. if sync_count = 2 it is a sync job that requires two
782                 machines.
783    Optional:
784    dependencies: What the test requires to run. Comma deliminated list
785    dependency_labels: many-to-many relationship with labels corresponding to
786                       test dependencies.
787    experimental: If this is set to True production servers will ignore the test
788    run_verify: Whether or not the scheduler should run the verify stage
789    run_reset: Whether or not the scheduler should run the reset stage
790    test_retry: Number of times to retry test if the test did not complete
791                successfully. (optional, default: 0)
792    """
    # Coarse expected-runtime bucket; stored as a small int via choices().
    TestTime = enum.Enum('SHORT', 'MEDIUM', 'LONG', start_value=1)

    name = dbmodels.CharField(max_length=255, unique=True)
    author = dbmodels.CharField(max_length=255)
    test_class = dbmodels.CharField(max_length=255)
    test_category = dbmodels.CharField(max_length=255)
    # Comma-delimited list of requirements (see class docstring).
    dependencies = dbmodels.CharField(max_length=255, blank=True)
    description = dbmodels.TextField(blank=True)
    # Defaults to True: per the class docstring, production servers ignore
    # tests marked experimental.
    experimental = dbmodels.BooleanField(default=True)
    run_verify = dbmodels.BooleanField(default=False)
    test_time = dbmodels.SmallIntegerField(choices=TestTime.choices(),
                                           default=TestTime.MEDIUM)
    # Client or Server, per control_data.CONTROL_TYPE.
    test_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices())
    # Number of machines required per execution; 1 means an async job.
    sync_count = dbmodels.IntegerField(default=1)
    # Control file path; unique per test.
    path = dbmodels.CharField(max_length=255, unique=True)
    test_retry = dbmodels.IntegerField(blank=True, default=0)
    run_reset = dbmodels.BooleanField(default=True)

    dependency_labels = (
        dbmodels.ManyToManyField(Label, blank=True,
                                 db_table='afe_autotests_dependency_labels'))
    # 'name_field' is a convention consumed by model_logic extensions.
    name_field = 'name'
    objects = model_logic.ExtendedManager()
817
818
819    def admin_description(self):
820        """Returns a string representing the admin description."""
821        escaped_description = saxutils.escape(self.description)
822        return '<span style="white-space:pre">%s</span>' % escaped_description
823    admin_description.allow_tags = True
824    admin_description.short_description = 'Description'
825
826
    class Meta:
        """Metadata for class Test."""
        # Explicit table name: the model is Test but rows live in
        # 'afe_autotests'.
        db_table = 'afe_autotests'
830
    def __unicode__(self):
        # Tests display by name (Python 2 unicode protocol).
        return unicode(self.name)
833
834
class TestParameter(dbmodels.Model):
    """
    A declared parameter of a test
    """
    # The test this parameter belongs to.
    test = dbmodels.ForeignKey(Test)
    name = dbmodels.CharField(max_length=255)

    class Meta:
        """Metadata for class TestParameter."""
        db_table = 'afe_test_parameters'
        # A parameter name may appear only once per test.
        unique_together = ('test', 'name')

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.test.name)
849
850
class Profiler(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    name: profiler name

    Optional:
    description: arbitrary text description
    """
    # NOTE(review): an earlier docstring also listed 'test_type: Client or
    # Server', but no such field exists on this model — confirm before
    # relying on it.
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)

    # 'name_field' is a convention consumed by model_logic extensions.
    name_field = 'name'
    objects = model_logic.ExtendedManager()


    class Meta:
        """Metadata for class Profiler."""
        db_table = 'afe_profilers'

    def __unicode__(self):
        return unicode(self.name)
873
874
class AclGroup(dbmodels.Model, model_logic.ModelExtensions):
    """\
    An access control group relating users to the hosts they may use.

    Required:
    name: name of ACL group

    Optional:
    description: arbitrary description of group
    """

    SERIALIZATION_LINKS_TO_FOLLOW = set(['users'])

    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.CharField(max_length=255, blank=True)
    users = dbmodels.ManyToManyField(User, blank=False,
                                     db_table='afe_acl_groups_users')
    hosts = dbmodels.ManyToManyField(Host, blank=True,
                                     db_table='afe_acl_groups_hosts')

    name_field = 'name'
    objects = model_logic.ExtendedManager()

    @staticmethod
    def check_for_acl_violation_hosts(hosts):
        """Verify the current user has access to the specified hosts.

        Superusers always pass; metahosts and one-time (invalid) hosts are
        exempt from the check.

        @param hosts: The hosts to verify against.
        @raises AclAccessViolation if the current user doesn't have access
            to a host.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        accessible_host_ids = set(
            host.id for host in Host.objects.filter(aclgroup__users=user))
        for host in hosts:
            # Check if the user has access to this host,
            # but only if it is not a metahost or a one-time-host.
            no_access = (isinstance(host, Host)
                         and not host.invalid
                         and int(host.id) not in accessible_host_ids)
            if no_access:
                raise AclAccessViolation("%s does not have access to %s" %
                                         (str(user), str(host)))


    @staticmethod
    def check_abort_permissions(queue_entries):
        """Look for queue entries that aren't abortable by the current user.

        An entry is not abortable if:
           * the job isn't owned by this user, and
           * the machine isn't ACL-accessible, or
           * the machine is in the "Everyone" ACL

        @param queue_entries: The queue entries to check.
        @raises AclAccessViolation if a queue entry is not abortable by the
            current user.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        not_owned = queue_entries.exclude(job__owner=user.login)
        # I do this using ID sets instead of just Django filters because
        # filtering on M2M dbmodels is broken in Django 0.96. It's better in
        # 1.0.
        # TODO: Use Django filters, now that we're using 1.0.
        accessible_ids = set(
            entry.id for entry
            in not_owned.filter(host__aclgroup__users__login=user.login))
        public_ids = set(entry.id for entry
                         in not_owned.filter(host__aclgroup__name='Everyone'))
        cannot_abort = [entry for entry in not_owned.select_related()
                        if entry.id not in accessible_ids
                        or entry.id in public_ids]
        # Idiomatic emptiness test (was: len(cannot_abort) == 0).
        if not cannot_abort:
            return
        entry_names = ', '.join('%s-%s/%s' % (entry.job.id, entry.job.owner,
                                              entry.host_or_metahost_name())
                                for entry in cannot_abort)
        raise AclAccessViolation('You cannot abort the following job entries: '
                                 + entry_names)


    def check_for_acl_violation_acl_group(self):
        """Verifies the current user has access to this ACL group.

        @raises AclAccessViolation if the current user doesn't have access to
            this ACL group, or if this is the protected 'Everyone' group.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        if self.name == 'Everyone':
            raise AclAccessViolation("You cannot modify 'Everyone'!")
        # 'user not in' is the idiomatic negated membership test.
        if user not in self.users.all():
            raise AclAccessViolation("You do not have access to %s"
                                     % self.name)

    @staticmethod
    def on_host_membership_change():
        """Invoked when host membership changes.

        Maintains the invariant that every valid host belongs to at least one
        ACL group, and that 'Everyone' only contains hosts with no other
        group.
        """
        everyone = AclGroup.objects.get(name='Everyone')

        # find hosts that aren't in any ACL group and add them to Everyone
        # TODO(showard): this is a bit of a hack, since the fact that this query
        # works is kind of a coincidence of Django internals.  This trick
        # doesn't work in general (on all foreign key relationships).  I'll
        # replace it with a better technique when the need arises.
        orphaned_hosts = Host.valid_objects.filter(aclgroup__id__isnull=True)
        everyone.hosts.add(*orphaned_hosts.distinct())

        # find hosts in both Everyone and another ACL group, and remove them
        # from Everyone
        hosts_in_everyone = Host.valid_objects.filter(aclgroup__name='Everyone')
        acled_hosts = set()
        for host in hosts_in_everyone:
            # Has an ACL group other than Everyone
            if host.aclgroup_set.count() > 1:
                acled_hosts.add(host)
        everyone.hosts.remove(*acled_hosts)


    def delete(self):
        """Deletes this group after ACL checks; 'Everyone' is undeletable."""
        if self.name == 'Everyone':
            raise AclAccessViolation("You cannot delete 'Everyone'!")
        self.check_for_acl_violation_acl_group()
        super(AclGroup, self).delete()
        self.on_host_membership_change()


    def add_current_user_if_empty(self):
        """Adds the current user if the set of users is empty."""
        if not self.users.count():
            self.users.add(User.current_user())


    def perform_after_save(self, change):
        """Called after a save.

        @param change: True if the save updated an existing row, False if a
            new row was created (see save(), which passes bool(self.id)).
        """
        if not change:
            self.users.add(User.current_user())
        self.add_current_user_if_empty()
        self.on_host_membership_change()


    def save(self, *args, **kwargs):
        """Saves the group, enforcing ACLs and membership invariants."""
        change = bool(self.id)
        if change:
            # Check the original object for an ACL violation
            AclGroup.objects.get(id=self.id).check_for_acl_violation_acl_group()
        super(AclGroup, self).save(*args, **kwargs)
        self.perform_after_save(change)


    class Meta:
        """Metadata for class AclGroup."""
        db_table = 'afe_acl_groups'

    def __unicode__(self):
        return unicode(self.name)
1037
1038
class Kernel(dbmodels.Model):
    """
    A kernel configuration for a parameterized job
    """
    version = dbmodels.CharField(max_length=255)
    cmdline = dbmodels.CharField(max_length=255, blank=True)

    @classmethod
    def create_kernels(cls, kernel_list):
        """Creates all kernels in the kernel list.

        @param cls: Implicit class object.
        @param kernel_list: A list of dictionaries that describe the kernels,
            in the same format as the 'kernel' argument to
            rpc_interface.generate_control_file.
        @return A list of the created kernels, or None when kernel_list is
            empty or None.
        """
        if not kernel_list:
            return None
        return [cls._create(kernel_dict) for kernel_dict in kernel_list]


    @classmethod
    def _create(cls, kernel_dict):
        """Fetches or creates the single Kernel row kernel_dict describes.

        @param kernel_dict: Dict with a required 'version' key and optional
            'cmdline' key; any other keys are rejected. Consumed keys are
            popped from the dict.
        """
        lookup = {'version': kernel_dict.pop('version'),
                  'cmdline': kernel_dict.pop('cmdline', '')}

        if kernel_dict:
            raise Exception('Extraneous kernel arguments remain: %r'
                            % kernel_dict)

        kernel, _ = cls.objects.get_or_create(**lookup)
        return kernel


    class Meta:
        """Metadata for class Kernel."""
        db_table = 'afe_kernels'
        unique_together = ('version', 'cmdline')

    def __unicode__(self):
        return u'%s %s' % (self.version, self.cmdline)
1082
1083
class ParameterizedJob(dbmodels.Model):
    """
    Auxiliary configuration for a parameterized job.
    """
    test = dbmodels.ForeignKey(Test)
    label = dbmodels.ForeignKey(Label, null=True)
    use_container = dbmodels.BooleanField(default=False)
    profile_only = dbmodels.BooleanField(default=False)
    upload_kernel_config = dbmodels.BooleanField(default=False)

    kernels = dbmodels.ManyToManyField(
            Kernel, db_table='afe_parameterized_job_kernels')
    profilers = dbmodels.ManyToManyField(
            Profiler, through='ParameterizedJobProfiler')


    @classmethod
    def smart_get(cls, id_or_name, *args, **kwargs):
        """For compatibility with Job.add_object.

        @param cls: Implicit class object.
        @param id_or_name: The ID or name to get.
        @param args: Non-keyword arguments.
        @param kwargs: Keyword arguments.
        """
        return cls.objects.get(pk=id_or_name)


    def job(self):
        """Returns the job if it exists, or else None."""
        jobs = self.job_set.all()
        assert jobs.count() <= 1
        # Use a conditional expression instead of the fragile
        # 'X and Y or Z' idiom (which breaks whenever Y is falsy).
        return jobs[0] if jobs else None


    class Meta:
        """Metadata for class ParameterizedJob."""
        db_table = 'afe_parameterized_jobs'

    def __unicode__(self):
        return u'%s (parameterized) - %s' % (self.test.name, self.job())
1125
1126
class ParameterizedJobProfiler(dbmodels.Model):
    """
    A profiler to run on a parameterized job
    """
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    profiler = dbmodels.ForeignKey(Profiler)

    class Meta:
        """Metadata for class ParameterizedJobProfiler."""
        db_table = 'afe_parameterized_jobs_profilers'
        # A profiler may be attached to a given parameterized job only once.
        unique_together = ('parameterized_job', 'profiler')
1138
1139
class ParameterizedJobProfilerParameter(dbmodels.Model):
    """
    A parameter for a profiler in a parameterized job
    """
    parameterized_job_profiler = dbmodels.ForeignKey(ParameterizedJobProfiler)
    parameter_name = dbmodels.CharField(max_length=255)
    # Value is stored as text; parameter_type records how to interpret it.
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobProfilerParameter."""
        db_table = 'afe_parameterized_job_profiler_parameters'
        # Each parameter name is unique within a profiler attachment.
        unique_together = ('parameterized_job_profiler', 'parameter_name')

    def __unicode__(self):
        return u'%s - %s' % (self.parameterized_job_profiler.profiler.name,
                             self.parameter_name)
1158
1159
class ParameterizedJobParameter(dbmodels.Model):
    """
    Parameters for a parameterized job
    """
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    test_parameter = dbmodels.ForeignKey(TestParameter)
    # Value is stored as text; parameter_type records how to interpret it.
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobParameter."""
        db_table = 'afe_parameterized_job_parameters'
        unique_together = ('parameterized_job', 'test_parameter')

    def __unicode__(self):
        # NOTE(review): assumes parameterized_job.job() is not None here —
        # job() can return None when no Job references this config; confirm.
        return u'%s - %s' % (self.parameterized_job.job().name,
                             self.test_parameter.name)
1178
1179
class JobManager(model_logic.ExtendedManager):
    'Custom manager to provide efficient status counts querying.'
    def get_status_counts(self, job_ids):
        """Returns a dict mapping the given job IDs to their status count dicts.

        @param job_ids: A list of job IDs.
        """
        if not job_ids:
            return {}
        # Every requested job gets an entry, even if it has no queue entries.
        counts_by_job = dict((job_id, {}) for job_id in job_ids)
        id_list = '(%s)' % ','.join(str(job_id) for job_id in job_ids)
        cursor = connection.cursor()
        cursor.execute("""
            SELECT job_id, status, aborted, complete, COUNT(*)
            FROM afe_host_queue_entries
            WHERE job_id IN %s
            GROUP BY job_id, status, aborted, complete
            """ % id_list)
        for job_id, status, aborted, complete, count in cursor.fetchall():
            # Fold the (status, aborted, complete) triple into one full
            # status label and accumulate its count for this job.
            full_status = HostQueueEntry.compute_full_status(status, aborted,
                                                             complete)
            status_counts = counts_by_job[job_id]
            status_counts[full_status] = (
                    status_counts.get(full_status, 0) + count)
        return counts_by_job
1205
1206
1207class Job(dbmodels.Model, model_logic.ModelExtensions):
1208    """\
1209    owner: username of job owner
1210    name: job name (does not have to be unique)
1211    priority: Integer priority value.  Higher is more important.
1212    control_file: contents of control file
1213    control_type: Client or Server
1214    created_on: date of job creation
1215    submitted_on: date of job submission
1216    synch_count: how many hosts should be used per autoserv execution
1217    run_verify: Whether or not to run the verify phase
1218    run_reset: Whether or not to run the reset phase
1219    timeout: DEPRECATED - hours from queuing time until job times out
1220    timeout_mins: minutes from job queuing time until the job times out
1221    max_runtime_hrs: DEPRECATED - hours from job starting time until job
1222                     times out
1223    max_runtime_mins: minutes from job starting time until job times out
1224    email_list: list of people to email on completion delimited by any of:
1225                white space, ',', ':', ';'
1226    dependency_labels: many-to-many relationship with labels corresponding to
1227                       job dependencies
1228    reboot_before: Never, If dirty, or Always
1229    reboot_after: Never, If all tests passed, or Always
1230    parse_failed_repair: if True, a failed repair launched by this job will have
1231    its results parsed as part of the job.
1232    drone_set: The set of drones to run this job on
1233    parent_job: Parent job (optional)
1234    test_retry: Number of times to retry test if the test did not complete
1235                successfully. (optional, default: 0)
1236    require_ssp: Require server-side packaging unless require_ssp is set to
1237                 False. (optional, default: None)
1238    """
1239
1240    # TODO: Investigate, if jobkeyval_set is really needed.
1241    # dynamic_suite will write them into an attached file for the drone, but
1242    # it doesn't seem like they are actually used. If they aren't used, remove
1243    # jobkeyval_set here.
1244    SERIALIZATION_LINKS_TO_FOLLOW = set(['dependency_labels',
1245                                         'hostqueueentry_set',
1246                                         'jobkeyval_set',
1247                                         'shard'])
1248
1249    # SQL for selecting jobs that should be sent to shard.
1250    # We use raw sql as django filters were not optimized.
1251    # The following jobs are excluded by the SQL.
1252    #     - Non-aborted jobs known to shard as specified in |known_ids|.
1253    #       Note for jobs aborted on master, even if already known to shard,
1254    #       will be sent to shard again so that shard can abort them.
1255    #     - Completed jobs
1256    #     - Active jobs
1257    #     - Jobs without host_queue_entries
1258    NON_ABORTED_KNOWN_JOBS = '(t2.aborted = 0 AND t1.id IN (%(known_ids)s))'
1259
1260    SQL_SHARD_JOBS = (
1261        'SELECT DISTINCT(t1.id) FROM afe_jobs t1 '
1262        'INNER JOIN afe_host_queue_entries t2  ON '
1263        '  (t1.id = t2.job_id AND t2.complete != 1 AND t2.active != 1 '
1264        '   %(check_known_jobs)s) '
1265        'LEFT OUTER JOIN afe_jobs_dependency_labels t3 ON (t1.id = t3.job_id) '
1266        'JOIN afe_shards_labels t4 '
1267        '  ON (t4.label_id = t3.label_id OR t4.label_id = t2.meta_host) '
1268        'WHERE t4.shard_id = %(shard_id)s'
1269        )
1270
1271    # Jobs can be created with assigned hosts and have no dependency
1272    # labels nor meta_host.
1273    # We are looking for:
1274    #     - a job whose hqe's meta_host is null
1275    #     - a job whose hqe has a host
1276    #     - one of the host's labels matches the shard's label.
1277    # Non-aborted known jobs, completed jobs, active jobs, jobs
1278    # without hqe are exluded as we do with SQL_SHARD_JOBS.
1279    SQL_SHARD_JOBS_WITH_HOSTS = (
1280        'SELECT DISTINCT(t1.id) FROM afe_jobs t1 '
1281        'INNER JOIN afe_host_queue_entries t2 ON '
1282        '  (t1.id = t2.job_id AND t2.complete != 1 AND t2.active != 1 '
1283        '   AND t2.meta_host IS NULL AND t2.host_id IS NOT NULL '
1284        '   %(check_known_jobs)s) '
1285        'LEFT OUTER JOIN afe_hosts_labels t3 ON (t2.host_id = t3.host_id) '
1286        'WHERE (t3.label_id IN '
1287        '  (SELECT label_id FROM afe_shards_labels '
1288        '   WHERE shard_id = %(shard_id)s))'
1289        )
1290
1291    # Even if we had filters about complete, active and aborted
1292    # bits in the above two SQLs, there is a chance that
1293    # the result may still contain a job with an hqe with 'complete=1'
1294    # or 'active=1' or 'aborted=0 and afe_job.id in known jobs.'
1295    # This happens when a job has two (or more) hqes and at least
1296    # one hqe has different bits than others.
1297    # We use a second sql to ensure we exclude all un-desired jobs.
1298    SQL_JOBS_TO_EXCLUDE =(
1299        'SELECT t1.id FROM afe_jobs t1 '
1300        'INNER JOIN afe_host_queue_entries t2 ON '
1301        '  (t1.id = t2.job_id) '
1302        'WHERE (t1.id in (%(candidates)s) '
1303        '  AND (t2.complete=1 OR t2.active=1 '
1304        '  %(check_known_jobs)s))'
1305        )
1306
1307    def _deserialize_relation(self, link, data):
1308        if link in ['hostqueueentry_set', 'jobkeyval_set']:
1309            for obj in data:
1310                obj['job_id'] = self.id
1311
1312        super(Job, self)._deserialize_relation(link, data)
1313
1314
    def custom_deserialize_relation(self, link, data):
        """Deserializes the 'shard' relation of a serialized job.

        @param link: Name of the relation; only 'shard' is supported here.
        @param data: Serialized representation of the shard.
        """
        assert link == 'shard', 'Link %s should not be deserialized' % link
        self.shard = Shard.deserialize(data)
1318
1319
    def sanity_check_update_from_shard(self, shard, updated_serialized):
        """Rejects updates for this job that come from the wrong shard.

        @param shard: The shard the update was received from.
        @param updated_serialized: The update's serialized data (used only in
            the error message).
        @raises error.UnallowedRecordsSentToMaster if the job is assigned to
            a different shard than the sender.
        """
        # If the job got aborted on the master after the client fetched it
        # no shard_id will be set. The shard might still push updates though,
        # as the job might complete before the abort bit syncs to the shard.
        # Alternative considered: The master scheduler could be changed to not
        # set aborted jobs to completed that are sharded out. But that would
        # require database queries and seemed more complicated to implement.
        # This seems safe to do, as there won't be updates pushed from the
        # wrong shard; shards should be powered off and wiped when they are
        # removed from the master.
        if self.shard_id and self.shard_id != shard.id:
            raise error.UnallowedRecordsSentToMaster(
                'Job id=%s is assigned to shard (%s). Cannot update it with %s '
                'from shard %s.' % (self.id, self.shard_id, updated_serialized,
                                    shard.id))
1334                                    shard.id))
1335
1336
    # TIMEOUT is deprecated.
    DEFAULT_TIMEOUT = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_default', default=24)
    DEFAULT_TIMEOUT_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_mins_default', default=24*60)
    # MAX_RUNTIME_HRS is deprecated. Will be removed after switch to mins is
    # completed.
    DEFAULT_MAX_RUNTIME_HRS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_hrs_default', default=72)
    DEFAULT_MAX_RUNTIME_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_mins_default', default=72*60)
    DEFAULT_PARSE_FAILED_REPAIR = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'parse_failed_repair_default', type=bool,
        default=False)

    # Username of the job owner; a plain char field, not a FK to User.
    owner = dbmodels.CharField(max_length=255)
    name = dbmodels.CharField(max_length=255)
    priority = dbmodels.SmallIntegerField(default=priorities.Priority.DEFAULT)
    control_file = dbmodels.TextField(null=True, blank=True)
    control_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices(),
        blank=True, # to allow 0
        default=control_data.CONTROL_TYPE.CLIENT)
    created_on = dbmodels.DateTimeField()
    synch_count = dbmodels.IntegerField(blank=True, default=0)
    # Deprecated in favor of timeout_mins (see class docstring).
    timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
    run_verify = dbmodels.BooleanField(default=False)
    email_list = dbmodels.CharField(max_length=250, blank=True)
    dependency_labels = (
            dbmodels.ManyToManyField(Label, blank=True,
                                     db_table='afe_jobs_dependency_labels'))
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    parse_failed_repair = dbmodels.BooleanField(
        default=DEFAULT_PARSE_FAILED_REPAIR)
    # max_runtime_hrs is deprecated. Will be removed after switch to mins is
    # completed.
    max_runtime_hrs = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_HRS)
    max_runtime_mins = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_MINS)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)

    # Set only for parameterized jobs; mutually exclusive with control_file
    # in principle (see check_parameterized_job).
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob, null=True,
                                            blank=True)

    parent_job = dbmodels.ForeignKey('self', blank=True, null=True)

    test_retry = dbmodels.IntegerField(blank=True, default=0)

    run_reset = dbmodels.BooleanField(default=True)

    timeout_mins = dbmodels.IntegerField(default=DEFAULT_TIMEOUT_MINS)

    # If this is None on the master, a slave should be found.
    # If this is None on a slave, it should be synced back to the master
    shard = dbmodels.ForeignKey(Shard, blank=True, null=True)

    # If this is None, server-side packaging will be used for server side test,
    # unless it's disabled in global config AUTOSERV/enable_ssp_container.
    require_ssp = dbmodels.NullBooleanField(default=None, blank=True, null=True)

    # custom manager
    objects = JobManager()
1403
1404
    @decorators.cached_property
    def labels(self):
        """All the labels of this job.

        @return A list of this job's dependency Label objects; cached after
            the first access by @cached_property.
        """
        # We need to convert dependency_labels to a list, because all() gives us
        # back an iterator, and storing/caching an iterator means we'd only be
        # able to read from it once.
        return list(self.dependency_labels.all())
1412
1413
    def is_server_job(self):
        """Returns whether this job is of type server.

        @return True iff control_type equals CONTROL_TYPE.SERVER.
        """
        return self.control_type == control_data.CONTROL_TYPE.SERVER
1417
1418
    @classmethod
    def parameterized_jobs_enabled(cls):
        """Returns whether parameterized jobs are enabled.

        @param cls: Implicit class object.
        @return The boolean AUTOTEST_WEB/parameterized_jobs value from the
            global config.
        """
        return global_config.global_config.get_config_value(
                'AUTOTEST_WEB', 'parameterized_jobs', type=bool)
1427
1428
1429    @classmethod
1430    def check_parameterized_job(cls, control_file, parameterized_job):
1431        """Checks that the job is valid given the global config settings.
1432
1433        First, either control_file must be set, or parameterized_job must be
1434        set, but not both. Second, parameterized_job must be set if and only if
1435        the parameterized_jobs option in the global config is set to True.
1436
1437        @param cls: Implict class object.
1438        @param control_file: A control file.
1439        @param parameterized_job: A parameterized job.
1440        """
1441        if not (bool(control_file) ^ bool(parameterized_job)):
1442            raise Exception('Job must have either control file or '
1443                            'parameterization, but not both')
1444
1445        parameterized_jobs_enabled = cls.parameterized_jobs_enabled()
1446        if control_file and parameterized_jobs_enabled:
1447            raise Exception('Control file specified, but parameterized jobs '
1448                            'are enabled')
1449        if parameterized_job and not parameterized_jobs_enabled:
1450            raise Exception('Parameterized job specified, but parameterized '
1451                            'jobs are not enabled')
1452
1453
    @classmethod
    def create(cls, owner, options, hosts):
        """Creates a job.

        The job is created by taking some information (the listed args) and
        filling in the rest of the necessary information.

        @param cls: Implicit class object.
        @param owner: The owner for the job.
        @param options: An options dict; note that it is MUTATED in place
            (reboot/timeout defaults are filled in below).
        @param hosts: The hosts to use; the current user must have ACL
            access to all of them.
        @return The newly created Job object.
        @raises AclAccessViolation if the current user lacks access to any
            of the hosts.
        """
        AclGroup.check_for_acl_violation_hosts(hosts)

        control_file = options.get('control_file')
        parameterized_job = options.get('parameterized_job')

        # The current implementation of parameterized jobs requires that only
        # control files or parameterized jobs are used. Using the image
        # parameter on autoupdate_ParameterizedJob doesn't mix pure
        # parameterized jobs and control files jobs, it does muck enough with
        # normal jobs by adding a parameterized id to them that this check will
        # fail. So for now we just skip this check.
        # cls.check_parameterized_job(control_file=control_file,
        #                             parameterized_job=parameterized_job)
        user = User.current_user()
        # Fill reboot defaults from the user's preferences when unspecified.
        if options.get('reboot_before') is None:
            options['reboot_before'] = user.get_reboot_before_display()
        if options.get('reboot_after') is None:
            options['reboot_after'] = user.get_reboot_after_display()

        drone_set = DroneSet.resolve_name(options.get('drone_set'))

        # Derive timeout_mins from the deprecated hours-based timeout.
        if options.get('timeout_mins') is None and options.get('timeout'):
            options['timeout_mins'] = options['timeout'] * 60

        job = cls.add_object(
            owner=owner,
            name=options['name'],
            priority=options['priority'],
            control_file=control_file,
            control_type=options['control_type'],
            synch_count=options.get('synch_count'),
            # timeout needs to be deleted in the future.
            timeout=options.get('timeout'),
            timeout_mins=options.get('timeout_mins'),
            max_runtime_mins=options.get('max_runtime_mins'),
            run_verify=options.get('run_verify'),
            email_list=options.get('email_list'),
            reboot_before=options.get('reboot_before'),
            reboot_after=options.get('reboot_after'),
            parse_failed_repair=options.get('parse_failed_repair'),
            created_on=datetime.now(),
            drone_set=drone_set,
            parameterized_job=parameterized_job,
            parent_job=options.get('parent_job_id'),
            test_retry=options.get('test_retry'),
            run_reset=options.get('run_reset'),
            require_ssp=options.get('require_ssp'))

        # Direct M2M assignment replaces the job's dependency label set.
        job.dependency_labels = options['dependencies']

        if options.get('keyvals'):
            for key, value in options['keyvals'].iteritems():
                JobKeyval.objects.create(job=job, key=key, value=value)

        return job
1521
1522
    @classmethod
    def assign_to_shard(cls, shard, known_ids):
        """Assigns unassigned jobs to a shard.

        For all labels that have been assigned to this shard, all jobs that
        have this label, are assigned to this shard.

        Jobs that are assigned to the shard but aren't already present on the
        shard are returned.

        @param shard: The shard to assign jobs to.
        @param known_ids: List of all ids of incomplete jobs, the shard already
                          knows about.
                          This is used to figure out which jobs should be sent
                          to the shard. If shard_ids were used instead, jobs
                          would only be transferred once, even if the client
                          failed persisting them.
                          The number of unfinished jobs usually lies in O(1000).
                          Assuming one id takes 8 chars in the json, this means
                          overhead that lies in the lower kilobyte range.
                          A not in query with 5000 id's takes about 30ms.

        @returns The job objects that should be sent to the shard.
        """
        # Disclaimer: Concurrent heartbeats should not occur in today's setup.
        # If this changes or they are triggered manually, this applies:
        # Jobs may be returned more than once by concurrent calls of this
        # function, as there is a race condition between SELECT and UPDATE.
        job_ids = set([])
        check_known_jobs_exclude = ''
        check_known_jobs_include = ''

        if known_ids:
            check_known_jobs = (
                    cls.NON_ABORTED_KNOWN_JOBS %
                    {'known_ids': ','.join([str(i) for i in known_ids])})
            check_known_jobs_exclude = 'AND NOT ' + check_known_jobs
            check_known_jobs_include = 'OR ' + check_known_jobs

        # First pass: gather candidate job ids from both shard queries
        # (label-based and host-based).
        for sql in [cls.SQL_SHARD_JOBS, cls.SQL_SHARD_JOBS_WITH_HOSTS]:
            query = Job.objects.raw(sql % {
                    'check_known_jobs': check_known_jobs_exclude,
                    'shard_id': shard.id})
            job_ids |= set([j.id for j in query])

        # Second pass: drop candidates that have any complete/active hqe
        # (see SQL_JOBS_TO_EXCLUDE comment above).
        if job_ids:
            query = Job.objects.raw(
                    cls.SQL_JOBS_TO_EXCLUDE %
                    {'check_known_jobs': check_known_jobs_include,
                     'candidates': ','.join([str(i) for i in job_ids])})
            job_ids -= set([j.id for j in query])

        if job_ids:
            Job.objects.filter(pk__in=job_ids).update(shard=shard)
            return list(Job.objects.filter(pk__in=job_ids).all())
        return []
1579
1580
1581    def save(self, *args, **kwargs):
1582        # The current implementation of parameterized jobs requires that only
1583        # control files or parameterized jobs are used. Using the image
1584        # parameter on autoupdate_ParameterizedJob doesn't mix pure
1585        # parameterized jobs and control files jobs, it does muck enough with
1586        # normal jobs by adding a parameterized id to them that this check will
1587        # fail. So for now we just skip this check.
1588        # cls.check_parameterized_job(control_file=self.control_file,
1589        #                             parameterized_job=self.parameterized_job)
1590        super(Job, self).save(*args, **kwargs)
1591
1592
1593    def queue(self, hosts, atomic_group=None, is_template=False):
1594        """Enqueue a job on the given hosts.
1595
1596        @param hosts: The hosts to use.
1597        @param atomic_group: The associated atomic group.
1598        @param is_template: Whether the status should be "Template".
1599        """
1600        if not hosts:
1601            if atomic_group:
1602                # No hosts or labels are required to queue an atomic group
1603                # Job.  However, if they are given, we respect them below.
1604                atomic_group.enqueue_job(self, is_template=is_template)
1605            else:
1606                # hostless job
1607                entry = HostQueueEntry.create(job=self, is_template=is_template)
1608                entry.save()
1609            return
1610
1611        for host in hosts:
1612            host.enqueue_job(self, atomic_group=atomic_group,
1613                             is_template=is_template)
1614
1615
1616    def create_recurring_job(self, start_date, loop_period, loop_count, owner):
1617        """Creates a recurring job.
1618
1619        @param start_date: The starting date of the job.
1620        @param loop_period: How often to re-run the job, in seconds.
1621        @param loop_count: The re-run count.
1622        @param owner: The owner of the job.
1623        """
1624        rec = RecurringRun(job=self, start_date=start_date,
1625                           loop_period=loop_period,
1626                           loop_count=loop_count,
1627                           owner=User.objects.get(login=owner))
1628        rec.save()
1629        return rec.id
1630
1631
1632    def user(self):
1633        """Gets the user of this job, or None if it doesn't exist."""
1634        try:
1635            return User.objects.get(login=self.owner)
1636        except self.DoesNotExist:
1637            return None
1638
1639
1640    def abort(self):
1641        """Aborts this job."""
1642        for queue_entry in self.hostqueueentry_set.all():
1643            queue_entry.abort()
1644
1645
1646    def tag(self):
1647        """Returns a string tag for this job."""
1648        return server_utils.get_job_tag(self.id, self.owner)
1649
1650
1651    def keyval_dict(self):
1652        """Returns all keyvals for this job as a dictionary."""
1653        return dict((keyval.key, keyval.value)
1654                    for keyval in self.jobkeyval_set.all())
1655
1656
    @classmethod
    def get_attribute_model(cls):
        """Return the attribute model.

        Override method in parent class. This class is called when
        deserializing the one-to-many relationship between Job and JobKeyval.
        On deserialization, we will try to clear any existing job keyvals
        associated with a job to avoid any inconsistency.
        Though Job doesn't implement ModelWithAttribute, we still treat
        it as an attribute model for this purpose.

        @returns: The attribute model of Job.
        """
        return JobKeyval
1671
1672
    class Meta:
        """Metadata for class Job."""
        # Maps this model onto the legacy AFE jobs table.
        db_table = 'afe_jobs'
1676
    def __unicode__(self):
        """Unicode representation: '<name> (<id>-<owner>)'."""
        return u'%s (%s-%s)' % (self.name, self.id, self.owner)
1679
1680
class JobKeyval(dbmodels.Model, model_logic.ModelExtensions):
    """Keyvals associated with jobs"""

    # Keep the link to the owning job as a reference when serializing.
    SERIALIZATION_LINKS_TO_KEEP = set(['job'])
    # 'value' may be updated locally on a shard and synced back to master.
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['value'])

    job = dbmodels.ForeignKey(Job)
    key = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()


    @classmethod
    def get_record(cls, data):
        """Check the database for an identical record.

        Use job_id and key to search for an existing record.

        @raises: DoesNotExist, if no record found
        @raises: MultipleObjectsReturned if multiple records found.
        """
        # TODO(fdeng): We should use job_id and key together as
        #              a primary key in the db.
        return cls.objects.get(job_id=data['job_id'], key=data['key'])


    @classmethod
    def deserialize(cls, data):
        """Override deserialize in parent class.

        Do not deserialize id as id is not kept consistent on master and shards.

        @param data: A dictionary of data to deserialize.

        @returns: A JobKeyval object.
        """
        if data:
            # Drop the primary key if present (robust against payloads that
            # omit it); ids differ between master and shards, so the local
            # database assigns its own.
            data.pop('id', None)
        return super(JobKeyval, cls).deserialize(data)


    class Meta:
        """Metadata for class JobKeyval."""
        db_table = 'afe_job_keyvals'
1726
1727
class IneligibleHostQueue(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an ineligible host queue."""
    # A (job, host) pair marking the host as ineligible for that job.
    job = dbmodels.ForeignKey(Job)
    host = dbmodels.ForeignKey(Host)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class IneligibleHostQueue."""
        db_table = 'afe_ineligible_host_queues'
1738
1739
class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents a host queue entry."""

    # Serialization behavior for master<->shard syncing: meta_host is
    # serialized in full, host is kept as a reference only, and 'aborted'
    # may be updated locally on a shard and synced back.
    SERIALIZATION_LINKS_TO_FOLLOW = set(['meta_host'])
    SERIALIZATION_LINKS_TO_KEEP = set(['host'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['aborted'])


    def custom_deserialize_relation(self, link, data):
        """Deserialize the meta_host relation.

        @param link: Name of the relation; only 'meta_host' is supported.
        @param data: Serialized Label data to deserialize into meta_host.
        """
        assert link == 'meta_host'
        self.meta_host = Label.deserialize(data)


    def sanity_check_update_from_shard(self, shard, updated_serialized,
                                       job_ids_sent):
        """Ensure a shard only uploads HQEs for jobs that were sent to it.

        @param shard: The shard the update originated from.
        @param updated_serialized: The serialized HQE data being uploaded.
        @param job_ids_sent: Ids of the jobs sent to the shard.

        @raises error.UnallowedRecordsSentToMaster: If this HQE's job was
                not among the jobs sent to the shard.
        """
        if self.job_id not in job_ids_sent:
            raise error.UnallowedRecordsSentToMaster(
                'Sent HostQueueEntry without corresponding '
                'job entry: %s' % updated_serialized)


    # Status constants shared with the scheduler via
    # host_queue_entry_states.
    Status = host_queue_entry_states.Status
    ACTIVE_STATUSES = host_queue_entry_states.ACTIVE_STATUSES
    COMPLETE_STATUSES = host_queue_entry_states.COMPLETE_STATUSES
    PRE_JOB_STATUSES = host_queue_entry_states.PRE_JOB_STATUSES
    IDLE_PRE_JOB_STATUSES = host_queue_entry_states.IDLE_PRE_JOB_STATUSES

    job = dbmodels.ForeignKey(Job)
    # host is null for meta-host and hostless entries.
    host = dbmodels.ForeignKey(Host, blank=True, null=True)
    status = dbmodels.CharField(max_length=255)
    meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
                                    db_column='meta_host')
    # active/complete are derived from status in _set_active_and_complete().
    active = dbmodels.BooleanField(default=False)
    complete = dbmodels.BooleanField(default=False)
    deleted = dbmodels.BooleanField(default=False)
    # Subdirectory (under the job's results dir) holding this entry's results.
    execution_subdir = dbmodels.CharField(max_length=255, blank=True,
                                          default='')
    # If atomic_group is set, this is a virtual HostQueueEntry that will
    # be expanded into many actual hosts within the group at schedule time.
    atomic_group = dbmodels.ForeignKey(AtomicGroup, blank=True, null=True)
    aborted = dbmodels.BooleanField(default=False)
    started_on = dbmodels.DateTimeField(null=True, blank=True)
    finished_on = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()


    def __init__(self, *args, **kwargs):
        """Initialize the entry and start recording 'status' changes."""
        super(HostQueueEntry, self).__init__(*args, **kwargs)
        # Record 'status' so on_attribute_changed() can log transitions.
        self._record_attributes(['status'])


    @classmethod
    def create(cls, job, host=None, meta_host=None, atomic_group=None,
                 is_template=False):
        """Creates a new host queue entry.

        @param cls: Implicit class object.
        @param job: The associated job.
        @param host: The associated host.
        @param meta_host: The associated meta host.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        if is_template:
            status = cls.Status.TEMPLATE
        else:
            status = cls.Status.QUEUED

        return cls(job=job, host=host, meta_host=meta_host,
                   atomic_group=atomic_group, status=status)


    def save(self, *args, **kwargs):
        """Save the entry, keeping active/complete in sync with status."""
        self._set_active_and_complete()
        super(HostQueueEntry, self).save(*args, **kwargs)
        # Logs the status transition if 'status' changed since load.
        self._check_for_updated_attributes()


    def execution_path(self):
        """
        Path to this entry's results (relative to the base results directory).
        """
        return server_utils.get_hqe_exec_path(self.job.tag(),
                                              self.execution_subdir)


    def host_or_metahost_name(self):
        """Returns the first non-None name found in priority order.

        The priority order checked is: (1) host name; (2) meta host name; and
        (3) atomic group name.
        """
        if self.host:
            return self.host.hostname
        elif self.meta_host:
            return self.meta_host.name
        else:
            assert self.atomic_group, "no host, meta_host or atomic group!"
            return self.atomic_group.name


    def _set_active_and_complete(self):
        """Derive the active/complete flags from the current status."""
        if self.status in self.ACTIVE_STATUSES:
            self.active, self.complete = True, False
        elif self.status in self.COMPLETE_STATUSES:
            self.active, self.complete = False, True
        else:
            # Pre-job (idle) statuses: neither active nor complete.
            self.active, self.complete = False, False


    def on_attribute_changed(self, attribute, old_value):
        """Log status transitions; only 'status' is recorded (see __init__)."""
        assert attribute == 'status'
        logging.info('%s/%d (%d) -> %s', self.host, self.job.id, self.id,
                     self.status)


    def is_meta_host_entry(self):
        """True if this entry has a meta_host instead of a host."""
        return self.host is None and self.meta_host is not None


    # This code is shared between rpc_interface and models.HostQueueEntry.
    # Sadly due to circular imports between the 2 (crbug.com/230100) making it
    # a class method was the best way to refactor it. Attempting to put it in
    # rpc_utils or a new utils module failed as that would require us to import
    # models.py but to call it from here we would have to import the utils.py
    # thus creating a cycle.
    @classmethod
    def abort_host_queue_entries(cls, host_queue_entries):
        """Aborts a collection of host_queue_entries.

        Abort these host queue entry and all host queue entries of jobs created
        by them.

        @param host_queue_entries: List of host queue entries we want to abort.
        """
        # This isn't completely immune to race conditions since it's not atomic,
        # but it should be safe given the scheduler's behavior.

        # TODO(milleral): crbug.com/230100
        # The |abort_host_queue_entries| rpc does nearly exactly this,
        # however, trying to re-use the code generates some horrible
        # circular import error.  I'd be nice to refactor things around
        # sometime so the code could be reused.

        # Fixpoint algorithm to find the whole tree of HQEs to abort to
        # minimize the total number of database queries:
        children = set()
        new_children = set(host_queue_entries)
        while new_children:
            children.update(new_children)
            new_child_ids = [hqe.job_id for hqe in new_children]
            new_children = HostQueueEntry.objects.filter(
                    job__parent_job__in=new_child_ids,
                    complete=False, aborted=False).all()
            # To handle circular parental relationships
            new_children = set(new_children) - children

        # Associate a user with the host queue entries that we're about
        # to abort so that we can look up who to blame for the aborts.
        now = datetime.now()
        user = User.current_user()
        aborted_hqes = [AbortedHostQueueEntry(queue_entry=hqe,
                aborted_by=user, aborted_on=now) for hqe in children]
        AbortedHostQueueEntry.objects.bulk_create(aborted_hqes)
        # Bulk update all of the HQEs to set the abort bit.
        child_ids = [hqe.id for hqe in children]
        HostQueueEntry.objects.filter(id__in=child_ids).update(aborted=True)


    def abort(self):
        """ Aborts this host queue entry.

        Abort this host queue entry and all host queue entries of jobs created by
        this one.

        """
        # Aborting a complete or already-aborted entry is a no-op.
        if not self.complete and not self.aborted:
            HostQueueEntry.abort_host_queue_entries([self])


    @classmethod
    def compute_full_status(cls, status, aborted, complete):
        """Returns a modified status msg if the host queue entry was aborted.

        @param cls: Implicit class object.
        @param status: The original status message.
        @param aborted: Whether the host queue entry was aborted.
        @param complete: Whether the host queue entry was completed.
        """
        if aborted and not complete:
            return 'Aborted (%s)' % status
        return status


    def full_status(self):
        """Returns the full status of this host queue entry, as a string."""
        return self.compute_full_status(self.status, self.aborted,
                                        self.complete)


    def _postprocess_object_dict(self, object_dict):
        """Add the computed 'full_status' field when serializing to a dict."""
        object_dict['full_status'] = self.full_status()


    class Meta:
        """Metadata for class HostQueueEntry."""
        db_table = 'afe_host_queue_entries'



    def __unicode__(self):
        """Unicode representation: '<hostname>/<job id> (<id>)'."""
        hostname = None
        if self.host:
            hostname = self.host.hostname
        return u"%s/%d (%d)" % (hostname, self.job.id, self.id)
1957
1958
class AbortedHostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an aborted host queue entry."""
    queue_entry = dbmodels.OneToOneField(HostQueueEntry, primary_key=True)
    # Who requested the abort, and when it was recorded.
    aborted_by = dbmodels.ForeignKey(User)
    aborted_on = dbmodels.DateTimeField()

    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Save the record, stamping aborted_on with the current time.

        Note: any previously assigned aborted_on value is overwritten.
        """
        self.aborted_on = datetime.now()
        super(AbortedHostQueueEntry, self).save(*args, **kwargs)

    class Meta:
        """Metadata for class AbortedHostQueueEntry."""
        db_table = 'afe_aborted_host_queue_entries'
1975
1976
class RecurringRun(dbmodels.Model, model_logic.ModelExtensions):
    """\
    job: job to use as a template
    owner: owner of the instantiated template
    start_date: Run the job at scheduled date
    loop_period: Re-run (loop) the job periodically
                 (in every loop_period seconds)
    loop_count: Re-run (loop) count
    """

    job = dbmodels.ForeignKey(Job)
    owner = dbmodels.ForeignKey(User)
    start_date = dbmodels.DateTimeField()
    loop_period = dbmodels.IntegerField(blank=True)
    loop_count = dbmodels.IntegerField(blank=True)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class RecurringRun."""
        db_table = 'afe_recurring_run'

    def __unicode__(self):
        """Unicode representation including job id and loop parameters."""
        return u'RecurringRun(job %s, start %s, period %s, count %s)' % (
            self.job.id, self.start_date, self.loop_period, self.loop_count)
2002
2003
class SpecialTask(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Tasks to run on hosts at the next time they are in the Ready state. Use this
    for high-priority tasks, such as forced repair or forced reinstall.

    host: host to run this task on
    task: special task to run
    time_requested: date and time the request for this task was made
    is_active: task is currently running
    is_complete: task has finished running
    is_aborted: task was aborted
    time_started: date and time the task started
    time_finished: date and time the task finished
    queue_entry: Host queue entry waiting on this task (or None, if task was not
                 started in preparation of a job)
    """
    # The closed set of special task types the scheduler knows how to run.
    Task = enum.Enum('Verify', 'Cleanup', 'Repair', 'Reset', 'Provision',
                     string_values=True)

    host = dbmodels.ForeignKey(Host, blank=False, null=False)
    task = dbmodels.CharField(max_length=64, choices=Task.choices(),
                              blank=False, null=False)
    requested_by = dbmodels.ForeignKey(User)
    time_requested = dbmodels.DateTimeField(auto_now_add=True, blank=False,
                                            null=False)
    is_active = dbmodels.BooleanField(default=False, blank=False, null=False)
    is_complete = dbmodels.BooleanField(default=False, blank=False, null=False)
    is_aborted = dbmodels.BooleanField(default=False, blank=False, null=False)
    time_started = dbmodels.DateTimeField(null=True, blank=True)
    queue_entry = dbmodels.ForeignKey(HostQueueEntry, blank=True, null=True)
    success = dbmodels.BooleanField(default=False, blank=False, null=False)
    time_finished = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()


    def save(self, **kwargs):
        """Save the task.

        If the task is tied to a queue entry, requested_by is forced to the
        owner of that entry's job, overriding any value previously set.
        """
        if self.queue_entry:
            self.requested_by = User.objects.get(
                    login=self.queue_entry.job.owner)
        super(SpecialTask, self).save(**kwargs)


    def execution_path(self):
        """Returns the execution path for a special task."""
        return server_utils.get_special_task_exec_path(
                self.host.hostname, self.id, self.task, self.time_requested)


    # property to emulate HostQueueEntry.status
    @property
    def status(self):
        """Returns a host queue entry status appropriate for a special task."""
        return server_utils.get_special_task_status(
                self.is_complete, self.success, self.is_active)


    # property to emulate HostQueueEntry.started_on
    @property
    def started_on(self):
        """Returns the time at which this special task started."""
        return self.time_started


    @classmethod
    def schedule_special_task(cls, host, task):
        """Schedules a special task on a host if not already scheduled.

        @param cls: Implicit class object.
        @param host: The host to use.
        @param task: The task to schedule.
        """
        # Reuse an existing pending (neither active nor complete) task of
        # the same type rather than queueing a duplicate.
        existing_tasks = SpecialTask.objects.filter(host__id=host.id, task=task,
                                                    is_active=False,
                                                    is_complete=False)
        if existing_tasks:
            return existing_tasks[0]

        special_task = SpecialTask(host=host, task=task,
                                   requested_by=User.current_user())
        special_task.save()
        return special_task


    def abort(self):
        """ Abort this special task."""
        self.is_aborted = True
        self.save()


    def activate(self):
        """
        Sets a task as active and sets the time started to the current time.
        """
        logging.info('Starting: %s', self)
        self.is_active = True
        self.time_started = datetime.now()
        self.save()


    def finish(self, success):
        """Sets a task as completed.

        @param success: Whether or not the task was successful.
        """
        logging.info('Finished: %s', self)
        self.is_active = False
        self.is_complete = True
        self.success = success
        # Only stamp a finish time for tasks that actually started.
        if self.time_started:
            self.time_finished = datetime.now()
        self.save()


    class Meta:
        """Metadata for class SpecialTask."""
        db_table = 'afe_special_tasks'


    def __unicode__(self):
        """Unicode representation with id, host, task type, request time and
        an activity suffix."""
        result = u'Special Task %s (host %s, task %s, time %s)' % (
            self.id, self.host, self.task, self.time_requested)
        if self.is_complete:
            result += u' (completed)'
        elif self.is_active:
            result += u' (active)'

        return result
2132
2133
class StableVersion(dbmodels.Model, model_logic.ModelExtensions):
    """Maps a board name to its stable version string (one row per board)."""

    board = dbmodels.CharField(max_length=255, unique=True)
    version = dbmodels.CharField(max_length=255)

    class Meta:
        """Metadata for class StableVersion."""
        db_table = 'afe_stable_versions'
2142