models.py revision b28def33556c59638a885cdc5710c9594c8fcd76
1# pylint: disable=missing-docstring
2
3import logging
4from datetime import datetime
5import django.core
6try:
7    from django.db import models as dbmodels, connection
8except django.core.exceptions.ImproperlyConfigured:
9    raise ImportError('Django database not yet configured. Import either '
10                       'setup_django_environment or '
11                       'setup_django_lite_environment from '
12                       'autotest_lib.frontend before any imports that '
13                       'depend on django models.')
14from xml.sax import saxutils
15import common
16from autotest_lib.frontend.afe import model_logic, model_attributes
17from autotest_lib.frontend.afe import rdb_model_extensions
18from autotest_lib.frontend import settings, thread_local
19from autotest_lib.client.common_lib import enum, error, host_protections
20from autotest_lib.client.common_lib import global_config
21from autotest_lib.client.common_lib import host_queue_entry_states
22from autotest_lib.client.common_lib import control_data, priorities, decorators
23from autotest_lib.client.common_lib import site_utils
24from autotest_lib.client.common_lib.cros.graphite import autotest_es
25from autotest_lib.server import utils as server_utils
26
# Job options and user preferences.
DEFAULT_REBOOT_BEFORE = model_attributes.RebootBefore.IF_DIRTY
# Fix: the reboot-after default must come from the RebootAfter enum, not
# RebootBefore, to stay consistent with the choices used for
# User.reboot_after.  (Both enums happen to share the NEVER value, so the
# stored integer is unchanged.)
DEFAULT_REBOOT_AFTER = model_attributes.RebootAfter.NEVER
30
31
class AclAccessViolation(Exception):
    """\
    Raised when an operation is attempted without proper permissions as
    dictated by ACLs.
    """
37
38
class AtomicGroup(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    A collection of hosts which must only be scheduled all at once.

    Any host carrying a label tied to an atomic group will only be
    scheduled for a job together with the other hosts sharing that label.

    Required:
      name: Name for this atomic group, e.g. 'rack23' or 'funky_net'.
      max_number_of_machines: Upper bound on how many machines are
              scheduled at once for this atomic group; job.synch_count
              acts as the minimum.

    Optional:
      description: Free-form text describing this group's purpose.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)
    # A deliberately huge default so the scheduler can treat "no explicit
    # limit" as "use every machine in the group"; the subset actually used
    # is typically narrowed via dependency labels instead.
    # TODO(dennisjeffrey): Revisit this so we don't have to assume that
    # "infinity" is around 3.3 million.
    INFINITE_MACHINES = 333333333
    max_number_of_machines = dbmodels.IntegerField(default=INFINITE_MACHINES)
    invalid = dbmodels.BooleanField(default=False,
                                  editable=settings.FULL_ADMIN)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()


    def enqueue_job(self, job, is_template=False):
        """Enqueue a job on this atomic group of hosts.

        @param job: A job to enqueue.
        @param is_template: Whether the status should be "Template".
        """
        entry = HostQueueEntry.create(job=job, atomic_group=self,
                                      is_template=is_template)
        entry.save()


    def clean_object(self):
        # Detach this group from all labels on invalidation.
        self.label_set.clear()


    class Meta:
        """Metadata for class AtomicGroup."""
        db_table = 'afe_atomic_groups'


    def __unicode__(self):
        return unicode(self.name)
94
95
class Label(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    Required:
      name: label name

    Optional:
      kernel_config: URL/path to a kernel config used by jobs run on this
              label.
      platform: True if this label denotes a platform (defaults to False).
      only_if_needed: If True, a host with this label is only used when the
              label is explicitly requested by the job/test (either as the
              meta_host or in the job_dependencies).
      atomic_group: The atomic group associated with this label, if any.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    kernel_config = dbmodels.CharField(max_length=255, blank=True)
    platform = dbmodels.BooleanField(default=False)
    invalid = dbmodels.BooleanField(default=False,
                                    editable=settings.FULL_ADMIN)
    only_if_needed = dbmodels.BooleanField(default=False)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    atomic_group = dbmodels.ForeignKey(AtomicGroup, null=True, blank=True)


    def clean_object(self):
        # Detach this label from every host and test on invalidation.
        self.host_set.clear()
        self.test_set.clear()


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job against any host carrying this label.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        entry = HostQueueEntry.create(job=job, meta_host=self,
                                      atomic_group=atomic_group,
                                      is_template=is_template)
        entry.save()


    class Meta:
        """Metadata for class Label."""
        db_table = 'afe_labels'


    def __unicode__(self):
        return unicode(self.name)
148
149
class Shard(dbmodels.Model, model_logic.ModelExtensions):
    """A shard of the AFE; identified by hostname and serving some labels."""

    hostname = dbmodels.CharField(max_length=255, unique=True)

    name_field = 'hostname'

    labels = dbmodels.ManyToManyField(Label, blank=True,
                                      db_table='afe_shards_labels')

    class Meta:
        """Metadata for class Shard."""
        db_table = 'afe_shards'


    def rpc_hostname(self):
        """Get the rpc hostname of the shard.

        @return: Just the shard hostname for all non-testing environments.
                 The address of the default gateway for vm testing
                 environments.
        """
        # TODO: Figure out a better solution for testing. Since no two
        # shards can run on the same host, a shard hostname of 'localhost'
        # means we are a VM in a test cluster (puppylab); there each shard
        # VM has a hostname like localhost:<port>. A localhost:<port> name
        # is needed for correct afe links/redirection from the frontend
        # (this happens through the host), but rpcs performed *on* the
        # shard must use the gateway address instead. In a real cluster a
        # shard node never has 'localhost' for its hostname, so its
        # hostname is returned unchanged.
        if not site_utils.is_puppylab_vm(self.hostname):
            return self.hostname
        bare_host = self.hostname.split(':')[0]
        return self.hostname.replace(bare_host, site_utils.DEFAULT_VM_GATEWAY)
188
189
class Drone(dbmodels.Model, model_logic.ModelExtensions):
    """
    A scheduler drone

    hostname: the drone's hostname
    """
    hostname = dbmodels.CharField(max_length=255, unique=True)

    name_field = 'hostname'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Save the drone; restricted to superusers."""
        editor = User.current_user()
        if not editor.is_superuser():
            raise Exception('Only superusers may edit drones')
        super(Drone, self).save(*args, **kwargs)


    def delete(self):
        """Delete the drone; restricted to superusers."""
        editor = User.current_user()
        if not editor.is_superuser():
            raise Exception('Only superusers may delete drones')
        super(Drone, self).delete()


    class Meta:
        """Metadata for class Drone."""
        db_table = 'afe_drones'

    def __unicode__(self):
        return unicode(self.hostname)
220
221
class DroneSet(dbmodels.Model, model_logic.ModelExtensions):
    """
    A named set of scheduler drones.

    The scheduler consults these sets to decide which drones a job is
    allowed to run on.

    name: the drone set's name
    drones: the drones that are part of the set
    """
    DRONE_SETS_ENABLED = global_config.global_config.get_config_value(
            'SCHEDULER', 'drone_sets_enabled', type=bool, default=False)
    DEFAULT_DRONE_SET_NAME = global_config.global_config.get_config_value(
            'SCHEDULER', 'default_drone_set_name', default=None)

    name = dbmodels.CharField(max_length=255, unique=True)
    drones = dbmodels.ManyToManyField(Drone, db_table='afe_drone_sets_drones')

    name_field = 'name'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Save the drone set; restricted to superusers."""
        editor = User.current_user()
        if not editor.is_superuser():
            raise Exception('Only superusers may edit drone sets')
        super(DroneSet, self).save(*args, **kwargs)


    def delete(self):
        """Delete the drone set; restricted to superusers."""
        editor = User.current_user()
        if not editor.is_superuser():
            raise Exception('Only superusers may delete drone sets')
        super(DroneSet, self).delete()


    @classmethod
    def drone_sets_enabled(cls):
        """Returns whether drone sets are enabled.

        @param cls: Implicit class object.
        """
        return cls.DRONE_SETS_ENABLED


    @classmethod
    def default_drone_set_name(cls):
        """Returns the default drone set name.

        @param cls: Implicit class object.
        """
        return cls.DEFAULT_DRONE_SET_NAME


    @classmethod
    def get_default(cls):
        """Gets the default drone set, compatible with Job.add_object.

        @param cls: Implicit class object.
        """
        return cls.smart_get(cls.DEFAULT_DRONE_SET_NAME)


    @classmethod
    def resolve_name(cls, drone_set_name):
        """
        Returns the name of one of these, if not None, in order of
        preference:
        1) the drone set given,
        2) the current user's default drone set, or
        3) the global default drone set

        or returns None if drone sets are disabled

        @param cls: Implicit class object.
        @param drone_set_name: A drone set name.
        """
        if not cls.drone_sets_enabled():
            return None

        # Note: current_user() is always consulted (it may lazily create
        # the system user), even when drone_set_name is given.
        current = User.current_user()
        user_set_name = current.drone_set and current.drone_set.name

        return drone_set_name or user_set_name or cls.get_default().name


    def get_drone_hostnames(self):
        """
        Gets the hostnames of all drones in this drone set.
        """
        hostnames = self.drones.all().values_list('hostname', flat=True)
        return set(hostnames)


    class Meta:
        """Metadata for class DroneSet."""
        db_table = 'afe_drone_sets'

    def __unicode__(self):
        return unicode(self.name)
318
319
class User(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    login: user login name

    Optional:
    access_level: 0=User (default), 1=Admin, 100=Root
    """
    ACCESS_ROOT = 100
    ACCESS_ADMIN = 1
    ACCESS_USER = 0

    AUTOTEST_SYSTEM = 'autotest_system'

    login = dbmodels.CharField(max_length=255, unique=True)
    access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)

    # user preferences
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)
    show_experimental = dbmodels.BooleanField(default=False)

    name_field = 'login'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Save the user; non-superusers may only modify themselves."""
        # Is this a new record being saved for the first time?
        is_new = self.id is None
        requester = thread_local.get_user()
        if (requester and not requester.is_superuser()
                and requester.login != self.login):
            raise AclAccessViolation("You cannot modify user " + self.login)
        super(User, self).save(*args, **kwargs)
        if is_new:
            # Every newly created user joins the 'Everyone' ACL group.
            AclGroup.objects.get(name='Everyone').users.add(self)


    def is_superuser(self):
        """Returns whether the user has superuser access."""
        return self.access_level >= self.ACCESS_ROOT


    @classmethod
    def current_user(cls):
        """Returns the current user.

        @param cls: Implicit class object.
        """
        user = thread_local.get_user()
        if user is not None:
            return user
        # No request-bound user: fall back to (and lazily create) the
        # autotest_system superuser.
        user, _ = cls.objects.get_or_create(login=cls.AUTOTEST_SYSTEM)
        user.access_level = cls.ACCESS_ROOT
        user.save()
        return user


    @classmethod
    def get_record(cls, data):
        """Check the database for an identical record.

        Check for a record with matching id and login. If one exists,
        return it. If one does not exist there is a possibility that
        the following cases have happened:
        1. Same id, different login
            We received: "1 chromeos-test"
            And we have: "1 debug-user"
        In this case we need to delete "1 debug_user" and insert
        "1 chromeos-test".

        2. Same login, different id:
            We received: "1 chromeos-test"
            And we have: "2 chromeos-test"
        In this case we need to delete "2 chromeos-test" and insert
        "1 chromeos-test".

        As long as this method deletes bad records and raises the
        DoesNotExist exception the caller will handle creating the
        new record.

        @raises: DoesNotExist, if a record with the matching login and id
                does not exist.
        """

        # Both the id and login should be unique, but we might already
        # hold a user with the same login/id because current_user
        # proactively creates a user record when one doesn't exist. To
        # avoid conflict between the master and shard, drop any existing
        # user records that don't match what we're about to deserialize
        # from the master.
        try:
            return cls.objects.get(login=data['login'], id=data['id'])
        except cls.DoesNotExist:
            cls.delete_matching_record(login=data['login'])
            cls.delete_matching_record(id=data['id'])
            raise


    class Meta:
        """Metadata for class User."""
        db_table = 'afe_users'

    def __unicode__(self):
        return unicode(self.login)
429
430
class Host(model_logic.ModelWithInvalid, rdb_model_extensions.AbstractHostModel,
           model_logic.ModelWithAttributes):
    """\
    Required:
    hostname

    optional:
    locked: if true, host is locked and will not be queued

    Internal:
    From AbstractHostModel:
        status: string describing status of host
        invalid: true if the host has been deleted
        protection: indicates what can be done to this host during repair
        lock_time: DateTime at which the host was locked
        dirty: true if the host has been used without being rebooted
    Local:
        locked_by: user that locked the host, or null if the host is unlocked
    """

    # Related objects serialized along with the host when syncing to shards.
    SERIALIZATION_LINKS_TO_FOLLOW = set(['aclgroup_set',
                                         'hostattribute_set',
                                         'labels',
                                         'shard'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['invalid'])


    def custom_deserialize_relation(self, link, data):
        """Deserialize the 'shard' relation; no other link is supported here."""
        assert link == 'shard', 'Link %s should not be deserialized' % link
        self.shard = Shard.deserialize(data)


    # Note: Only specify foreign keys here, specify all native host columns in
    # rdb_model_extensions instead.
    Protection = host_protections.Protection
    labels = dbmodels.ManyToManyField(Label, blank=True,
                                      db_table='afe_hosts_labels')
    locked_by = dbmodels.ForeignKey(User, null=True, blank=True, editable=False)
    name_field = 'hostname'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    leased_objects = model_logic.LeasedHostManager()

    shard = dbmodels.ForeignKey(Shard, blank=True, null=True)

    def __init__(self, *args, **kwargs):
        super(Host, self).__init__(*args, **kwargs)
        # Track 'status' so on_attribute_changed fires when it changes.
        self._record_attributes(['status'])


    @staticmethod
    def create_one_time_host(hostname):
        """Creates a one-time host.

        One-time hosts are stored as invalid records with protection
        DO_NOT_REPAIR; an existing *valid* record with the same hostname is
        an error.

        @param hostname: The name for the host.
        """
        query = Host.objects.filter(hostname=hostname)
        if query.count() == 0:
            host = Host(hostname=hostname, invalid=True)
            host.do_validate()
        else:
            host = query[0]
            if not host.invalid:
                raise model_logic.ValidationError({
                    'hostname' : '%s already exists in the autotest DB.  '
                        'Select it rather than entering it as a one time '
                        'host.' % hostname
                    })
        host.protection = host_protections.Protection.DO_NOT_REPAIR
        host.locked = False
        host.save()
        host.clean_object()
        return host


    @classmethod
    def assign_to_shard(cls, shard, known_ids):
        """Assigns hosts to a shard.

        For all labels that have been assigned to a shard, all hosts that
        have at least one of the shard's labels are assigned to the shard.
        Hosts that are assigned to the shard but aren't already present on the
        shard are returned.

        Board to shard mapping is many-to-one. Many different boards can be
        hosted in a shard. However, DUTs of a single board cannot be distributed
        into more than one shard.

        @param shard: The shard object to assign labels/hosts for.
        @param known_ids: List of all host-ids the shard already knows.
                          This is used to figure out which hosts should be sent
                          to the shard. If shard_ids were used instead, hosts
                          would only be transferred once, even if the client
                          failed persisting them.
                          The number of hosts usually lies in O(100), so the
                          overhead is acceptable.

        @returns the hosts objects that should be sent to the shard.
        """

        # Disclaimer: concurrent heartbeats should theoretically not occur in
        # the current setup. As they may be introduced in the near future,
        # this comment will be left here.

        # Sending stuff twice is acceptable, but forgetting something isn't.
        # Detecting duplicates on the client is easy, but here it's harder. The
        # following options were considered:
        # - SELECT ... WHERE and then UPDATE ... WHERE: Update might update more
        #   than select returned, as concurrently more hosts might have been
        #   inserted
        # - UPDATE and then SELECT WHERE shard=shard: select always returns all
        #   hosts for the shard, this is overhead
        # - SELECT and then UPDATE only selected without requerying afterwards:
        #   returns the old state of the records.
        host_ids = set(Host.objects.filter(
            labels__in=shard.labels.all(),
            leased=False
            ).exclude(
            id__in=known_ids,
            ).values_list('pk', flat=True))

        if host_ids:
            Host.objects.filter(pk__in=host_ids).update(shard=shard)
            # Requery so the returned objects reflect the updated shard field.
            return list(Host.objects.filter(pk__in=host_ids).all())
        return []

    def resurrect_object(self, old_object):
        """Restore state when re-adding a previously-invalidated host."""
        super(Host, self).resurrect_object(old_object)
        # invalid hosts can be in use by the scheduler (as one-time hosts), so
        # don't change the status
        self.status = old_object.status


    def clean_object(self):
        # Detach from all ACL groups and labels on invalidation.
        self.aclgroup_set.clear()
        self.labels.clear()


    def record_state(self, type_str, state, value, other_metadata=None):
        """Record metadata in elasticsearch.

        @param type_str: sets the _type field in elasticsearch db.
        @param state: string representing what state we are recording,
                      e.g. 'locked'
        @param value: value of the state, e.g. True
        @param other_metadata: Other metadata to store in metaDB.
        """
        metadata = {
            state: value,
            'hostname': self.hostname,
        }
        if other_metadata:
            # Python 2 idiom: items() returns lists, so + merges the dicts
            # (other_metadata wins on key collisions).
            metadata = dict(metadata.items() + other_metadata.items())
        autotest_es.post(use_http=True, type_str=type_str, metadata=metadata)


    def save(self, *args, **kwargs):
        """Save the host, maintaining lock bookkeeping and ACL membership."""
        # extra spaces in the hostname can be a sneaky source of errors
        self.hostname = self.hostname.strip()
        # is this a new object being saved for the first time?
        first_time = (self.id is None)
        if not first_time:
            AclGroup.check_for_acl_violation_hosts([self])
        # If locked is changed, send its status and user made the change to
        # metaDB. Locks are important in host history because if a device is
        # locked then we don't really care what state it is in.
        if self.locked and not self.locked_by:
            self.locked_by = User.current_user()
            if not self.lock_time:
                self.lock_time = datetime.now()
            self.record_state('lock_history', 'locked', self.locked,
                              {'changed_by': self.locked_by.login,
                               'lock_reason': self.lock_reason})
            self.dirty = True
        elif not self.locked and self.locked_by:
            # Host was just unlocked: record it, then clear lock bookkeeping.
            self.record_state('lock_history', 'locked', self.locked,
                              {'changed_by': self.locked_by.login})
            self.locked_by = None
            self.lock_time = None
        super(Host, self).save(*args, **kwargs)
        if first_time:
            # Every newly created host joins the 'Everyone' ACL group.
            everyone = AclGroup.objects.get(name='Everyone')
            everyone.hosts.add(self)
        self._check_for_updated_attributes()


    def delete(self):
        """Delete the host, aborting any of its queue entries first."""
        AclGroup.check_for_acl_violation_hosts([self])
        for queue_entry in self.hostqueueentry_set.all():
            queue_entry.deleted = True
            queue_entry.abort()
        super(Host, self).delete()


    def on_attribute_changed(self, attribute, old_value):
        # Only 'status' is recorded via _record_attributes in __init__.
        assert attribute == 'status'
        logging.info(self.hostname + ' -> ' + self.status)


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job on this host.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        queue_entry = HostQueueEntry.create(host=self, job=job,
                                            is_template=is_template,
                                            atomic_group=atomic_group)
        # allow recovery of dead hosts from the frontend
        if not self.active_queue_entry() and self.is_dead():
            self.status = Host.Status.READY
            self.save()
        queue_entry.save()

        # Block this host from being picked again for the same job.
        block = IneligibleHostQueue(job=job, host=self)
        block.save()


    def platform(self):
        """The platform of the host."""
        # TODO(showard): slightly hacky?
        platforms = self.labels.filter(platform=True)
        if len(platforms) == 0:
            return None
        return platforms[0]
    platform.short_description = 'Platform'


    @classmethod
    def check_no_platform(cls, hosts):
        """Verify the specified hosts have no associated platforms.

        @param cls: Implicit class object.
        @param hosts: The hosts to verify.
        @raises model_logic.ValidationError if any hosts already have a
            platform.
        """
        Host.objects.populate_relationships(hosts, Label, 'label_list')
        errors = []
        for host in hosts:
            platforms = [label.name for label in host.label_list
                         if label.platform]
            if platforms:
                # do a join, just in case this host has multiple platforms,
                # we'll be able to see it
                errors.append('Host %s already has a platform: %s' % (
                              host.hostname, ', '.join(platforms)))
        if errors:
            raise model_logic.ValidationError({'labels': '; '.join(errors)})


    @classmethod
    def check_board_labels_allowed(cls, hosts, new_labels=[]):
        """Verify the specified hosts have valid board labels and the given
        new board labels can be added.

        @param cls: Implicit class object.
        @param hosts: The hosts to verify.
        @param new_labels: A list of labels to be added to the hosts.

        @raises model_logic.ValidationError if any host has invalid board labels
                or the given board labels cannot be added to the hosts.
        """
        # NOTE(review): mutable default argument; harmless here because
        # new_labels is never mutated (boards + new_labels builds a new list).
        Host.objects.populate_relationships(hosts, Label, 'label_list')
        errors = []
        for host in hosts:
            boards = [label.name for label in host.label_list
                      if label.name.startswith('board:')]
            if not server_utils.board_labels_allowed(boards + new_labels):
                # do a join, just in case this host has multiple boards,
                # we'll be able to see it
                errors.append('Host %s already has board labels: %s' % (
                              host.hostname, ', '.join(boards)))
        if errors:
            raise model_logic.ValidationError({'labels': '; '.join(errors)})


    def is_dead(self):
        """Returns whether the host is dead (has status repair failed)."""
        return self.status == Host.Status.REPAIR_FAILED


    def active_queue_entry(self):
        """Returns the active queue entry for this host, or None if none."""
        active = list(self.hostqueueentry_set.filter(active=True))
        if not active:
            return None
        assert len(active) == 1, ('More than one active entry for '
                                  'host ' + self.hostname)
        return active[0]


    def _get_attribute_model_and_args(self, attribute):
        # Hook for ModelWithAttributes: keyvals live in HostAttribute.
        return HostAttribute, dict(host=self, attribute=attribute)


    @classmethod
    def get_attribute_model(cls):
        """Return the attribute model.

        Override method in parent class. See ModelExtensions for details.
        @returns: The attribute model of Host.
        """
        return HostAttribute


    class Meta:
        """Metadata for the Host class."""
        db_table = 'afe_hosts'


    def __unicode__(self):
        return unicode(self.hostname)
745
746
class HostAttribute(dbmodels.Model, model_logic.ModelExtensions):
    """Arbitrary keyvals associated with hosts."""

    # Serialization control for master<->shard syncing.
    SERIALIZATION_LINKS_TO_KEEP = set(['host'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['value'])
    host = dbmodels.ForeignKey(Host)
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for the HostAttribute class."""
        db_table = 'afe_host_attributes'


    @classmethod
    def get_record(cls, data):
        """Check the database for an identical record.

        Use host_id and attribute to search for a existing record.

        @raises: DoesNotExist, if no record found
        @raises: MultipleObjectsReturned if multiple records found.
        """
        # TODO(fdeng): We should use host_id and attribute together as
        #              a primary key in the db.
        return cls.objects.get(host_id=data['host_id'],
                               attribute=data['attribute'])


    @classmethod
    def deserialize(cls, data):
        """Override deserialize in parent class.

        Do not deserialize id as id is not kept consistent on master and shards.

        @param data: A dictionary of data to deserialize.

        @returns: A HostAttribute object.
        """
        if data:
            # Robustness fix: use pop's default so a payload without an
            # 'id' key doesn't raise KeyError; we only need to ensure the
            # id is never deserialized.
            data.pop('id', None)
        return super(HostAttribute, cls).deserialize(data)
791
792
class Test(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    author: author name
    description: description of the test
    name: test name
    time: short, medium, long
    test_class: the class the test belongs in
    test_category: the category for the test
    test_type: Client or Server
    path: path to pass to run_test()
    sync_count: a number >=1 (1 being the default). If it's 1, then it's an
                async job. If it's >1 it's a sync job for that number of
                machines, i.e. if sync_count = 2 it is a sync job that
                requires two machines.
    Optional:
    dependencies: What the test requires to run. Comma delimited list
    dependency_labels: many-to-many relationship with labels corresponding to
                       test dependencies.
    experimental: If this is set to True production servers will ignore the test
    run_verify: Whether or not the scheduler should run the verify stage
    run_reset: Whether or not the scheduler should run the reset stage
    test_retry: Number of times to retry test if the test did not complete
                successfully. (optional, default: 0)
    """
    TestTime = enum.Enum('SHORT', 'MEDIUM', 'LONG', start_value=1)

    name = dbmodels.CharField(max_length=255, unique=True)
    author = dbmodels.CharField(max_length=255)
    test_class = dbmodels.CharField(max_length=255)
    test_category = dbmodels.CharField(max_length=255)
    dependencies = dbmodels.CharField(max_length=255, blank=True)
    description = dbmodels.TextField(blank=True)
    experimental = dbmodels.BooleanField(default=True)
    run_verify = dbmodels.BooleanField(default=False)
    test_time = dbmodels.SmallIntegerField(choices=TestTime.choices(),
                                           default=TestTime.MEDIUM)
    test_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices())
    sync_count = dbmodels.IntegerField(default=1)
    path = dbmodels.CharField(max_length=255, unique=True)
    test_retry = dbmodels.IntegerField(blank=True, default=0)
    run_reset = dbmodels.BooleanField(default=True)

    dependency_labels = (
        dbmodels.ManyToManyField(Label, blank=True,
                                 db_table='afe_autotests_dependency_labels'))
    name_field = 'name'
    objects = model_logic.ExtendedManager()


    def admin_description(self):
        """Returns the escaped description wrapped for the admin UI."""
        escaped = saxutils.escape(self.description)
        return '<span style="white-space:pre">%s</span>' % escaped
    admin_description.allow_tags = True
    admin_description.short_description = 'Description'


    class Meta:
        """Metadata for class Test."""
        db_table = 'afe_autotests'

    def __unicode__(self):
        return unicode(self.name)
858
859
class TestParameter(dbmodels.Model):
    """
    A declared parameter of a test
    """
    # A parameter is unique per (test, name); see Meta.unique_together.
    test = dbmodels.ForeignKey(Test)
    name = dbmodels.CharField(max_length=255)

    class Meta:
        """Metadata for class TestParameter."""
        db_table = 'afe_test_parameters'
        unique_together = ('test', 'name')

    def __unicode__(self):
        return u'%s (%s)' % (self.name, self.test.name)
874
875
class Profiler(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    name: profiler name
    test_type: Client or Server

    Optional:
    description: arbitrary text description
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)

    # Used by ModelExtensions for name-based lookups.
    name_field = 'name'
    objects = model_logic.ExtendedManager()


    class Meta:
        """Metadata for class Profiler."""
        db_table = 'afe_profilers'

    def __unicode__(self):
        return unicode(self.name)
898
899
class AclGroup(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    name: name of ACL group

    Optional:
    description: arbitrary description of group
    """

    SERIALIZATION_LINKS_TO_FOLLOW = set(['users'])

    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.CharField(max_length=255, blank=True)
    users = dbmodels.ManyToManyField(User, blank=False,
                                     db_table='afe_acl_groups_users')
    hosts = dbmodels.ManyToManyField(Host, blank=True,
                                     db_table='afe_acl_groups_hosts')

    # Used by ModelExtensions for name-based lookups.
    name_field = 'name'
    objects = model_logic.ExtendedManager()

    @staticmethod
    def check_for_acl_violation_hosts(hosts):
        """Verify the current user has access to the specified hosts.

        @param hosts: The hosts to verify against.
        @raises AclAccessViolation if the current user doesn't have access
            to a host.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        accessible_host_ids = set(
            host.id for host in Host.objects.filter(aclgroup__users=user))
        for host in hosts:
            # Check if the user has access to this host,
            # but only if it is not a metahost or a one-time-host.
            no_access = (isinstance(host, Host)
                         and not host.invalid
                         and int(host.id) not in accessible_host_ids)
            if no_access:
                raise AclAccessViolation("%s does not have access to %s" %
                                         (str(user), str(host)))


    @staticmethod
    def check_abort_permissions(queue_entries):
        """Look for queue entries that aren't abortable by the current user.

        An entry is not abortable if:
           * the job isn't owned by this user, and
           * the machine isn't ACL-accessible, or
           * the machine is in the "Everyone" ACL

        @param queue_entries: The queue entries to check.
        @raises AclAccessViolation if a queue entry is not abortable by the
            current user.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        not_owned = queue_entries.exclude(job__owner=user.login)
        # I do this using ID sets instead of just Django filters because
        # filtering on M2M dbmodels is broken in Django 0.96. It's better in
        # 1.0.
        # TODO: Use Django filters, now that we're using 1.0.
        accessible_ids = set(
            entry.id for entry
            in not_owned.filter(host__aclgroup__users__login=user.login))
        public_ids = set(entry.id for entry
                         in not_owned.filter(host__aclgroup__name='Everyone'))
        cannot_abort = [entry for entry in not_owned.select_related()
                        if entry.id not in accessible_ids
                        or entry.id in public_ids]
        if len(cannot_abort) == 0:
            return
        entry_names = ', '.join('%s-%s/%s' % (entry.job.id, entry.job.owner,
                                              entry.host_or_metahost_name())
                                for entry in cannot_abort)
        raise AclAccessViolation('You cannot abort the following job entries: '
                                 + entry_names)


    def check_for_acl_violation_acl_group(self):
        """Verifies the current user has access to this ACL group.

        @raises AclAccessViolation if the current user doesn't have access to
            this ACL group.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        if self.name == 'Everyone':
            raise AclAccessViolation("You cannot modify 'Everyone'!")
        if user not in self.users.all():
            raise AclAccessViolation("You do not have access to %s"
                                     % self.name)

    @staticmethod
    def on_host_membership_change():
        """Invoked when host membership changes."""
        everyone = AclGroup.objects.get(name='Everyone')

        # find hosts that aren't in any ACL group and add them to Everyone
        # TODO(showard): this is a bit of a hack, since the fact that this query
        # works is kind of a coincidence of Django internals.  This trick
        # doesn't work in general (on all foreign key relationships).  I'll
        # replace it with a better technique when the need arises.
        orphaned_hosts = Host.valid_objects.filter(aclgroup__id__isnull=True)
        everyone.hosts.add(*orphaned_hosts.distinct())

        # find hosts in both Everyone and another ACL group, and remove them
        # from Everyone
        hosts_in_everyone = Host.valid_objects.filter(aclgroup__name='Everyone')
        acled_hosts = set()
        for host in hosts_in_everyone:
            # Has an ACL group other than Everyone
            if host.aclgroup_set.count() > 1:
                acled_hosts.add(host)
        everyone.hosts.remove(*acled_hosts)


    def delete(self):
        """Deletes this group and rebalances 'Everyone' membership.

        @raises AclAccessViolation if this is the 'Everyone' group, or if
            the current user lacks access to this group.
        """
        if self.name == 'Everyone':
            raise AclAccessViolation("You cannot delete 'Everyone'!")
        self.check_for_acl_violation_acl_group()
        super(AclGroup, self).delete()
        self.on_host_membership_change()


    def add_current_user_if_empty(self):
        """Adds the current user if the set of users is empty."""
        if not self.users.count():
            self.users.add(User.current_user())


    def perform_after_save(self, change):
        """Called after a save.

        @param change: Whether there was a change.
        """
        if not change:
            self.users.add(User.current_user())
        self.add_current_user_if_empty()
        self.on_host_membership_change()


    def save(self, *args, **kwargs):
        """Saves the group, checking the stored original for ACL violations.

        @param args: Positional arguments forwarded to Model.save.
        @param kwargs: Keyword arguments forwarded to Model.save.
        """
        change = bool(self.id)
        if change:
            # Check the original object for an ACL violation
            AclGroup.objects.get(id=self.id).check_for_acl_violation_acl_group()
        super(AclGroup, self).save(*args, **kwargs)
        self.perform_after_save(change)


    class Meta:
        """Metadata for class AclGroup."""
        db_table = 'afe_acl_groups'

    def __unicode__(self):
        return unicode(self.name)
1062
1063
class Kernel(dbmodels.Model):
    """
    A kernel configuration for a parameterized job
    """
    version = dbmodels.CharField(max_length=255)
    cmdline = dbmodels.CharField(max_length=255, blank=True)

    @classmethod
    def create_kernels(cls, kernel_list):
        """Creates all kernels in the kernel list.

        @param cls: Implicit class object.
        @param kernel_list: A list of dictionaries that describe the kernels,
            in the same format as the 'kernel' argument to
            rpc_interface.generate_control_file.
        @return A list of the created kernels, or None if kernel_list is
            empty or None.
        """
        if kernel_list:
            return [cls._create(kernel) for kernel in kernel_list]
        return None


    @classmethod
    def _create(cls, kernel_dict):
        # Consume the recognized keys; anything left over is an error.
        version = kernel_dict.pop('version')
        cmdline = kernel_dict.pop('cmdline', '')
        if kernel_dict:
            raise Exception('Extraneous kernel arguments remain: %r'
                            % kernel_dict)
        # Reuse an existing row for the same (version, cmdline) pair.
        kernel, _ = cls.objects.get_or_create(version=version,
                                              cmdline=cmdline)
        return kernel


    class Meta:
        """Metadata for class Kernel."""
        db_table = 'afe_kernels'
        unique_together = ('version', 'cmdline')

    def __unicode__(self):
        return u'%s %s' % (self.version, self.cmdline)
1107
1108
class ParameterizedJob(dbmodels.Model):
    """
    Auxiliary configuration for a parameterized job.
    """
    test = dbmodels.ForeignKey(Test)
    label = dbmodels.ForeignKey(Label, null=True)
    use_container = dbmodels.BooleanField(default=False)
    profile_only = dbmodels.BooleanField(default=False)
    upload_kernel_config = dbmodels.BooleanField(default=False)

    kernels = dbmodels.ManyToManyField(
            Kernel, db_table='afe_parameterized_job_kernels')
    profilers = dbmodels.ManyToManyField(
            Profiler, through='ParameterizedJobProfiler')


    @classmethod
    def smart_get(cls, id_or_name, *args, **kwargs):
        """For compatibility with Job.add_object.

        @param cls: Implicit class object.
        @param id_or_name: The ID or name to get.
        @param args: Non-keyword arguments.
        @param kwargs: Keyword arguments.
        """
        return cls.objects.get(pk=id_or_name)


    def job(self):
        """Returns the job if it exists, or else None."""
        jobs = self.job_set.all()
        assert jobs.count() <= 1
        # Conditional expression instead of the error-prone 'x and y or z'
        # idiom, which silently misbehaves when y evaluates falsy.
        return jobs[0] if jobs else None


    class Meta:
        """Metadata for class ParameterizedJob."""
        db_table = 'afe_parameterized_jobs'

    def __unicode__(self):
        return u'%s (parameterized) - %s' % (self.test.name, self.job())
1150
1151
class ParameterizedJobProfiler(dbmodels.Model):
    """
    A profiler to run on a parameterized job
    """
    # Each profiler may appear at most once per job; see Meta.unique_together.
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    profiler = dbmodels.ForeignKey(Profiler)

    class Meta:
        """Metadata for class ParameterizedJobProfiler."""
        db_table = 'afe_parameterized_jobs_profilers'
        unique_together = ('parameterized_job', 'profiler')
1163
1164
class ParameterizedJobProfilerParameter(dbmodels.Model):
    """
    A parameter for a profiler in a parameterized job
    """
    # Parameter names are unique per profiler; see Meta.unique_together.
    parameterized_job_profiler = dbmodels.ForeignKey(ParameterizedJobProfiler)
    parameter_name = dbmodels.CharField(max_length=255)
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobProfilerParameter."""
        db_table = 'afe_parameterized_job_profiler_parameters'
        unique_together = ('parameterized_job_profiler', 'parameter_name')

    def __unicode__(self):
        return u'%s - %s' % (self.parameterized_job_profiler.profiler.name,
                             self.parameter_name)
1183
1184
class ParameterizedJobParameter(dbmodels.Model):
    """
    Parameters for a parameterized job
    """
    # Each declared test parameter gets at most one value per job; see
    # Meta.unique_together.
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    test_parameter = dbmodels.ForeignKey(TestParameter)
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobParameter."""
        db_table = 'afe_parameterized_job_parameters'
        unique_together = ('parameterized_job', 'test_parameter')

    def __unicode__(self):
        return u'%s - %s' % (self.parameterized_job.job().name,
                             self.test_parameter.name)
1203
1204
class JobManager(model_logic.ExtendedManager):
    'Custom manager to provide efficient status counts querying.'
    def get_status_counts(self, job_ids):
        """Returns a dict mapping the given job IDs to their status count dicts.

        @param job_ids: A list of job IDs.
        """
        if not job_ids:
            return {}
        # Interpolate the IDs into the IN (...) clause; callers supply
        # integer job IDs from internal queries.
        id_list = '(%s)' % ','.join(str(job_id) for job_id in job_ids)
        cursor = connection.cursor()
        cursor.execute("""
            SELECT job_id, status, aborted, complete, COUNT(*)
            FROM afe_host_queue_entries
            WHERE job_id IN %s
            GROUP BY job_id, status, aborted, complete
            """ % id_list)
        # Every requested job gets an entry, even with no queue entries.
        counts_by_job = dict((job_id, {}) for job_id in job_ids)
        for row in cursor.fetchall():
            job_id, status, aborted, complete, count = row
            full_status = HostQueueEntry.compute_full_status(status, aborted,
                                                             complete)
            status_counts = counts_by_job[job_id]
            status_counts[full_status] = (
                    status_counts.get(full_status, 0) + count)
        return counts_by_job
1230
1231
class Job(dbmodels.Model, model_logic.ModelExtensions):
    """\
    owner: username of job owner
    name: job name (does not have to be unique)
    priority: Integer priority value.  Higher is more important.
    control_file: contents of control file
    control_type: Client or Server
    created_on: date of job creation
    submitted_on: date of job submission
    synch_count: how many hosts should be used per autoserv execution
    run_verify: Whether or not to run the verify phase
    run_reset: Whether or not to run the reset phase
    timeout: DEPRECATED - hours from queuing time until job times out
    timeout_mins: minutes from job queuing time until the job times out
    max_runtime_hrs: DEPRECATED - hours from job starting time until job
                     times out
    max_runtime_mins: minutes from job starting time until job times out
    email_list: list of people to email on completion delimited by any of:
                white space, ',', ':', ';'
    dependency_labels: many-to-many relationship with labels corresponding to
                       job dependencies
    reboot_before: Never, If dirty, or Always
    reboot_after: Never, If all tests passed, or Always
    parse_failed_repair: if True, a failed repair launched by this job will
                         have its results parsed as part of the job.
    drone_set: The set of drones to run this job on
    parent_job: Parent job (optional)
    test_retry: Number of times to retry test if the test did not complete
                successfully. (optional, default: 0)
    require_ssp: Require server-side packaging unless require_ssp is set to
                 False. (optional, default: None)
    """

    # TODO: Investigate, if jobkeyval_set is really needed.
    # dynamic_suite will write them into an attached file for the drone, but
    # it doesn't seem like they are actually used. If they aren't used, remove
    # jobkeyval_set here.
    SERIALIZATION_LINKS_TO_FOLLOW = set(['dependency_labels',
                                         'hostqueueentry_set',
                                         'jobkeyval_set',
                                         'shard'])

    # SQL for selecting jobs that should be sent to shard.
    # We use raw sql as django filters were not optimized.
    # The following jobs are excluded by the SQL.
    #     - Non-aborted jobs known to shard as specified in |known_ids|.
    #       Note for jobs aborted on master, even if already known to shard,
    #       will be sent to shard again so that shard can abort them.
    #     - Completed jobs
    #     - Active jobs
    #     - Jobs without host_queue_entries
    NON_ABORTED_KNOWN_JOBS = '(t2.aborted = 0 AND t1.id IN (%(known_ids)s))'

    SQL_SHARD_JOBS = (
        'SELECT DISTINCT(t1.id) FROM afe_jobs t1 '
        'INNER JOIN afe_host_queue_entries t2  ON '
        '  (t1.id = t2.job_id AND t2.complete != 1 AND t2.active != 1 '
        '   %(check_known_jobs)s) '
        'LEFT OUTER JOIN afe_jobs_dependency_labels t3 ON (t1.id = t3.job_id) '
        'JOIN afe_shards_labels t4 '
        '  ON (t4.label_id = t3.label_id OR t4.label_id = t2.meta_host) '
        'WHERE t4.shard_id = %(shard_id)s'
        )

    # Jobs can be created with assigned hosts and have no dependency
    # labels nor meta_host.
    # We are looking for:
    #     - a job whose hqe's meta_host is null
    #     - a job whose hqe has a host
    #     - one of the host's labels matches the shard's label.
    # Non-aborted known jobs, completed jobs, active jobs, jobs
    # without hqe are excluded as we do with SQL_SHARD_JOBS.
    SQL_SHARD_JOBS_WITH_HOSTS = (
        'SELECT DISTINCT(t1.id) FROM afe_jobs t1 '
        'INNER JOIN afe_host_queue_entries t2 ON '
        '  (t1.id = t2.job_id AND t2.complete != 1 AND t2.active != 1 '
        '   AND t2.meta_host IS NULL AND t2.host_id IS NOT NULL '
        '   %(check_known_jobs)s) '
        'LEFT OUTER JOIN afe_hosts_labels t3 ON (t2.host_id = t3.host_id) '
        'WHERE (t3.label_id IN '
        '  (SELECT label_id FROM afe_shards_labels '
        '   WHERE shard_id = %(shard_id)s))'
        )

    # Even if we had filters about complete, active and aborted
    # bits in the above two SQLs, there is a chance that
    # the result may still contain a job with an hqe with 'complete=1'
    # or 'active=1' or 'aborted=0 and afe_job.id in known jobs.'
    # This happens when a job has two (or more) hqes and at least
    # one hqe has different bits than others.
    # We use a second sql to ensure we exclude all un-desired jobs.
    SQL_JOBS_TO_EXCLUDE =(
        'SELECT t1.id FROM afe_jobs t1 '
        'INNER JOIN afe_host_queue_entries t2 ON '
        '  (t1.id = t2.job_id) '
        'WHERE (t1.id in (%(candidates)s) '
        '  AND (t2.complete=1 OR t2.active=1 '
        '  %(check_known_jobs)s))'
        )
1331
1332    def _deserialize_relation(self, link, data):
1333        if link in ['hostqueueentry_set', 'jobkeyval_set']:
1334            for obj in data:
1335                obj['job_id'] = self.id
1336
1337        super(Job, self)._deserialize_relation(link, data)
1338
1339
    def custom_deserialize_relation(self, link, data):
        """Deserialize the 'shard' relation, the only custom link for Job.

        @param link: Name of the relation; must be 'shard'.
        @param data: Serialized Shard data.
        """
        assert link == 'shard', 'Link %s should not be deserialized' % link
        self.shard = Shard.deserialize(data)
1343
1344
    def sanity_check_update_from_shard(self, shard, updated_serialized):
        """Check that an update pushed by a shard may be applied to this job.

        @param shard: The shard the update was received from.
        @param updated_serialized: The serialized job data being pushed.
        @raises error.UnallowedRecordsSentToMaster: If this job is assigned
            to a different shard than the sender.
        """
        # If the job got aborted on the master after the client fetched it
        # no shard_id will be set. The shard might still push updates though,
        # as the job might complete before the abort bit syncs to the shard.
        # Alternative considered: The master scheduler could be changed to not
        # set aborted jobs to completed that are sharded out. But that would
        # require database queries and seemed more complicated to implement.
        # This seems safe to do, as there won't be updates pushed from the
        # wrong shard: shards should be powered off and wiped when they are
        # removed from the master.
        if self.shard_id and self.shard_id != shard.id:
            raise error.UnallowedRecordsSentToMaster(
                'Job id=%s is assigned to shard (%s). Cannot update it with %s '
                'from shard %s.' % (self.id, self.shard_id, updated_serialized,
                                    shard.id))
1360
1361
    # TIMEOUT is deprecated.
    DEFAULT_TIMEOUT = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_default', default=24)
    DEFAULT_TIMEOUT_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_mins_default', default=24*60)
    # MAX_RUNTIME_HRS is deprecated. Will be removed after switch to mins is
    # completed.
    DEFAULT_MAX_RUNTIME_HRS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_hrs_default', default=72)
    DEFAULT_MAX_RUNTIME_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_mins_default', default=72*60)
    DEFAULT_PARSE_FAILED_REPAIR = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'parse_failed_repair_default', type=bool,
        default=False)

    # Model fields; see the class docstring for the meaning of each.
    owner = dbmodels.CharField(max_length=255)
    name = dbmodels.CharField(max_length=255)
    priority = dbmodels.SmallIntegerField(default=priorities.Priority.DEFAULT)
    control_file = dbmodels.TextField(null=True, blank=True)
    control_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices(),
        blank=True, # to allow 0
        default=control_data.CONTROL_TYPE.CLIENT)
    created_on = dbmodels.DateTimeField()
    synch_count = dbmodels.IntegerField(blank=True, default=0)
    timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
    run_verify = dbmodels.BooleanField(default=False)
    email_list = dbmodels.CharField(max_length=250, blank=True)
    dependency_labels = (
            dbmodels.ManyToManyField(Label, blank=True,
                                     db_table='afe_jobs_dependency_labels'))
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    parse_failed_repair = dbmodels.BooleanField(
        default=DEFAULT_PARSE_FAILED_REPAIR)
    # max_runtime_hrs is deprecated. Will be removed after switch to mins is
    # completed.
    max_runtime_hrs = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_HRS)
    max_runtime_mins = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_MINS)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)

    parameterized_job = dbmodels.ForeignKey(ParameterizedJob, null=True,
                                            blank=True)

    parent_job = dbmodels.ForeignKey('self', blank=True, null=True)

    test_retry = dbmodels.IntegerField(blank=True, default=0)

    run_reset = dbmodels.BooleanField(default=True)

    timeout_mins = dbmodels.IntegerField(default=DEFAULT_TIMEOUT_MINS)

    # If this is None on the master, a slave should be found.
    # If this is None on a slave, it should be synced back to the master
    shard = dbmodels.ForeignKey(Shard, blank=True, null=True)

    # If this is None, server-side packaging will be used for server side test,
    # unless it's disabled in global config AUTOSERV/enable_ssp_container.
    require_ssp = dbmodels.NullBooleanField(default=None, blank=True, null=True)

    # custom manager
    objects = JobManager()
1428
1429
    @decorators.cached_property
    def labels(self):
        """All the labels of this job.

        @returns: A list of this job's dependency labels; computed once and
            cached by the decorator.
        """
        # We need to convert dependency_labels to a list, because all() gives us
        # back an iterator, and storing/caching an iterator means we'd only be
        # able to read from it once.
        return list(self.dependency_labels.all())
1437
1438
1439    def is_server_job(self):
1440        """Returns whether this job is of type server."""
1441        return self.control_type == control_data.CONTROL_TYPE.SERVER
1442
1443
    @classmethod
    def create(cls, owner, options, hosts):
        """Creates a job.

        The job is created by taking some information (the listed args) and
        filling in the rest of the necessary information.

        @param cls: Implicit class object.
        @param owner: The owner for the job.
        @param options: An options object. Note: this dict is mutated to fill
            in defaults for reboot_before, reboot_after and timeout_mins.
        @param hosts: The hosts to use.

        @returns: The newly created Job.
        """
        # Fail early if the current user may not schedule on these hosts.
        AclGroup.check_for_acl_violation_hosts(hosts)

        control_file = options.get('control_file')
        parameterized_job = options.get('parameterized_job')

        # Fall back to the user's configured reboot preferences when the
        # options don't specify them.
        user = User.current_user()
        if options.get('reboot_before') is None:
            options['reboot_before'] = user.get_reboot_before_display()
        if options.get('reboot_after') is None:
            options['reboot_after'] = user.get_reboot_after_display()

        drone_set = DroneSet.resolve_name(options.get('drone_set'))

        # Derive the minutes-based timeout from the deprecated hours-based
        # one when only the latter was provided.
        if options.get('timeout_mins') is None and options.get('timeout'):
            options['timeout_mins'] = options['timeout'] * 60

        job = cls.add_object(
            owner=owner,
            name=options['name'],
            priority=options['priority'],
            control_file=control_file,
            control_type=options['control_type'],
            synch_count=options.get('synch_count'),
            # timeout needs to be deleted in the future.
            timeout=options.get('timeout'),
            timeout_mins=options.get('timeout_mins'),
            max_runtime_mins=options.get('max_runtime_mins'),
            run_verify=options.get('run_verify'),
            email_list=options.get('email_list'),
            reboot_before=options.get('reboot_before'),
            reboot_after=options.get('reboot_after'),
            parse_failed_repair=options.get('parse_failed_repair'),
            created_on=datetime.now(),
            drone_set=drone_set,
            parameterized_job=parameterized_job,
            parent_job=options.get('parent_job_id'),
            test_retry=options.get('test_retry'),
            run_reset=options.get('run_reset'),
            require_ssp=options.get('require_ssp'))

        # Direct many-to-many assignment (old-Django style).
        job.dependency_labels = options['dependencies']

        if options.get('keyvals'):
            for key, value in options['keyvals'].iteritems():
                JobKeyval.objects.create(job=job, key=key, value=value)

        return job
1503
1504
1505    @classmethod
1506    def assign_to_shard(cls, shard, known_ids):
1507        """Assigns unassigned jobs to a shard.
1508
1509        For all labels that have been assigned to this shard, all jobs that
1510        have this label, are assigned to this shard.
1511
1512        Jobs that are assigned to the shard but aren't already present on the
1513        shard are returned.
1514
1515        @param shard: The shard to assign jobs to.
1516        @param known_ids: List of all ids of incomplete jobs, the shard already
1517                          knows about.
1518                          This is used to figure out which jobs should be sent
1519                          to the shard. If shard_ids were used instead, jobs
1520                          would only be transferred once, even if the client
1521                          failed persisting them.
1522                          The number of unfinished jobs usually lies in O(1000).
1523                          Assuming one id takes 8 chars in the json, this means
1524                          overhead that lies in the lower kilobyte range.
1525                          A not in query with 5000 id's takes about 30ms.
1526
1527        @returns The job objects that should be sent to the shard.
1528        """
1529        # Disclaimer: Concurrent heartbeats should not occur in today's setup.
1530        # If this changes or they are triggered manually, this applies:
1531        # Jobs may be returned more than once by concurrent calls of this
1532        # function, as there is a race condition between SELECT and UPDATE.
1533        job_ids = set([])
1534        check_known_jobs_exclude = ''
1535        check_known_jobs_include = ''
1536
1537        if known_ids:
1538            check_known_jobs = (
1539                    cls.NON_ABORTED_KNOWN_JOBS %
1540                    {'known_ids': ','.join([str(i) for i in known_ids])})
1541            check_known_jobs_exclude = 'AND NOT ' + check_known_jobs
1542            check_known_jobs_include = 'OR ' + check_known_jobs
1543
1544        for sql in [cls.SQL_SHARD_JOBS, cls.SQL_SHARD_JOBS_WITH_HOSTS]:
1545            query = Job.objects.raw(sql % {
1546                    'check_known_jobs': check_known_jobs_exclude,
1547                    'shard_id': shard.id})
1548            job_ids |= set([j.id for j in query])
1549
1550        if job_ids:
1551            query = Job.objects.raw(
1552                    cls.SQL_JOBS_TO_EXCLUDE %
1553                    {'check_known_jobs': check_known_jobs_include,
1554                     'candidates': ','.join([str(i) for i in job_ids])})
1555            job_ids -= set([j.id for j in query])
1556
1557        if job_ids:
1558            Job.objects.filter(pk__in=job_ids).update(shard=shard)
1559            return list(Job.objects.filter(pk__in=job_ids).all())
1560        return []
1561
1562
1563    def save(self, *args, **kwargs):
1564        # The current implementation of parameterized jobs requires that only
1565        # control files or parameterized jobs are used. Using the image
1566        # parameter on autoupdate_ParameterizedJob doesn't mix pure
1567        # parameterized jobs and control files jobs, it does muck enough with
1568        # normal jobs by adding a parameterized id to them that this check will
1569        # fail. So for now we just skip this check.
1570        # cls.check_parameterized_job(control_file=self.control_file,
1571        #                             parameterized_job=self.parameterized_job)
1572        super(Job, self).save(*args, **kwargs)
1573
1574
1575    def queue(self, hosts, atomic_group=None, is_template=False):
1576        """Enqueue a job on the given hosts.
1577
1578        @param hosts: The hosts to use.
1579        @param atomic_group: The associated atomic group.
1580        @param is_template: Whether the status should be "Template".
1581        """
1582        if not hosts:
1583            if atomic_group:
1584                # No hosts or labels are required to queue an atomic group
1585                # Job.  However, if they are given, we respect them below.
1586                atomic_group.enqueue_job(self, is_template=is_template)
1587            else:
1588                # hostless job
1589                entry = HostQueueEntry.create(job=self, is_template=is_template)
1590                entry.save()
1591            return
1592
1593        for host in hosts:
1594            host.enqueue_job(self, atomic_group=atomic_group,
1595                             is_template=is_template)
1596
1597
1598    def user(self):
1599        """Gets the user of this job, or None if it doesn't exist."""
1600        try:
1601            return User.objects.get(login=self.owner)
1602        except self.DoesNotExist:
1603            return None
1604
1605
1606    def abort(self):
1607        """Aborts this job."""
1608        for queue_entry in self.hostqueueentry_set.all():
1609            queue_entry.abort()
1610
1611
1612    def tag(self):
1613        """Returns a string tag for this job."""
1614        return server_utils.get_job_tag(self.id, self.owner)
1615
1616
1617    def keyval_dict(self):
1618        """Returns all keyvals for this job as a dictionary."""
1619        return dict((keyval.key, keyval.value)
1620                    for keyval in self.jobkeyval_set.all())
1621
1622
1623    @classmethod
1624    def get_attribute_model(cls):
1625        """Return the attribute model.
1626
1627        Override method in parent class. This class is called when
1628        deserializing the one-to-many relationship betwen Job and JobKeyval.
1629        On deserialization, we will try to clear any existing job keyvals
1630        associated with a job to avoid any inconsistency.
1631        Though Job doesn't implement ModelWithAttribute, we still treat
1632        it as an attribute model for this purpose.
1633
1634        @returns: The attribute model of Job.
1635        """
1636        return JobKeyval
1637
1638
1639    class Meta:
1640        """Metadata for class Job."""
1641        db_table = 'afe_jobs'
1642
1643    def __unicode__(self):
1644        return u'%s (%s-%s)' % (self.name, self.id, self.owner)
1645
1646
class JobKeyval(dbmodels.Model, model_logic.ModelExtensions):
    """Keyvals (key/value annotations) associated with jobs."""

    # Serialization behavior for master <-> shard synchronization.
    SERIALIZATION_LINKS_TO_KEEP = set(['job'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['value'])

    job = dbmodels.ForeignKey(Job)
    key = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()


    @classmethod
    def get_record(cls, data):
        """Check the database for an identical record.

        Use job_id and key to search for an existing record.

        @raises: DoesNotExist, if no record found
        @raises: MultipleObjectsReturned if multiple records found.
        """
        # TODO(fdeng): We should use job_id and key together as
        #              a primary key in the db.
        return cls.objects.get(job_id=data['job_id'], key=data['key'])


    @classmethod
    def deserialize(cls, data):
        """Override deserialize in parent class.

        Do not deserialize id as id is not kept consistent on master and shards.

        @param data: A dictionary of data to deserialize.

        @returns: A JobKeyval object.
        """
        if data:
            # Robustness fix: use a default so a payload that already
            # lacks 'id' does not raise KeyError.
            data.pop('id', None)
        return super(JobKeyval, cls).deserialize(data)


    class Meta:
        """Metadata for class JobKeyval."""
        db_table = 'afe_job_keyvals'
1692
1693
class IneligibleHostQueue(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an ineligible host queue.

    A bare (job, host) association row; presumably marks hosts that are
    no longer eligible for a given job -- confirm against scheduler usage.
    """
    # The job this ineligibility record applies to.
    job = dbmodels.ForeignKey(Job)
    # The host that is ineligible for that job.
    host = dbmodels.ForeignKey(Host)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class IneligibleHostQueue."""
        db_table = 'afe_ineligible_host_queues'
1704
1705
class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents a host queue entry.

    Ties a Job to the host (or meta_host label / atomic group) it should
    run on, and tracks scheduling state through the status, active,
    complete and aborted fields.
    """

    # Serialization behavior for master <-> shard synchronization.
    SERIALIZATION_LINKS_TO_FOLLOW = set(['meta_host'])
    SERIALIZATION_LINKS_TO_KEEP = set(['host'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['aborted'])


    def custom_deserialize_relation(self, link, data):
        # Only the meta_host relation goes through this hook; it is
        # rebuilt as a Label from the serialized data.
        assert link == 'meta_host'
        self.meta_host = Label.deserialize(data)


    def sanity_check_update_from_shard(self, shard, updated_serialized,
                                       job_ids_sent):
        # Reject HQE updates coming from a shard for jobs the master
        # never sent to that shard.
        if self.job_id not in job_ids_sent:
            raise error.UnallowedRecordsSentToMaster(
                'Sent HostQueueEntry without corresponding '
                'job entry: %s' % updated_serialized)


    # Status constants shared with host_queue_entry_states.
    Status = host_queue_entry_states.Status
    ACTIVE_STATUSES = host_queue_entry_states.ACTIVE_STATUSES
    COMPLETE_STATUSES = host_queue_entry_states.COMPLETE_STATUSES
    PRE_JOB_STATUSES = host_queue_entry_states.PRE_JOB_STATUSES
    IDLE_PRE_JOB_STATUSES = host_queue_entry_states.IDLE_PRE_JOB_STATUSES

    job = dbmodels.ForeignKey(Job)
    host = dbmodels.ForeignKey(Host, blank=True, null=True)
    status = dbmodels.CharField(max_length=255)
    meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
                                    db_column='meta_host')
    active = dbmodels.BooleanField(default=False)
    complete = dbmodels.BooleanField(default=False)
    deleted = dbmodels.BooleanField(default=False)
    execution_subdir = dbmodels.CharField(max_length=255, blank=True,
                                          default='')
    # If atomic_group is set, this is a virtual HostQueueEntry that will
    # be expanded into many actual hosts within the group at schedule time.
    atomic_group = dbmodels.ForeignKey(AtomicGroup, blank=True, null=True)
    aborted = dbmodels.BooleanField(default=False)
    started_on = dbmodels.DateTimeField(null=True, blank=True)
    finished_on = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()


    def __init__(self, *args, **kwargs):
        super(HostQueueEntry, self).__init__(*args, **kwargs)
        # Track 'status' so save() can detect and log transitions (see
        # on_attribute_changed).
        self._record_attributes(['status'])


    @classmethod
    def create(cls, job, host=None, meta_host=None, atomic_group=None,
                 is_template=False):
        """Creates a new host queue entry.

        @param cls: Implicit class object.
        @param job: The associated job.
        @param host: The associated host.
        @param meta_host: The associated meta host.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        if is_template:
            status = cls.Status.TEMPLATE
        else:
            status = cls.Status.QUEUED

        # NOTE: the entry is constructed but not saved; callers persist it.
        return cls(job=job, host=host, meta_host=meta_host,
                   atomic_group=atomic_group, status=status)


    def save(self, *args, **kwargs):
        # Derive active/complete from status before persisting, then let
        # the attribute tracker log any status transition.
        self._set_active_and_complete()
        super(HostQueueEntry, self).save(*args, **kwargs)
        self._check_for_updated_attributes()


    def execution_path(self):
        """
        Path to this entry's results (relative to the base results directory).
        """
        return server_utils.get_hqe_exec_path(self.job.tag(),
                                              self.execution_subdir)


    def host_or_metahost_name(self):
        """Returns the first non-None name found in priority order.

        The priority order checked is: (1) host name; (2) meta host name; and
        (3) atomic group name.
        """
        if self.host:
            return self.host.hostname
        elif self.meta_host:
            return self.meta_host.name
        else:
            assert self.atomic_group, "no host, meta_host or atomic group!"
            return self.atomic_group.name


    def _set_active_and_complete(self):
        # Map the status string onto the (active, complete) flag pair;
        # a status outside both sets clears both flags.
        if self.status in self.ACTIVE_STATUSES:
            self.active, self.complete = True, False
        elif self.status in self.COMPLETE_STATUSES:
            self.active, self.complete = False, True
        else:
            self.active, self.complete = False, False


    def on_attribute_changed(self, attribute, old_value):
        # Called by the attribute tracker; only 'status' is recorded, so
        # this logs every status transition for this entry.
        assert attribute == 'status'
        logging.info('%s/%d (%d) -> %s', self.host, self.job.id, self.id,
                     self.status)


    def is_meta_host_entry(self):
        """True if this entry has a meta_host instead of a host."""
        return self.host is None and self.meta_host is not None


    # This code is shared between rpc_interface and models.HostQueueEntry.
    # Sadly due to circular imports between the 2 (crbug.com/230100) making it
    # a class method was the best way to refactor it. Attempting to put it in
    # rpc_utils or a new utils module failed as that would require us to import
    # models.py but to call it from here we would have to import the utils.py
    # thus creating a cycle.
    @classmethod
    def abort_host_queue_entries(cls, host_queue_entries):
        """Aborts a collection of host_queue_entries.

        Abort these host queue entry and all host queue entries of jobs created
        by them.

        @param host_queue_entries: List of host queue entries we want to abort.
        """
        # This isn't completely immune to race conditions since it's not atomic,
        # but it should be safe given the scheduler's behavior.

        # TODO(milleral): crbug.com/230100
        # The |abort_host_queue_entries| rpc does nearly exactly this,
        # however, trying to re-use the code generates some horrible
        # circular import error.  It'd be nice to refactor things around
        # sometime so the code could be reused.

        # Fixpoint algorithm to find the whole tree of HQEs to abort to
        # minimize the total number of database queries:
        children = set()
        new_children = set(host_queue_entries)
        while new_children:
            children.update(new_children)
            new_child_ids = [hqe.job_id for hqe in new_children]
            new_children = HostQueueEntry.objects.filter(
                    job__parent_job__in=new_child_ids,
                    complete=False, aborted=False).all()
            # To handle circular parental relationships
            new_children = set(new_children) - children

        # Associate a user with the host queue entries that we're about
        # to abort so that we can look up who to blame for the aborts.
        now = datetime.now()
        user = User.current_user()
        aborted_hqes = [AbortedHostQueueEntry(queue_entry=hqe,
                aborted_by=user, aborted_on=now) for hqe in children]
        AbortedHostQueueEntry.objects.bulk_create(aborted_hqes)
        # Bulk update all of the HQEs to set the abort bit.
        child_ids = [hqe.id for hqe in children]
        HostQueueEntry.objects.filter(id__in=child_ids).update(aborted=True)


    def abort(self):
        """ Aborts this host queue entry.

        Abort this host queue entry and all host queue entries of jobs created by
        this one.

        """
        # Entries that are already done or already aborted are left alone.
        if not self.complete and not self.aborted:
            HostQueueEntry.abort_host_queue_entries([self])


    @classmethod
    def compute_full_status(cls, status, aborted, complete):
        """Returns a modified status msg if the host queue entry was aborted.

        @param cls: Implicit class object.
        @param status: The original status message.
        @param aborted: Whether the host queue entry was aborted.
        @param complete: Whether the host queue entry was completed.
        """
        if aborted and not complete:
            return 'Aborted (%s)' % status
        return status


    def full_status(self):
        """Returns the full status of this host queue entry, as a string."""
        return self.compute_full_status(self.status, self.aborted,
                                        self.complete)


    def _postprocess_object_dict(self, object_dict):
        # Hook used when serializing to a dict: expose the derived
        # full_status alongside the raw fields.
        object_dict['full_status'] = self.full_status()


    class Meta:
        """Metadata for class HostQueueEntry."""
        db_table = 'afe_host_queue_entries'



    def __unicode__(self):
        hostname = None
        if self.host:
            hostname = self.host.hostname
        return u"%s/%d (%d)" % (hostname, self.job.id, self.id)
1923
1924
class AbortedHostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an aborted host queue entry.

    Records who aborted a host queue entry and when; at most one row per
    HostQueueEntry (the queue_entry link is the primary key).
    """
    queue_entry = dbmodels.OneToOneField(HostQueueEntry, primary_key=True)
    # The user responsible for the abort.
    aborted_by = dbmodels.ForeignKey(User)
    # Timestamp of the abort; always overwritten at save time (see save()).
    aborted_on = dbmodels.DateTimeField()

    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        # Stamp the abort time on every save; any caller-provided
        # aborted_on value is intentionally overwritten.
        self.aborted_on = datetime.now()
        super(AbortedHostQueueEntry, self).save(*args, **kwargs)

    class Meta:
        """Metadata for class AbortedHostQueueEntry."""
        db_table = 'afe_aborted_host_queue_entries'
1941
1942
class SpecialTask(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Tasks to run on hosts at the next time they are in the Ready state. Use this
    for high-priority tasks, such as forced repair or forced reinstall.

    host: host to run this task on
    task: special task to run
    time_requested: date and time the request for this task was made
    is_active: task is currently running
    is_complete: task has finished running
    is_aborted: task was aborted
    time_started: date and time the task started
    time_finished: date and time the task finished
    queue_entry: Host queue entry waiting on this task (or None, if task was not
                 started in preparation of a job)
    """
    Task = enum.Enum('Verify', 'Cleanup', 'Repair', 'Reset', 'Provision',
                     string_values=True)

    host = dbmodels.ForeignKey(Host, blank=False, null=False)
    task = dbmodels.CharField(max_length=64, choices=Task.choices(),
                              blank=False, null=False)
    requested_by = dbmodels.ForeignKey(User)
    time_requested = dbmodels.DateTimeField(auto_now_add=True, blank=False,
                                            null=False)
    is_active = dbmodels.BooleanField(default=False, blank=False, null=False)
    is_complete = dbmodels.BooleanField(default=False, blank=False, null=False)
    is_aborted = dbmodels.BooleanField(default=False, blank=False, null=False)
    time_started = dbmodels.DateTimeField(null=True, blank=True)
    queue_entry = dbmodels.ForeignKey(HostQueueEntry, blank=True, null=True)
    success = dbmodels.BooleanField(default=False, blank=False, null=False)
    time_finished = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Saves the task, attributing it to the queue entry's job owner.

        Consistency fix: accepts positional args like every other save()
        override in this file (previously only **kwargs were accepted).
        """
        if self.queue_entry:
            # A task created for a queue entry is credited to the job owner.
            self.requested_by = User.objects.get(
                    login=self.queue_entry.job.owner)
        super(SpecialTask, self).save(*args, **kwargs)


    def execution_path(self):
        """Returns the execution path for a special task."""
        return server_utils.get_special_task_exec_path(
                self.host.hostname, self.id, self.task, self.time_requested)


    # property to emulate HostQueueEntry.status
    @property
    def status(self):
        """Returns a host queue entry status appropriate for a special task."""
        return server_utils.get_special_task_status(
                self.is_complete, self.success, self.is_active)


    # property to emulate HostQueueEntry.started_on
    @property
    def started_on(self):
        """Returns the time at which this special task started."""
        return self.time_started


    @classmethod
    def schedule_special_task(cls, host, task):
        """Schedules a special task on a host if not already scheduled.

        @param cls: Implicit class object.
        @param host: The host to use.
        @param task: The task to schedule.

        @returns: The pending existing task if one matches, otherwise the
                  newly created (and saved) task.
        """
        existing_tasks = SpecialTask.objects.filter(host__id=host.id, task=task,
                                                    is_active=False,
                                                    is_complete=False)
        if existing_tasks:
            # A matching pending task already exists; reuse it rather than
            # queueing a duplicate.
            return existing_tasks[0]

        special_task = SpecialTask(host=host, task=task,
                                   requested_by=User.current_user())
        special_task.save()
        return special_task


    def abort(self):
        """ Abort this special task."""
        self.is_aborted = True
        self.save()


    def activate(self):
        """
        Sets a task as active and sets the time started to the current time.
        """
        logging.info('Starting: %s', self)
        self.is_active = True
        self.time_started = datetime.now()
        self.save()


    def finish(self, success):
        """Sets a task as completed.

        @param success: Whether or not the task was successful.
        """
        logging.info('Finished: %s', self)
        self.is_active = False
        self.is_complete = True
        self.success = success
        # Only record a finish time for tasks that actually started.
        if self.time_started:
            self.time_finished = datetime.now()
        self.save()


    class Meta:
        """Metadata for class SpecialTask."""
        db_table = 'afe_special_tasks'


    def __unicode__(self):
        result = u'Special Task %s (host %s, task %s, time %s)' % (
            self.id, self.host, self.task, self.time_requested)
        if self.is_complete:
            result += u' (completed)'
        elif self.is_active:
            result += u' (active)'

        return result
2071
2072
class StableVersion(dbmodels.Model, model_logic.ModelExtensions):
    """Per-board version record.

    Presumably maps a board name to its stable build version -- confirm
    against callers.
    """

    # Board name; unique, so each board has at most one row.
    board = dbmodels.CharField(max_length=255, unique=True)
    # Version string associated with the board.
    version = dbmodels.CharField(max_length=255)

    class Meta:
        """Metadata for class StableVersion."""
        db_table = 'afe_stable_versions'
2081