models.py revision 5c7ef30e9d5ad3fd49a57b3ca08520875b2cce6d
1# pylint: disable-msg=C0111
2
3import logging
4from datetime import datetime
5import django.core
6try:
7    from django.db import models as dbmodels, connection
8except django.core.exceptions.ImproperlyConfigured:
9    raise ImportError('Django database not yet configured. Import either '
10                       'setup_django_environment or '
11                       'setup_django_lite_environment from '
12                       'autotest_lib.frontend before any imports that '
13                       'depend on django models.')
14from xml.sax import saxutils
15import common
16from autotest_lib.frontend.afe import model_logic, model_attributes
17from autotest_lib.frontend.afe import rdb_model_extensions
18from autotest_lib.frontend import settings, thread_local
19from autotest_lib.client.common_lib import enum, error, host_protections
20from autotest_lib.client.common_lib import global_config
21from autotest_lib.client.common_lib import host_queue_entry_states
22from autotest_lib.client.common_lib import control_data, priorities, decorators
23from autotest_lib.client.common_lib import site_utils
24from autotest_lib.client.common_lib.cros.graphite import autotest_es
25from autotest_lib.server import utils as server_utils
26
# Job options and user preferences.
DEFAULT_REBOOT_BEFORE = model_attributes.RebootBefore.IF_DIRTY
# NOTE: this previously read RebootBefore.NEVER.  Both enums begin with
# 'Never', so the stored numeric value (0) is identical, but RebootAfter
# is the enum this default is validated against (see User.reboot_after,
# whose choices come from RebootAfter.choices()).
DEFAULT_REBOOT_AFTER = model_attributes.RebootAfter.NEVER
30
31
class AclAccessViolation(Exception):
    """\
    Raised when an operation is attempted without proper permissions as
    dictated by ACLs.
    """
37
38
class AtomicGroup(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    An atomic group defines a collection of hosts which must only be scheduled
    all at once.  Any host with a label having an atomic group will only be
    scheduled for a job at the same time as other hosts sharing that label.

    Required:
      name: A name for this atomic group, e.g. 'rack23' or 'funky_net'.
      max_number_of_machines: The maximum number of machines that will be
              scheduled at once when scheduling jobs to this atomic group.
              The job.synch_count is considered the minimum.

    Optional:
      description: Arbitrary text description of this group's purpose.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)
    # This magic value is the default to simplify the scheduler logic.
    # It must be "large".  The common use of atomic groups is to want all
    # machines in the group to be used, limits on which subset used are
    # often chosen via dependency labels.
    # TODO(dennisjeffrey): Revisit this so we don't have to assume that
    # "infinity" is around 3.3 million.
    INFINITE_MACHINES = 333333333
    max_number_of_machines = dbmodels.IntegerField(default=INFINITE_MACHINES)
    # Soft-delete flag used by ModelWithInvalid; editable in the admin UI
    # only when FULL_ADMIN is enabled.
    invalid = dbmodels.BooleanField(default=False,
                                  editable=settings.FULL_ADMIN)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()


    def enqueue_job(self, job, is_template=False):
        """Enqueue a job on an associated atomic group of hosts.

        @param job: A job to enqueue.
        @param is_template: Whether the status should be "Template".
        """
        queue_entry = HostQueueEntry.create(atomic_group=self, job=job,
                                            is_template=is_template)
        queue_entry.save()


    def clean_object(self):
        # Invoked on deletion (see ModelWithInvalid): detach all labels
        # that reference this atomic group.
        self.label_set.clear()


    class Meta:
        """Metadata for class AtomicGroup."""
        db_table = 'afe_atomic_groups'


    def __unicode__(self):
        return unicode(self.name)
94
95
class Label(model_logic.ModelWithInvalid, dbmodels.Model):
    """\
    Required:
      name: label name

    Optional:
      kernel_config: URL/path to kernel config for jobs run on this label.
      platform: If True, this is a platform label (defaults to False).
      only_if_needed: If True, a Host with this label can only be used if that
              label is requested by the job/test (either as the meta_host or
              in the job_dependencies).
      atomic_group: The atomic group associated with this label.
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    kernel_config = dbmodels.CharField(max_length=255, blank=True)
    platform = dbmodels.BooleanField(default=False)
    # Soft-delete flag used by ModelWithInvalid; editable in the admin UI
    # only when FULL_ADMIN is enabled.
    invalid = dbmodels.BooleanField(default=False,
                                    editable=settings.FULL_ADMIN)
    only_if_needed = dbmodels.BooleanField(default=False)

    name_field = 'name'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    atomic_group = dbmodels.ForeignKey(AtomicGroup, null=True, blank=True)


    def clean_object(self):
        # Invoked on deletion (see ModelWithInvalid): detach this label
        # from every host and test that references it.
        self.host_set.clear()
        self.test_set.clear()


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job on any host of this label.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        queue_entry = HostQueueEntry.create(meta_host=self, job=job,
                                            is_template=is_template,
                                            atomic_group=atomic_group)
        queue_entry.save()



    class Meta:
        """Metadata for class Label."""
        db_table = 'afe_labels'


    def __unicode__(self):
        return unicode(self.name)
148
149
class Shard(dbmodels.Model, model_logic.ModelExtensions):
    """A single shard of a sharded autotest deployment.

    hostname: the shard's hostname (unique).
    labels: labels associated with this shard; hosts carrying one of these
            labels are assigned to the shard (see Host.assign_to_shard).
    """

    hostname = dbmodels.CharField(max_length=255, unique=True)

    name_field = 'hostname'

    labels = dbmodels.ManyToManyField(Label, blank=True,
                                      db_table='afe_shards_labels')

    class Meta:
        """Metadata for class Shard."""
        db_table = 'afe_shards'


    def rpc_hostname(self):
        """Get the rpc hostname of the shard.

        @return: Just the shard hostname for all non-testing environments.
                 The address of the default gateway for vm testing environments.
        """
        # TODO: Figure out a better solution for testing. Since no 2 shards
        # can run on the same host, if the shard hostname is localhost we
        # conclude that it must be a vm in a test cluster. In such situations
        # a name of localhost:<port> is necessary to achieve the correct
        # afe links/redirection from the frontend (this happens through the
        # host), but for rpcs that are performed *on* the shard, they need to
        # use the address of the gateway.
        # In the virtual machine testing environment (i.e., puppylab), each
        # shard VM has a hostname like localhost:<port>. In the real cluster
        # environment, a shard node does not have 'localhost' for its hostname.
        # The following hostname substitution is needed only for the VM
        # in puppylab.
        # The 'hostname' should not be replaced in the case of real cluster.
        if site_utils.is_puppylab_vm(self.hostname):
            hostname = self.hostname.split(':')[0]
            return self.hostname.replace(
                    hostname, site_utils.DEFAULT_VM_GATEWAY)
        return self.hostname
188
189
class Drone(dbmodels.Model, model_logic.ModelExtensions):
    """
    A scheduler drone

    hostname: the drone's hostname
    """
    hostname = dbmodels.CharField(max_length=255, unique=True)

    name_field = 'hostname'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        # Drones are infrastructure; only superusers may create or edit them.
        if not User.current_user().is_superuser():
            raise Exception('Only superusers may edit drones')
        super(Drone, self).save(*args, **kwargs)


    def delete(self):
        # Mirror the save() restriction: deletion is superuser-only.
        if not User.current_user().is_superuser():
            raise Exception('Only superusers may delete drones')
        super(Drone, self).delete()


    class Meta:
        """Metadata for class Drone."""
        db_table = 'afe_drones'

    def __unicode__(self):
        return unicode(self.hostname)
220
221
class DroneSet(dbmodels.Model, model_logic.ModelExtensions):
    """
    A set of scheduler drones

    These will be used by the scheduler to decide what drones a job is allowed
    to run on.

    name: the drone set's name
    drones: the drones that are part of the set
    """
    # Both values are read from the global config once, at import time.
    DRONE_SETS_ENABLED = global_config.global_config.get_config_value(
            'SCHEDULER', 'drone_sets_enabled', type=bool, default=False)
    DEFAULT_DRONE_SET_NAME = global_config.global_config.get_config_value(
            'SCHEDULER', 'default_drone_set_name', default=None)

    name = dbmodels.CharField(max_length=255, unique=True)
    drones = dbmodels.ManyToManyField(Drone, db_table='afe_drone_sets_drones')

    name_field = 'name'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        # Drone sets are infrastructure; only superusers may edit them.
        if not User.current_user().is_superuser():
            raise Exception('Only superusers may edit drone sets')
        super(DroneSet, self).save(*args, **kwargs)


    def delete(self):
        # Mirror the save() restriction: deletion is superuser-only.
        if not User.current_user().is_superuser():
            raise Exception('Only superusers may delete drone sets')
        super(DroneSet, self).delete()


    @classmethod
    def drone_sets_enabled(cls):
        """Returns whether drone sets are enabled.

        @param cls: Implicit class object.
        """
        return cls.DRONE_SETS_ENABLED


    @classmethod
    def default_drone_set_name(cls):
        """Returns the default drone set name.

        @param cls: Implicit class object.
        """
        return cls.DEFAULT_DRONE_SET_NAME


    @classmethod
    def get_default(cls):
        """Gets the default drone set name, compatible with Job.add_object.

        @param cls: Implicit class object.
        """
        return cls.smart_get(cls.DEFAULT_DRONE_SET_NAME)


    @classmethod
    def resolve_name(cls, drone_set_name):
        """
        Returns the name of one of these, if not None, in order of preference:
        1) the drone set given,
        2) the current user's default drone set, or
        3) the global default drone set

        or returns None if drone sets are disabled

        @param cls: Implicit class object.
        @param drone_set_name: A drone set name.
        """
        if not cls.drone_sets_enabled():
            return None

        user = User.current_user()
        # None when the user has no drone_set assigned.
        user_drone_set_name = user.drone_set and user.drone_set.name

        return drone_set_name or user_drone_set_name or cls.get_default().name


    def get_drone_hostnames(self):
        """
        Gets the hostnames of all drones in this drone set
        """
        return set(self.drones.all().values_list('hostname', flat=True))


    class Meta:
        """Metadata for class DroneSet."""
        db_table = 'afe_drone_sets'

    def __unicode__(self):
        return unicode(self.name)
318
319
class User(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    login: user login name

    Optional:
    access_level: 0=User (default), 1=Admin, 100=Root
    """
    ACCESS_ROOT = 100
    ACCESS_ADMIN = 1
    ACCESS_USER = 0

    # Login name used for the synthetic system account created on demand
    # by current_user() when there is no thread-local user.
    AUTOTEST_SYSTEM = 'autotest_system'

    login = dbmodels.CharField(max_length=255, unique=True)
    access_level = dbmodels.IntegerField(default=ACCESS_USER, blank=True)

    # user preferences
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)
    show_experimental = dbmodels.BooleanField(default=False)

    name_field = 'login'
    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        # is this a new object being saved for the first time?
        first_time = (self.id is None)
        user = thread_local.get_user()
        # Non-superusers may only modify their own record.
        if user and not user.is_superuser() and user.login != self.login:
            raise AclAccessViolation("You cannot modify user " + self.login)
        super(User, self).save(*args, **kwargs)
        if first_time:
            # Every new user is automatically added to the 'Everyone' ACL.
            everyone = AclGroup.objects.get(name='Everyone')
            everyone.users.add(self)


    def is_superuser(self):
        """Returns whether the user has superuser access."""
        # Note: only ACCESS_ROOT (100) qualifies; ACCESS_ADMIN (1) does not.
        return self.access_level >= self.ACCESS_ROOT


    @classmethod
    def current_user(cls):
        """Returns the current user.

        @param cls: Implicit class object.
        """
        user = thread_local.get_user()
        if user is None:
            # No thread-local user: fall back to the system account,
            # creating it with root access if necessary.
            user, _ = cls.objects.get_or_create(login=cls.AUTOTEST_SYSTEM)
            user.access_level = cls.ACCESS_ROOT
            user.save()
        return user


    @classmethod
    def get_record(cls, data):
        """Check the database for an identical record.

        Check for a record with matching id and login. If one exists,
        return it. If one does not exist there is a possibility that
        the following cases have happened:
        1. Same id, different login
            We received: "1 chromeos-test"
            And we have: "1 debug-user"
        In this case we need to delete "1 debug_user" and insert
        "1 chromeos-test".

        2. Same login, different id:
            We received: "1 chromeos-test"
            And we have: "2 chromeos-test"
        In this case we need to delete "2 chromeos-test" and insert
        "1 chromeos-test".

        As long as this method deletes bad records and raises the
        DoesNotExist exception the caller will handle creating the
        new record.

        @raises: DoesNotExist, if a record with the matching login and id
                does not exist.
        """

        # Both the id and login should be unique but there are cases when
        # we might already have a user with the same login/id because
        # current_user will proactively create a user record if it doesn't
        # exist. Since we want to avoid conflict between the master and
        # shard, just delete any existing user records that don't match
        # what we're about to deserialize from the master.
        try:
            return cls.objects.get(login=data['login'], id=data['id'])
        except cls.DoesNotExist:
            cls.delete_matching_record(login=data['login'])
            cls.delete_matching_record(id=data['id'])
            raise


    class Meta:
        """Metadata for class User."""
        db_table = 'afe_users'

    def __unicode__(self):
        return unicode(self.login)
429
430
class Host(model_logic.ModelWithInvalid, rdb_model_extensions.AbstractHostModel,
           model_logic.ModelWithAttributes):
    """\
    Required:
    hostname

    optional:
    locked: if true, host is locked and will not be queued

    Internal:
    From AbstractHostModel:
        synch_id: currently unused
        status: string describing status of host
        invalid: true if the host has been deleted
        protection: indicates what can be done to this host during repair
        lock_time: DateTime at which the host was locked
        dirty: true if the host has been used without being rebooted
    Local:
        locked_by: user that locked the host, or null if the host is unlocked
    """

    SERIALIZATION_LINKS_TO_FOLLOW = set(['aclgroup_set',
                                         'hostattribute_set',
                                         'labels',
                                         'shard'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['invalid'])


    def custom_deserialize_relation(self, link, data):
        """Deserialize the 'shard' relation; no other link is supported."""
        assert link == 'shard', 'Link %s should not be deserialized' % link
        self.shard = Shard.deserialize(data)


    # Note: Only specify foreign keys here, specify all native host columns in
    # rdb_model_extensions instead.
    Protection = host_protections.Protection
    labels = dbmodels.ManyToManyField(Label, blank=True,
                                      db_table='afe_hosts_labels')
    locked_by = dbmodels.ForeignKey(User, null=True, blank=True, editable=False)
    name_field = 'hostname'
    objects = model_logic.ModelWithInvalidManager()
    valid_objects = model_logic.ValidObjectsManager()
    leased_objects = model_logic.LeasedHostManager()

    shard = dbmodels.ForeignKey(Shard, blank=True, null=True)

    def __init__(self, *args, **kwargs):
        super(Host, self).__init__(*args, **kwargs)
        # Track 'status' so changes to it are reported via
        # on_attribute_changed (see _check_for_updated_attributes in save()).
        self._record_attributes(['status'])


    @staticmethod
    def create_one_time_host(hostname):
        """Creates a one-time host.

        @param hostname: The name for the host.
        """
        query = Host.objects.filter(hostname=hostname)
        if query.count() == 0:
            # No host with this name yet: create it as invalid (one-time).
            host = Host(hostname=hostname, invalid=True)
            host.do_validate()
        else:
            host = query[0]
            if not host.invalid:
                raise model_logic.ValidationError({
                    'hostname' : '%s already exists in the autotest DB.  '
                        'Select it rather than entering it as a one time '
                        'host.' % hostname
                    })
        host.protection = host_protections.Protection.DO_NOT_REPAIR
        host.locked = False
        host.save()
        host.clean_object()
        return host


    @classmethod
    def assign_to_shard(cls, shard, known_ids):
        """Assigns hosts to a shard.

        For all labels that have been assigned to a shard, all hosts that
        have at least one of the shard's labels are assigned to the shard.
        Hosts that are assigned to the shard but aren't already present on the
        shard are returned.

        Board to shard mapping is many-to-one. Many different boards can be
        hosted in a shard. However, DUTs of a single board cannot be distributed
        into more than one shard.

        @param shard: The shard object to assign labels/hosts for.
        @param known_ids: List of all host-ids the shard already knows.
                          This is used to figure out which hosts should be sent
                          to the shard. If shard_ids were used instead, hosts
                          would only be transferred once, even if the client
                          failed persisting them.
                          The number of hosts usually lies in O(100), so the
                          overhead is acceptable.

        @returns the hosts objects that should be sent to the shard.
        """

        # Disclaimer: concurrent heartbeats should theoretically not occur in
        # the current setup. As they may be introduced in the near future,
        # this comment will be left here.

        # Sending stuff twice is acceptable, but forgetting something isn't.
        # Detecting duplicates on the client is easy, but here it's harder. The
        # following options were considered:
        # - SELECT ... WHERE and then UPDATE ... WHERE: Update might update more
        #   than select returned, as concurrently more hosts might have been
        #   inserted
        # - UPDATE and then SELECT WHERE shard=shard: select always returns all
        #   hosts for the shard, this is overhead
        # - SELECT and then UPDATE only selected without requerying afterwards:
        #   returns the old state of the records.
        host_ids = set(Host.objects.filter(
            labels__in=shard.labels.all(),
            leased=False
            ).exclude(
            id__in=known_ids,
            ).values_list('pk', flat=True))

        if host_ids:
            Host.objects.filter(pk__in=host_ids).update(shard=shard)
            return list(Host.objects.filter(pk__in=host_ids).all())
        return []

    def resurrect_object(self, old_object):
        """Restore a previously-invalidated host record (see ModelWithInvalid)."""
        super(Host, self).resurrect_object(old_object)
        # invalid hosts can be in use by the scheduler (as one-time hosts), so
        # don't change the status
        self.status = old_object.status


    def clean_object(self):
        # Invoked on deletion: detach the host from all ACL groups and labels.
        self.aclgroup_set.clear()
        self.labels.clear()


    def record_state(self, type_str, state, value, other_metadata=None):
        """Record metadata in elasticsearch.

        @param type_str: sets the _type field in elasticsearch db.
        @param state: string representing what state we are recording,
                      e.g. 'locked'
        @param value: value of the state, e.g. True
        @param other_metadata: Other metadata to store in metaDB.
        """
        metadata = {
            state: value,
            'hostname': self.hostname,
        }
        if other_metadata:
            metadata = dict(metadata.items() + other_metadata.items())
        autotest_es.post(use_http=True, type_str=type_str, metadata=metadata)


    def save(self, *args, **kwargs):
        # extra spaces in the hostname can be a sneaky source of errors
        self.hostname = self.hostname.strip()
        # is this a new object being saved for the first time?
        first_time = (self.id is None)
        if not first_time:
            AclGroup.check_for_acl_violation_hosts([self])
        # If locked is changed, send its status and user made the change to
        # metaDB. Locks are important in host history because if a device is
        # locked then we don't really care what state it is in.
        if self.locked and not self.locked_by:
            # Transition unlocked -> locked: stamp the locking user and time.
            self.locked_by = User.current_user()
            self.lock_time = datetime.now()
            self.record_state('lock_history', 'locked', self.locked,
                              {'changed_by': self.locked_by.login,
                               'lock_reason': self.lock_reason})
            self.dirty = True
        elif not self.locked and self.locked_by:
            # Transition locked -> unlocked: record it, then clear the stamp.
            self.record_state('lock_history', 'locked', self.locked,
                              {'changed_by': self.locked_by.login})
            self.locked_by = None
            self.lock_time = None
        super(Host, self).save(*args, **kwargs)
        if first_time:
            # Every new host is automatically added to the 'Everyone' ACL.
            everyone = AclGroup.objects.get(name='Everyone')
            everyone.hosts.add(self)
        self._check_for_updated_attributes()


    def delete(self):
        """Delete the host, aborting and marking deleted its queue entries."""
        AclGroup.check_for_acl_violation_hosts([self])
        for queue_entry in self.hostqueueentry_set.all():
            queue_entry.deleted = True
            queue_entry.abort()
        super(Host, self).delete()


    def on_attribute_changed(self, attribute, old_value):
        # Only 'status' is recorded (see __init__); log each transition.
        assert attribute == 'status'
        logging.info(self.hostname + ' -> ' + self.status)


    def enqueue_job(self, job, atomic_group=None, is_template=False):
        """Enqueue a job on this host.

        @param job: A job to enqueue.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        queue_entry = HostQueueEntry.create(host=self, job=job,
                                            is_template=is_template,
                                            atomic_group=atomic_group)
        # allow recovery of dead hosts from the frontend
        if not self.active_queue_entry() and self.is_dead():
            self.status = Host.Status.READY
            self.save()
        queue_entry.save()

        block = IneligibleHostQueue(job=job, host=self)
        block.save()


    def platform(self):
        """The platform of the host."""
        # TODO(showard): slightly hacky?
        platforms = self.labels.filter(platform=True)
        if len(platforms) == 0:
            return None
        return platforms[0]
    platform.short_description = 'Platform'


    @classmethod
    def check_no_platform(cls, hosts):
        """Verify the specified hosts have no associated platforms.

        @param cls: Implicit class object.
        @param hosts: The hosts to verify.
        @raises model_logic.ValidationError if any hosts already have a
            platform.
        """
        Host.objects.populate_relationships(hosts, Label, 'label_list')
        errors = []
        for host in hosts:
            platforms = [label.name for label in host.label_list
                         if label.platform]
            if platforms:
                # do a join, just in case this host has multiple platforms,
                # we'll be able to see it
                errors.append('Host %s already has a platform: %s' % (
                              host.hostname, ', '.join(platforms)))
        if errors:
            raise model_logic.ValidationError({'labels': '; '.join(errors)})


    def is_dead(self):
        """Returns whether the host is dead (has status repair failed)."""
        return self.status == Host.Status.REPAIR_FAILED


    def active_queue_entry(self):
        """Returns the active queue entry for this host, or None if none."""
        active = list(self.hostqueueentry_set.filter(active=True))
        if not active:
            return None
        assert len(active) == 1, ('More than one active entry for '
                                  'host ' + self.hostname)
        return active[0]


    def _get_attribute_model_and_args(self, attribute):
        # Hook for ModelWithAttributes: keyvals live in HostAttribute rows
        # keyed by (host, attribute).
        return HostAttribute, dict(host=self, attribute=attribute)


    @classmethod
    def get_attribute_model(cls):
        """Return the attribute model.

        Override method in parent class. See ModelExtensions for details.
        @returns: The attribute model of Host.
        """
        return HostAttribute


    class Meta:
        """Metadata for the Host class."""
        db_table = 'afe_hosts'


    def __unicode__(self):
        return unicode(self.hostname)
719
720
class HostAttribute(dbmodels.Model, model_logic.ModelExtensions):
    """Arbitrary keyvals associated with hosts."""

    SERIALIZATION_LINKS_TO_KEEP = set(['host'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['value'])
    host = dbmodels.ForeignKey(Host)
    attribute = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for the HostAttribute class."""
        db_table = 'afe_host_attributes'


    @classmethod
    def get_record(cls, data):
        """Check the database for an identical record.

        Use host_id and attribute to search for an existing record.

        @raises: DoesNotExist, if no record found
        @raises: MultipleObjectsReturned if multiple records found.
        """
        # TODO(fdeng): We should use host_id and attribute together as
        #              a primary key in the db.
        return cls.objects.get(host_id=data['host_id'],
                               attribute=data['attribute'])


    @classmethod
    def deserialize(cls, data):
        """Override deserialize in parent class.

        Do not deserialize id as id is not kept consistent on master and shards.

        @param data: A dictionary of data to deserialize.

        @returns: A HostAttribute object.
        """
        if data:
            # Drop 'id' if present.  Use a default so payloads that already
            # omit 'id' don't raise KeyError.
            data.pop('id', None)
        return super(HostAttribute, cls).deserialize(data)
765
766
767class Test(dbmodels.Model, model_logic.ModelExtensions):
768    """\
769    Required:
770    author: author name
771    description: description of the test
772    name: test name
773    time: short, medium, long
774    test_class: This describes the class for your the test belongs in.
775    test_category: This describes the category for your tests
776    test_type: Client or Server
777    path: path to pass to run_test()
778    sync_count:  is a number >=1 (1 being the default). If it's 1, then it's an
779                 async job. If it's >1 it's sync job for that number of machines
780                 i.e. if sync_count = 2 it is a sync job that requires two
781                 machines.
782    Optional:
783    dependencies: What the test requires to run. Comma deliminated list
784    dependency_labels: many-to-many relationship with labels corresponding to
785                       test dependencies.
786    experimental: If this is set to True production servers will ignore the test
787    run_verify: Whether or not the scheduler should run the verify stage
788    run_reset: Whether or not the scheduler should run the reset stage
789    test_retry: Number of times to retry test if the test did not complete
790                successfully. (optional, default: 0)
791    """
    # Coarse expected-duration buckets for a test (values start at 1).
    TestTime = enum.Enum('SHORT', 'MEDIUM', 'LONG', start_value=1)

    # Identity and categorization of the test.
    name = dbmodels.CharField(max_length=255, unique=True)
    author = dbmodels.CharField(max_length=255)
    test_class = dbmodels.CharField(max_length=255)
    test_category = dbmodels.CharField(max_length=255)
    # Comma-delimited textual dependencies (see class docstring); the
    # structured form is dependency_labels below.
    dependencies = dbmodels.CharField(max_length=255, blank=True)
    description = dbmodels.TextField(blank=True)
    # New tests default to experimental so production servers ignore them.
    experimental = dbmodels.BooleanField(default=True)
    run_verify = dbmodels.BooleanField(default=False)
    test_time = dbmodels.SmallIntegerField(choices=TestTime.choices(),
                                           default=TestTime.MEDIUM)
    # Client vs. server control file, per control_data.CONTROL_TYPE.
    test_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices())
    # >1 means a synchronous job needing that many machines at once.
    sync_count = dbmodels.IntegerField(default=1)
    # Path to the control file; unique per test.
    path = dbmodels.CharField(max_length=255, unique=True)
    test_retry = dbmodels.IntegerField(blank=True, default=0)
    run_reset = dbmodels.BooleanField(default=True)

    # Structured counterpart of `dependencies`: labels a host must have.
    dependency_labels = (
        dbmodels.ManyToManyField(Label, blank=True,
                                 db_table='afe_autotests_dependency_labels'))
    # Field used by smart_get()-style lookups in model_logic.
    name_field = 'name'
    objects = model_logic.ExtendedManager()
816
817
818    def admin_description(self):
819        """Returns a string representing the admin description."""
820        escaped_description = saxutils.escape(self.description)
821        return '<span style="white-space:pre">%s</span>' % escaped_description
822    admin_description.allow_tags = True
823    admin_description.short_description = 'Description'
824
825
    class Meta:
        """Metadata for class Test."""
        # Maps this model onto the afe_autotests table.
        db_table = 'afe_autotests'
829
830    def __unicode__(self):
831        return unicode(self.name)
832
833
class TestParameter(dbmodels.Model):
    """A single parameter declared by a Test.

    The (test, name) pair is unique, so a test cannot declare the same
    parameter twice.
    """
    test = dbmodels.ForeignKey(Test)
    name = dbmodels.CharField(max_length=255)

    class Meta:
        """Metadata for class TestParameter."""
        db_table = 'afe_test_parameters'
        unique_together = ('test', 'name')

    def __unicode__(self):
        """Shows the parameter name with its owning test's name."""
        return u'%s (%s)' % (self.name, self.test.name)
848
849
class Profiler(dbmodels.Model, model_logic.ModelExtensions):
    """A profiler that can be attached to test runs.

    Required:
    name: profiler name

    Optional:
    description: arbitrary text description
    """
    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.TextField(blank=True)

    # Field used by smart_get()-style lookups in model_logic.
    name_field = 'name'
    objects = model_logic.ExtendedManager()


    class Meta:
        """Metadata for class Profiler."""
        db_table = 'afe_profilers'

    def __unicode__(self):
        """Returns the profiler's name as its display string."""
        display_name = self.name
        return unicode(display_name)
872
873
class AclGroup(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Required:
    name: name of ACL group

    Optional:
    description: arbitrary description of group
    """

    SERIALIZATION_LINKS_TO_FOLLOW = set(['users'])

    name = dbmodels.CharField(max_length=255, unique=True)
    description = dbmodels.CharField(max_length=255, blank=True)
    users = dbmodels.ManyToManyField(User, blank=False,
                                     db_table='afe_acl_groups_users')
    hosts = dbmodels.ManyToManyField(Host, blank=True,
                                     db_table='afe_acl_groups_hosts')

    name_field = 'name'
    objects = model_logic.ExtendedManager()

    @staticmethod
    def check_for_acl_violation_hosts(hosts):
        """Verify the current user has access to the specified hosts.

        @param hosts: The hosts to verify against.
        @raises AclAccessViolation if the current user doesn't have access
            to a host.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        accessible_host_ids = set(
            host.id for host in Host.objects.filter(aclgroup__users=user))
        for host in hosts:
            # Check if the user has access to this host,
            # but only if it is not a metahost or a one-time-host.
            no_access = (isinstance(host, Host)
                         and not host.invalid
                         and int(host.id) not in accessible_host_ids)
            if no_access:
                raise AclAccessViolation("%s does not have access to %s" %
                                         (str(user), str(host)))


    @staticmethod
    def check_abort_permissions(queue_entries):
        """Look for queue entries that aren't abortable by the current user.

        An entry is not abortable if:
           * the job isn't owned by this user, and
           * the machine isn't ACL-accessible, or
           * the machine is in the "Everyone" ACL

        @param queue_entries: The queue entries to check.
        @raises AclAccessViolation if a queue entry is not abortable by the
            current user.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        not_owned = queue_entries.exclude(job__owner=user.login)
        # I do this using ID sets instead of just Django filters because
        # filtering on M2M dbmodels is broken in Django 0.96. It's better in
        # 1.0.
        # TODO: Use Django filters, now that we're using 1.0.
        accessible_ids = set(
            entry.id for entry
            in not_owned.filter(host__aclgroup__users__login=user.login))
        public_ids = set(entry.id for entry
                         in not_owned.filter(host__aclgroup__name='Everyone'))
        cannot_abort = [entry for entry in not_owned.select_related()
                        if entry.id not in accessible_ids
                        or entry.id in public_ids]
        if not cannot_abort:
            return
        entry_names = ', '.join('%s-%s/%s' % (entry.job.id, entry.job.owner,
                                              entry.host_or_metahost_name())
                                for entry in cannot_abort)
        raise AclAccessViolation('You cannot abort the following job entries: '
                                 + entry_names)


    def check_for_acl_violation_acl_group(self):
        """Verifies the current user has access to this ACL group.

        @raises AclAccessViolation if the current user doesn't have access to
            this ACL group.
        """
        user = User.current_user()
        if user.is_superuser():
            return
        if self.name == 'Everyone':
            raise AclAccessViolation("You cannot modify 'Everyone'!")
        if user not in self.users.all():
            raise AclAccessViolation("You do not have access to %s"
                                     % self.name)

    @staticmethod
    def on_host_membership_change():
        """Keeps the 'Everyone' group consistent after membership changes.

        Hosts in no ACL group are added to 'Everyone'; hosts that belong to
        another group as well are removed from 'Everyone'.
        """
        everyone = AclGroup.objects.get(name='Everyone')

        # find hosts that aren't in any ACL group and add them to Everyone
        # TODO(showard): this is a bit of a hack, since the fact that this query
        # works is kind of a coincidence of Django internals.  This trick
        # doesn't work in general (on all foreign key relationships).  I'll
        # replace it with a better technique when the need arises.
        orphaned_hosts = Host.valid_objects.filter(aclgroup__id__isnull=True)
        everyone.hosts.add(*orphaned_hosts.distinct())

        # find hosts in both Everyone and another ACL group, and remove them
        # from Everyone
        hosts_in_everyone = Host.valid_objects.filter(aclgroup__name='Everyone')
        acled_hosts = set()
        for host in hosts_in_everyone:
            # Has an ACL group other than Everyone
            if host.aclgroup_set.count() > 1:
                acled_hosts.add(host)
        everyone.hosts.remove(*acled_hosts)


    def delete(self):
        """Deletes this ACL group after permission checks.

        @raises AclAccessViolation if this is the 'Everyone' group or the
            current user may not modify this group.
        """
        if self.name == 'Everyone':
            raise AclAccessViolation("You cannot delete 'Everyone'!")
        self.check_for_acl_violation_acl_group()
        super(AclGroup, self).delete()
        self.on_host_membership_change()


    def add_current_user_if_empty(self):
        """Adds the current user if the set of users is empty."""
        if not self.users.count():
            self.users.add(User.current_user())


    def perform_after_save(self, change):
        """Called after a save.

        @param change: Whether there was a change.
        """
        if not change:
            self.users.add(User.current_user())
        self.add_current_user_if_empty()
        self.on_host_membership_change()


    def save(self, *args, **kwargs):
        """Saves the group, checking ACLs and running post-save bookkeeping."""
        change = bool(self.id)
        if change:
            # Check the original object for an ACL violation
            AclGroup.objects.get(id=self.id).check_for_acl_violation_acl_group()
        super(AclGroup, self).save(*args, **kwargs)
        self.perform_after_save(change)


    class Meta:
        """Metadata for class AclGroup."""
        db_table = 'afe_acl_groups'

    def __unicode__(self):
        return unicode(self.name)
1036
1037
class Kernel(dbmodels.Model):
    """
    A kernel configuration for a parameterized job
    """
    version = dbmodels.CharField(max_length=255)
    cmdline = dbmodels.CharField(max_length=255, blank=True)

    @classmethod
    def create_kernels(cls, kernel_list):
        """Creates all kernels in the kernel list.

        @param cls: Implicit class object.
        @param kernel_list: A list of dictionaries that describe the kernels,
            in the same format as the 'kernel' argument to
            rpc_interface.generate_control_file.
        @return A list of the created kernels, or None if kernel_list is
            empty or None.
        """
        if not kernel_list:
            return None
        return [cls._create(kernel_info) for kernel_info in kernel_list]


    @classmethod
    def _create(cls, kernel_dict):
        """Fetches or creates the Kernel row described by kernel_dict."""
        version = kernel_dict.pop('version')
        cmdline = kernel_dict.pop('cmdline', '')

        # Anything left over is not a recognized kernel option.
        if kernel_dict:
            raise Exception('Extraneous kernel arguments remain: %r'
                            % kernel_dict)

        kernel, _created = cls.objects.get_or_create(version=version,
                                                     cmdline=cmdline)
        return kernel


    class Meta:
        """Metadata for class Kernel."""
        db_table = 'afe_kernels'
        unique_together = ('version', 'cmdline')

    def __unicode__(self):
        """Shows the kernel version followed by its command line."""
        return u'%s %s' % (self.version, self.cmdline)
1081
1082
class ParameterizedJob(dbmodels.Model):
    """
    Auxiliary configuration for a parameterized job.
    """
    test = dbmodels.ForeignKey(Test)
    label = dbmodels.ForeignKey(Label, null=True)
    use_container = dbmodels.BooleanField(default=False)
    profile_only = dbmodels.BooleanField(default=False)
    upload_kernel_config = dbmodels.BooleanField(default=False)

    kernels = dbmodels.ManyToManyField(
            Kernel, db_table='afe_parameterized_job_kernels')
    profilers = dbmodels.ManyToManyField(
            Profiler, through='ParameterizedJobProfiler')


    @classmethod
    def smart_get(cls, id_or_name, *args, **kwargs):
        """For compatibility with Job.add_object.

        @param cls: Implicit class object.
        @param id_or_name: The ID or name to get.
        @param args: Non-keyword arguments.
        @param kwargs: Keyword arguments.
        """
        return cls.objects.get(pk=id_or_name)


    def job(self):
        """Returns the job if it exists, or else None."""
        jobs = self.job_set.all()
        assert jobs.count() <= 1
        # A conditional expression replaces the fragile 'and/or' idiom
        # (which would misbehave if the first operand were ever falsy).
        return jobs[0] if jobs else None


    class Meta:
        """Metadata for class ParameterizedJob."""
        db_table = 'afe_parameterized_jobs'

    def __unicode__(self):
        return u'%s (parameterized) - %s' % (self.test.name, self.job())
1124
1125
class ParameterizedJobProfiler(dbmodels.Model):
    """A profiler enabled on a particular parameterized job.

    Each profiler may be attached at most once per parameterized job.
    """
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    profiler = dbmodels.ForeignKey(Profiler)

    class Meta:
        """Metadata for class ParameterizedJobProfiler."""
        db_table = 'afe_parameterized_jobs_profilers'
        unique_together = ('parameterized_job', 'profiler')
1137
1138
class ParameterizedJobProfilerParameter(dbmodels.Model):
    """A single named parameter for a profiler in a parameterized job."""
    parameterized_job_profiler = dbmodels.ForeignKey(ParameterizedJobProfiler)
    parameter_name = dbmodels.CharField(max_length=255)
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobProfilerParameter."""
        db_table = 'afe_parameterized_job_profiler_parameters'
        unique_together = ('parameterized_job_profiler', 'parameter_name')

    def __unicode__(self):
        """Shows the profiler name together with the parameter name."""
        profiler_name = self.parameterized_job_profiler.profiler.name
        return u'%s - %s' % (profiler_name, self.parameter_name)
1157
1158
class ParameterizedJobParameter(dbmodels.Model):
    """A test-parameter value supplied to a parameterized job."""
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob)
    test_parameter = dbmodels.ForeignKey(TestParameter)
    parameter_value = dbmodels.TextField()
    parameter_type = dbmodels.CharField(
            max_length=8, choices=model_attributes.ParameterTypes.choices())

    class Meta:
        """Metadata for class ParameterizedJobParameter."""
        db_table = 'afe_parameterized_job_parameters'
        unique_together = ('parameterized_job', 'test_parameter')

    def __unicode__(self):
        """Shows the owning job's name together with the parameter name."""
        job_name = self.parameterized_job.job().name
        return u'%s - %s' % (job_name, self.test_parameter.name)
1177
1178
class JobManager(model_logic.ExtendedManager):
    'Custom manager to provide efficient status counts querying.'
    def get_status_counts(self, job_ids):
        """Returns a dict mapping the given job IDs to their status count dicts.

        @param job_ids: A list of job IDs.
        @returns A dict of the form {job_id: {full_status: count, ...}, ...},
            where full_status is computed by HostQueueEntry.compute_full_status
            from the (status, aborted, complete) columns.
        """
        if not job_ids:
            return {}
        # Use DB-API placeholders instead of interpolating the ids into the
        # SQL string, so the driver escapes the values for us.
        placeholders = ','.join(['%s'] * len(job_ids))
        cursor = connection.cursor()
        cursor.execute("""
            SELECT job_id, status, aborted, complete, COUNT(*)
            FROM afe_host_queue_entries
            WHERE job_id IN (%s)
            GROUP BY job_id, status, aborted, complete
            """ % placeholders, list(job_ids))
        all_job_counts = dict((job_id, {}) for job_id in job_ids)
        for job_id, status, aborted, complete, count in cursor.fetchall():
            job_dict = all_job_counts[job_id]
            full_status = HostQueueEntry.compute_full_status(status, aborted,
                                                             complete)
            job_dict.setdefault(full_status, 0)
            job_dict[full_status] += count
        return all_job_counts
1204
1205
1206class Job(dbmodels.Model, model_logic.ModelExtensions):
1207    """\
1208    owner: username of job owner
1209    name: job name (does not have to be unique)
1210    priority: Integer priority value.  Higher is more important.
1211    control_file: contents of control file
1212    control_type: Client or Server
1213    created_on: date of job creation
1214    submitted_on: date of job submission
1215    synch_count: how many hosts should be used per autoserv execution
1216    run_verify: Whether or not to run the verify phase
1217    run_reset: Whether or not to run the reset phase
1218    timeout: DEPRECATED - hours from queuing time until job times out
1219    timeout_mins: minutes from job queuing time until the job times out
1220    max_runtime_hrs: DEPRECATED - hours from job starting time until job
1221                     times out
1222    max_runtime_mins: minutes from job starting time until job times out
1223    email_list: list of people to email on completion delimited by any of:
1224                white space, ',', ':', ';'
1225    dependency_labels: many-to-many relationship with labels corresponding to
1226                       job dependencies
1227    reboot_before: Never, If dirty, or Always
1228    reboot_after: Never, If all tests passed, or Always
1229    parse_failed_repair: if True, a failed repair launched by this job will have
1230    its results parsed as part of the job.
1231    drone_set: The set of drones to run this job on
1232    parent_job: Parent job (optional)
1233    test_retry: Number of times to retry test if the test did not complete
1234                successfully. (optional, default: 0)
1235    require_ssp: Require server-side packaging unless require_ssp is set to
1236                 False. (optional, default: None)
1237    """
1238
    # TODO: Investigate, if jobkeyval_set is really needed.
    # dynamic_suite will write them into an attached file for the drone, but
    # it doesn't seem like they are actually used. If they aren't used, remove
    # jobkeyval_set here.
    SERIALIZATION_LINKS_TO_FOLLOW = set(['dependency_labels',
                                         'hostqueueentry_set',
                                         'jobkeyval_set',
                                         'shard'])

    # SQL for selecting jobs that should be sent to shard.
    # We use raw sql as django filters were not optimized.
    # The following jobs are excluded by the SQL.
    #     - Non-aborted jobs known to shard as specified in |known_ids|.
    #       Note for jobs aborted on master, even if already known to shard,
    #       will be sent to shard again so that shard can abort them.
    #     - Completed jobs
    #     - Active jobs
    #     - Jobs without host_queue_entries
    # Fragment substituted into %(check_known_jobs)s in the queries below.
    NON_ABORTED_KNOWN_JOBS = '(t2.aborted = 0 AND t1.id IN (%(known_ids)s))'

    # Matches jobs whose dependency labels or hqe meta_host belong to the
    # shard's label set.
    SQL_SHARD_JOBS = (
        'SELECT DISTINCT(t1.id) FROM afe_jobs t1 '
        'INNER JOIN afe_host_queue_entries t2  ON '
        '  (t1.id = t2.job_id AND t2.complete != 1 AND t2.active != 1 '
        '   %(check_known_jobs)s) '
        'LEFT OUTER JOIN afe_jobs_dependency_labels t3 ON (t1.id = t3.job_id) '
        'WHERE (t3.label_id IN  '
        '  (SELECT label_id FROM afe_shards_labels '
        '   WHERE shard_id = %(shard_id)s) '
        '  OR t2.meta_host IN '
        '  (SELECT label_id FROM afe_shards_labels '
        '   WHERE shard_id = %(shard_id)s))'
        )

    # Jobs can be created with assigned hosts and have no dependency
    # labels nor meta_host.
    # We are looking for:
    #     - a job whose hqe's meta_host is null
    #     - a job whose hqe has a host
    #     - one of the host's labels matches the shard's label.
    # Non-aborted known jobs, completed jobs, active jobs, jobs
    # without hqe are excluded as we do with SQL_SHARD_JOBS.
    SQL_SHARD_JOBS_WITH_HOSTS = (
        'SELECT DISTINCT(t1.id) FROM afe_jobs t1 '
        'INNER JOIN afe_host_queue_entries t2 ON '
        '  (t1.id = t2.job_id AND t2.complete != 1 AND t2.active != 1 '
        '   AND t2.meta_host IS NULL AND t2.host_id IS NOT NULL '
        '   %(check_known_jobs)s) '
        'LEFT OUTER JOIN afe_hosts_labels t3 ON (t2.host_id = t3.host_id) '
        'WHERE (t3.label_id IN '
        '  (SELECT label_id FROM afe_shards_labels '
        '   WHERE shard_id = %(shard_id)s))'
        )

    # Even if we had filters about complete, active and aborted
    # bits in the above two SQLs, there is a chance that
    # the result may still contain a job with an hqe with 'complete=1'
    # or 'active=1' or 'aborted=0 and afe_job.id in known jobs.'
    # This happens when a job has two (or more) hqes and at least
    # one hqe has different bits than others.
    # We use a second sql to ensure we exclude all un-desired jobs.
    SQL_JOBS_TO_EXCLUDE =(
        'SELECT t1.id FROM afe_jobs t1 '
        'INNER JOIN afe_host_queue_entries t2 ON '
        '  (t1.id = t2.job_id) '
        'WHERE (t1.id in (%(candidates)s) '
        '  AND (t2.complete=1 OR t2.active=1 '
        '  %(check_known_jobs)s))'
        )
1308
1309    def _deserialize_relation(self, link, data):
1310        if link in ['hostqueueentry_set', 'jobkeyval_set']:
1311            for obj in data:
1312                obj['job_id'] = self.id
1313
1314        super(Job, self)._deserialize_relation(link, data)
1315
1316
    def custom_deserialize_relation(self, link, data):
        """Deserializes the job's 'shard' link.

        @param link: Name of the relation; only 'shard' is supported here.
        @param data: Serialized representation of the shard.
        """
        assert link == 'shard', 'Link %s should not be deserialized' % link
        self.shard = Shard.deserialize(data)
1320
1321
    def sanity_check_update_from_shard(self, shard, updated_serialized):
        """Checks that an update pushed by a shard may be applied to this job.

        @param shard: The shard the update was sent from.
        @param updated_serialized: The serialized update; only used in the
            error message.
        @raises error.UnallowedRecordsSentToMaster if the job is assigned to
            a different shard than the sender.
        """
        # If the job got aborted on the master after the client fetched it
        # no shard_id will be set. The shard might still push updates though,
        # as the job might complete before the abort bit syncs to the shard.
        # Alternative considered: The master scheduler could be changed to not
        # set aborted jobs to completed that are sharded out. But that would
        # require database queries and seemed more complicated to implement.
        # This seems safe to do, as there won't be updates pushed from the
        # wrong shard; shards should be powered off and wiped when they are
        # removed from the master.
        if self.shard_id and self.shard_id != shard.id:
            raise error.UnallowedRecordsSentToMaster(
                'Job id=%s is assigned to shard (%s). Cannot update it with %s '
                'from shard %s.' % (self.id, self.shard_id, updated_serialized,
                                    shard.id))
1337
1338
    # Scheduler default knobs, read from the AUTOTEST_WEB section of the
    # global config once at import time.
    # TIMEOUT is deprecated.
    DEFAULT_TIMEOUT = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_default', default=24)
    DEFAULT_TIMEOUT_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_timeout_mins_default', default=24*60)
    # MAX_RUNTIME_HRS is deprecated. Will be removed after switch to mins is
    # completed.
    DEFAULT_MAX_RUNTIME_HRS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_hrs_default', default=72)
    DEFAULT_MAX_RUNTIME_MINS = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'job_max_runtime_mins_default', default=72*60)
    DEFAULT_PARSE_FAILED_REPAIR = global_config.global_config.get_config_value(
        'AUTOTEST_WEB', 'parse_failed_repair_default', type=bool,
        default=False)
1353
    owner = dbmodels.CharField(max_length=255)
    name = dbmodels.CharField(max_length=255)
    priority = dbmodels.SmallIntegerField(default=priorities.Priority.DEFAULT)
    control_file = dbmodels.TextField(null=True, blank=True)
    control_type = dbmodels.SmallIntegerField(
        choices=control_data.CONTROL_TYPE.choices(),
        blank=True, # to allow 0
        default=control_data.CONTROL_TYPE.CLIENT)
    created_on = dbmodels.DateTimeField()
    synch_count = dbmodels.IntegerField(blank=True, default=0)
    # timeout (hours) is deprecated; see timeout_mins below.
    timeout = dbmodels.IntegerField(default=DEFAULT_TIMEOUT)
    run_verify = dbmodels.BooleanField(default=False)
    email_list = dbmodels.CharField(max_length=250, blank=True)
    dependency_labels = (
            dbmodels.ManyToManyField(Label, blank=True,
                                     db_table='afe_jobs_dependency_labels'))
    reboot_before = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootBefore.choices(), blank=True,
        default=DEFAULT_REBOOT_BEFORE)
    reboot_after = dbmodels.SmallIntegerField(
        choices=model_attributes.RebootAfter.choices(), blank=True,
        default=DEFAULT_REBOOT_AFTER)
    parse_failed_repair = dbmodels.BooleanField(
        default=DEFAULT_PARSE_FAILED_REPAIR)
    # max_runtime_hrs is deprecated. Will be removed after switch to mins is
    # completed.
    max_runtime_hrs = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_HRS)
    max_runtime_mins = dbmodels.IntegerField(default=DEFAULT_MAX_RUNTIME_MINS)
    drone_set = dbmodels.ForeignKey(DroneSet, null=True, blank=True)

    # Auxiliary parameterized-job configuration, if any (see ParameterizedJob).
    parameterized_job = dbmodels.ForeignKey(ParameterizedJob, null=True,
                                            blank=True)

    parent_job = dbmodels.ForeignKey('self', blank=True, null=True)

    test_retry = dbmodels.IntegerField(blank=True, default=0)

    run_reset = dbmodels.BooleanField(default=True)

    timeout_mins = dbmodels.IntegerField(default=DEFAULT_TIMEOUT_MINS)

    # If this is None on the master, a slave should be found.
    # If this is None on a slave, it should be synced back to the master
    shard = dbmodels.ForeignKey(Shard, blank=True, null=True)

    # If this is None, server-side packaging will be used for server side test,
    # unless it's disabled in global config AUTOSERV/enable_ssp_container.
    require_ssp = dbmodels.NullBooleanField(default=None, blank=True, null=True)

    # custom manager
    objects = JobManager()
1405
1406
1407    @decorators.cached_property
1408    def labels(self):
1409        """All the labels of this job"""
1410        # We need to convert dependency_labels to a list, because all() gives us
1411        # back an iterator, and storing/caching an iterator means we'd only be
1412        # able to read from it once.
1413        return list(self.dependency_labels.all())
1414
1415
1416    def is_server_job(self):
1417        """Returns whether this job is of type server."""
1418        return self.control_type == control_data.CONTROL_TYPE.SERVER
1419
1420
1421    @classmethod
1422    def parameterized_jobs_enabled(cls):
1423        """Returns whether parameterized jobs are enabled.
1424
1425        @param cls: Implicit class object.
1426        """
1427        return global_config.global_config.get_config_value(
1428                'AUTOTEST_WEB', 'parameterized_jobs', type=bool)
1429
1430
1431    @classmethod
1432    def check_parameterized_job(cls, control_file, parameterized_job):
1433        """Checks that the job is valid given the global config settings.
1434
1435        First, either control_file must be set, or parameterized_job must be
1436        set, but not both. Second, parameterized_job must be set if and only if
1437        the parameterized_jobs option in the global config is set to True.
1438
1439        @param cls: Implict class object.
1440        @param control_file: A control file.
1441        @param parameterized_job: A parameterized job.
1442        """
1443        if not (bool(control_file) ^ bool(parameterized_job)):
1444            raise Exception('Job must have either control file or '
1445                            'parameterization, but not both')
1446
1447        parameterized_jobs_enabled = cls.parameterized_jobs_enabled()
1448        if control_file and parameterized_jobs_enabled:
1449            raise Exception('Control file specified, but parameterized jobs '
1450                            'are enabled')
1451        if parameterized_job and not parameterized_jobs_enabled:
1452            raise Exception('Parameterized job specified, but parameterized '
1453                            'jobs are not enabled')
1454
1455
    @classmethod
    def create(cls, owner, options, hosts):
        """Creates a job.

        The job is created by taking some information (the listed args) and
        filling in the rest of the necessary information.

        @param cls: Implicit class object.
        @param owner: The owner for the job.
        @param options: An options object.
        @param hosts: The hosts to use.
        @returns The newly created Job.
        @raises AclAccessViolation if the current user lacks access to any
            of the given hosts.
        """
        AclGroup.check_for_acl_violation_hosts(hosts)

        control_file = options.get('control_file')
        parameterized_job = options.get('parameterized_job')

        # The current implementation of parameterized jobs requires that only
        # control files or parameterized jobs are used. Using the image
        # parameter on autoupdate_ParameterizedJob doesn't mix pure
        # parameterized jobs and control files jobs, it does muck enough with
        # normal jobs by adding a parameterized id to them that this check will
        # fail. So for now we just skip this check.
        # cls.check_parameterized_job(control_file=control_file,
        #                             parameterized_job=parameterized_job)
        user = User.current_user()
        # Fall back to the user's reboot preferences when not specified.
        if options.get('reboot_before') is None:
            options['reboot_before'] = user.get_reboot_before_display()
        if options.get('reboot_after') is None:
            options['reboot_after'] = user.get_reboot_after_display()

        drone_set = DroneSet.resolve_name(options.get('drone_set'))

        # Convert the deprecated hours-based timeout if no minutes-based
        # value was supplied.
        if options.get('timeout_mins') is None and options.get('timeout'):
            options['timeout_mins'] = options['timeout'] * 60

        job = cls.add_object(
            owner=owner,
            name=options['name'],
            priority=options['priority'],
            control_file=control_file,
            control_type=options['control_type'],
            synch_count=options.get('synch_count'),
            # timeout needs to be deleted in the future.
            timeout=options.get('timeout'),
            timeout_mins=options.get('timeout_mins'),
            max_runtime_mins=options.get('max_runtime_mins'),
            run_verify=options.get('run_verify'),
            email_list=options.get('email_list'),
            reboot_before=options.get('reboot_before'),
            reboot_after=options.get('reboot_after'),
            parse_failed_repair=options.get('parse_failed_repair'),
            created_on=datetime.now(),
            drone_set=drone_set,
            parameterized_job=parameterized_job,
            parent_job=options.get('parent_job_id'),
            test_retry=options.get('test_retry'),
            run_reset=options.get('run_reset'),
            require_ssp=options.get('require_ssp'))

        job.dependency_labels = options['dependencies']

        if options.get('keyvals'):
            for key, value in options['keyvals'].iteritems():
                JobKeyval.objects.create(job=job, key=key, value=value)

        return job
1523
1524
1525    @classmethod
1526    def assign_to_shard(cls, shard, known_ids):
1527        """Assigns unassigned jobs to a shard.
1528
1529        For all labels that have been assigned to this shard, all jobs that
1530        have this label, are assigned to this shard.
1531
1532        Jobs that are assigned to the shard but aren't already present on the
1533        shard are returned.
1534
1535        @param shard: The shard to assign jobs to.
1536        @param known_ids: List of all ids of incomplete jobs, the shard already
1537                          knows about.
1538                          This is used to figure out which jobs should be sent
1539                          to the shard. If shard_ids were used instead, jobs
1540                          would only be transferred once, even if the client
1541                          failed persisting them.
1542                          The number of unfinished jobs usually lies in O(1000).
1543                          Assuming one id takes 8 chars in the json, this means
1544                          overhead that lies in the lower kilobyte range.
1545                          A not in query with 5000 id's takes about 30ms.
1546
1547        @returns The job objects that should be sent to the shard.
1548        """
1549        # Disclaimer: Concurrent heartbeats should not occur in today's setup.
1550        # If this changes or they are triggered manually, this applies:
1551        # Jobs may be returned more than once by concurrent calls of this
1552        # function, as there is a race condition between SELECT and UPDATE.
1553        job_ids = set([])
1554        check_known_jobs_exclude = ''
1555        check_known_jobs_include = ''
1556
1557        if known_ids:
1558            check_known_jobs = (
1559                    cls.NON_ABORTED_KNOWN_JOBS %
1560                    {'known_ids': ','.join([str(i) for i in known_ids])})
1561            check_known_jobs_exclude = 'AND NOT ' + check_known_jobs
1562            check_known_jobs_include = 'OR ' + check_known_jobs
1563
1564        for sql in [cls.SQL_SHARD_JOBS, cls.SQL_SHARD_JOBS_WITH_HOSTS]:
1565            query = Job.objects.raw(sql % {
1566                    'check_known_jobs': check_known_jobs_exclude,
1567                    'shard_id': shard.id})
1568            job_ids |= set([j.id for j in query])
1569
1570        if job_ids:
1571            query = Job.objects.raw(
1572                    cls.SQL_JOBS_TO_EXCLUDE %
1573                    {'check_known_jobs': check_known_jobs_include,
1574                     'candidates': ','.join([str(i) for i in job_ids])})
1575            job_ids -= set([j.id for j in query])
1576
1577        if job_ids:
1578            Job.objects.filter(pk__in=job_ids).update(shard=shard)
1579            return list(Job.objects.filter(pk__in=job_ids).all())
1580        return []
1581
1582
    def save(self, *args, **kwargs):
        """Save this job, deliberately skipping the parameterized-job
        consistency check (see the note below)."""
        # The current implementation of parameterized jobs requires that only
        # control files or parameterized jobs are used. Using the image
        # parameter on autoupdate_ParameterizedJob doesn't mix pure
        # parameterized jobs and control files jobs, it does muck enough with
        # normal jobs by adding a parameterized id to them that this check will
        # fail. So for now we just skip this check.
        # cls.check_parameterized_job(control_file=self.control_file,
        #                             parameterized_job=self.parameterized_job)
        super(Job, self).save(*args, **kwargs)
1593
1594
1595    def queue(self, hosts, atomic_group=None, is_template=False):
1596        """Enqueue a job on the given hosts.
1597
1598        @param hosts: The hosts to use.
1599        @param atomic_group: The associated atomic group.
1600        @param is_template: Whether the status should be "Template".
1601        """
1602        if not hosts:
1603            if atomic_group:
1604                # No hosts or labels are required to queue an atomic group
1605                # Job.  However, if they are given, we respect them below.
1606                atomic_group.enqueue_job(self, is_template=is_template)
1607            else:
1608                # hostless job
1609                entry = HostQueueEntry.create(job=self, is_template=is_template)
1610                entry.save()
1611            return
1612
1613        for host in hosts:
1614            host.enqueue_job(self, atomic_group=atomic_group,
1615                             is_template=is_template)
1616
1617
1618    def create_recurring_job(self, start_date, loop_period, loop_count, owner):
1619        """Creates a recurring job.
1620
1621        @param start_date: The starting date of the job.
1622        @param loop_period: How often to re-run the job, in seconds.
1623        @param loop_count: The re-run count.
1624        @param owner: The owner of the job.
1625        """
1626        rec = RecurringRun(job=self, start_date=start_date,
1627                           loop_period=loop_period,
1628                           loop_count=loop_count,
1629                           owner=User.objects.get(login=owner))
1630        rec.save()
1631        return rec.id
1632
1633
1634    def user(self):
1635        """Gets the user of this job, or None if it doesn't exist."""
1636        try:
1637            return User.objects.get(login=self.owner)
1638        except self.DoesNotExist:
1639            return None
1640
1641
1642    def abort(self):
1643        """Aborts this job."""
1644        for queue_entry in self.hostqueueentry_set.all():
1645            queue_entry.abort()
1646
1647
    def tag(self):
        """Returns a string tag for this job.

        The tag is computed by server_utils.get_job_tag from the job's id
        and owner.
        """
        return server_utils.get_job_tag(self.id, self.owner)
1651
1652
1653    def keyval_dict(self):
1654        """Returns all keyvals for this job as a dictionary."""
1655        return dict((keyval.key, keyval.value)
1656                    for keyval in self.jobkeyval_set.all())
1657
1658
    @classmethod
    def get_attribute_model(cls):
        """Return the attribute model.

        Override method in parent class. This class is called when
        deserializing the one-to-many relationship between Job and JobKeyval.
        On deserialization, we will try to clear any existing job keyvals
        associated with a job to avoid any inconsistency.
        Though Job doesn't implement ModelWithAttribute, we still treat
        it as an attribute model for this purpose.

        @returns: The attribute model of Job.
        """
        return JobKeyval
1673
1674
    class Meta:
        """Metadata for class Job."""
        # Maps this model onto the legacy afe_jobs table.
        db_table = 'afe_jobs'
1678
1679    def __unicode__(self):
1680        return u'%s (%s-%s)' % (self.name, self.id, self.owner)
1681
1682
class JobKeyval(dbmodels.Model, model_logic.ModelExtensions):
    """Keyvals associated with jobs"""

    # Serialization: keep the link to the owning job; 'value' may be updated
    # locally after the initial sync.
    SERIALIZATION_LINKS_TO_KEEP = set(['job'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['value'])

    job = dbmodels.ForeignKey(Job)
    key = dbmodels.CharField(max_length=90)
    value = dbmodels.CharField(max_length=300)

    objects = model_logic.ExtendedManager()


    @classmethod
    def get_record(cls, data):
        """Check the database for an identical record.

        Use job_id and key to search for a existing record.

        @raises: DoesNotExist, if no record found
        @raises: MultipleObjectsReturned if multiple records found.
        """
        # TODO(fdeng): We should use job_id and key together as
        #              a primary key in the db.
        return cls.objects.get(job_id=data['job_id'], key=data['key'])


    @classmethod
    def deserialize(cls, data):
        """Override deserialize in parent class.

        Do not deserialize id as id is not kept consistent on master and shards.

        @param data: A dictionary of data to deserialize.

        @returns: A JobKeyval object.
        """
        if data:
            # Drop the primary key if present; records are matched through
            # get_record() (job_id + key) instead.  Supplying a default makes
            # this robust against data serialized without an 'id' field, which
            # previously raised KeyError.
            data.pop('id', None)
        return super(JobKeyval, cls).deserialize(data)


    class Meta:
        """Metadata for class JobKeyval."""
        db_table = 'afe_job_keyvals'
1728
1729
class IneligibleHostQueue(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an ineligible host queue."""
    # Records a (job, host) pair; NOTE(review): presumably marks hosts that
    # must not be picked for the job's queue entries — the model itself only
    # stores the pair, so confirm semantics against the scheduler code.
    job = dbmodels.ForeignKey(Job)
    host = dbmodels.ForeignKey(Host)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class IneligibleHostQueue."""
        db_table = 'afe_ineligible_host_queues'
1740
1741
class HostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents a host queue entry."""

    # Master<->shard serialization behavior: follow the meta_host label,
    # keep the existing host link, and allow 'aborted' to be updated locally
    # after the initial sync.
    SERIALIZATION_LINKS_TO_FOLLOW = set(['meta_host'])
    SERIALIZATION_LINKS_TO_KEEP = set(['host'])
    SERIALIZATION_LOCAL_LINKS_TO_UPDATE = set(['aborted'])


    def custom_deserialize_relation(self, link, data):
        """Deserialize a related object; only the meta_host link is
        supported."""
        assert link == 'meta_host'
        self.meta_host = Label.deserialize(data)


    def sanity_check_update_from_shard(self, shard, updated_serialized,
                                       job_ids_sent):
        """Ensure a shard only uploads entries for jobs it was sent.

        @raises error.UnallowedRecordsSentToMaster: If this entry's job id is
                not among the job ids sent to the shard.
        """
        if self.job_id not in job_ids_sent:
            raise error.UnallowedRecordsSentToMaster(
                'Sent HostQueueEntry without corresponding '
                'job entry: %s' % updated_serialized)


    # Status constants shared with the rest of autotest.
    Status = host_queue_entry_states.Status
    ACTIVE_STATUSES = host_queue_entry_states.ACTIVE_STATUSES
    COMPLETE_STATUSES = host_queue_entry_states.COMPLETE_STATUSES

    job = dbmodels.ForeignKey(Job)
    host = dbmodels.ForeignKey(Host, blank=True, null=True)
    status = dbmodels.CharField(max_length=255)
    meta_host = dbmodels.ForeignKey(Label, blank=True, null=True,
                                    db_column='meta_host')
    active = dbmodels.BooleanField(default=False)
    complete = dbmodels.BooleanField(default=False)
    deleted = dbmodels.BooleanField(default=False)
    execution_subdir = dbmodels.CharField(max_length=255, blank=True,
                                          default='')
    # If atomic_group is set, this is a virtual HostQueueEntry that will
    # be expanded into many actual hosts within the group at schedule time.
    atomic_group = dbmodels.ForeignKey(AtomicGroup, blank=True, null=True)
    aborted = dbmodels.BooleanField(default=False)
    started_on = dbmodels.DateTimeField(null=True, blank=True)
    finished_on = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()


    def __init__(self, *args, **kwargs):
        """Initialize the entry and start tracking 'status' changes so
        on_attribute_changed() can log transitions."""
        super(HostQueueEntry, self).__init__(*args, **kwargs)
        self._record_attributes(['status'])


    @classmethod
    def create(cls, job, host=None, meta_host=None, atomic_group=None,
                 is_template=False):
        """Creates a new host queue entry.

        @param cls: Implicit class object.
        @param job: The associated job.
        @param host: The associated host.
        @param meta_host: The associated meta host.
        @param atomic_group: The associated atomic group.
        @param is_template: Whether the status should be "Template".
        """
        if is_template:
            status = cls.Status.TEMPLATE
        else:
            status = cls.Status.QUEUED

        # Note: the entry is constructed but not saved; callers must save().
        return cls(job=job, host=host, meta_host=meta_host,
                   atomic_group=atomic_group, status=status)


    def save(self, *args, **kwargs):
        """Save the entry, deriving active/complete flags from status and
        logging any status transition afterwards."""
        self._set_active_and_complete()
        super(HostQueueEntry, self).save(*args, **kwargs)
        self._check_for_updated_attributes()


    def execution_path(self):
        """
        Path to this entry's results (relative to the base results directory).
        """
        return server_utils.get_hqe_exec_path(self.job.tag(),
                                              self.execution_subdir)


    def host_or_metahost_name(self):
        """Returns the first non-None name found in priority order.

        The priority order checked is: (1) host name; (2) meta host name; and
        (3) atomic group name.
        """
        if self.host:
            return self.host.hostname
        elif self.meta_host:
            return self.meta_host.name
        else:
            assert self.atomic_group, "no host, meta_host or atomic group!"
            return self.atomic_group.name


    def _set_active_and_complete(self):
        """Derive the active/complete booleans from the current status so the
        flags always agree with ACTIVE_STATUSES/COMPLETE_STATUSES."""
        if self.status in self.ACTIVE_STATUSES:
            self.active, self.complete = True, False
        elif self.status in self.COMPLETE_STATUSES:
            self.active, self.complete = False, True
        else:
            self.active, self.complete = False, False


    def on_attribute_changed(self, attribute, old_value):
        """Log status transitions; only the 'status' attribute is tracked."""
        assert attribute == 'status'
        logging.info('%s/%d (%d) -> %s', self.host, self.job.id, self.id,
                     self.status)


    def is_meta_host_entry(self):
        """True if this entry has a meta_host instead of a host."""
        return self.host is None and self.meta_host is not None


    # This code is shared between rpc_interface and models.HostQueueEntry.
    # Sadly due to circular imports between the 2 (crbug.com/230100) making it
    # a class method was the best way to refactor it. Attempting to put it in
    # rpc_utils or a new utils module failed as that would require us to import
    # models.py but to call it from here we would have to import the utils.py
    # thus creating a cycle.
    @classmethod
    def abort_host_queue_entries(cls, host_queue_entries):
        """Aborts a collection of host_queue_entries.

        Abort these host queue entry and all host queue entries of jobs created
        by them.

        @param host_queue_entries: List of host queue entries we want to abort.
        """
        # This isn't completely immune to race conditions since it's not atomic,
        # but it should be safe given the scheduler's behavior.

        # TODO(milleral): crbug.com/230100
        # The |abort_host_queue_entries| rpc does nearly exactly this,
        # however, trying to re-use the code generates some horrible
        # circular import error.  I'd be nice to refactor things around
        # sometime so the code could be reused.

        # Fixpoint algorithm to find the whole tree of HQEs to abort to
        # minimize the total number of database queries:
        children = set()
        new_children = set(host_queue_entries)
        while new_children:
            children.update(new_children)
            new_child_ids = [hqe.job_id for hqe in new_children]
            new_children = HostQueueEntry.objects.filter(
                    job__parent_job__in=new_child_ids,
                    complete=False, aborted=False).all()
            # To handle circular parental relationships
            new_children = set(new_children) - children

        # Associate a user with the host queue entries that we're about
        # to abort so that we can look up who to blame for the aborts.
        now = datetime.now()
        user = User.current_user()
        aborted_hqes = [AbortedHostQueueEntry(queue_entry=hqe,
                aborted_by=user, aborted_on=now) for hqe in children]
        AbortedHostQueueEntry.objects.bulk_create(aborted_hqes)
        # Bulk update all of the HQEs to set the abort bit.
        child_ids = [hqe.id for hqe in children]
        HostQueueEntry.objects.filter(id__in=child_ids).update(aborted=True)


    def abort(self):
        """ Aborts this host queue entry.

        Abort this host queue entry and all host queue entries of jobs created by
        this one.

        """
        # No-op for entries that are already finished or already aborted.
        if not self.complete and not self.aborted:
            HostQueueEntry.abort_host_queue_entries([self])


    @classmethod
    def compute_full_status(cls, status, aborted, complete):
        """Returns a modified status msg if the host queue entry was aborted.

        @param cls: Implicit class object.
        @param status: The original status message.
        @param aborted: Whether the host queue entry was aborted.
        @param complete: Whether the host queue entry was completed.
        """
        if aborted and not complete:
            return 'Aborted (%s)' % status
        return status


    def full_status(self):
        """Returns the full status of this host queue entry, as a string."""
        return self.compute_full_status(self.status, self.aborted,
                                        self.complete)


    def _postprocess_object_dict(self, object_dict):
        """Add the computed 'full_status' field when serializing to a dict."""
        object_dict['full_status'] = self.full_status()


    class Meta:
        """Metadata for class HostQueueEntry."""
        db_table = 'afe_host_queue_entries'



    def __unicode__(self):
        """Human-readable description: '<hostname>/<job id> (<entry id>)'."""
        hostname = None
        if self.host:
            hostname = self.host.hostname
        return u"%s/%d (%d)" % (hostname, self.job.id, self.id)
1957
1958
class AbortedHostQueueEntry(dbmodels.Model, model_logic.ModelExtensions):
    """Represents an aborted host queue entry."""
    queue_entry = dbmodels.OneToOneField(HostQueueEntry, primary_key=True)
    aborted_by = dbmodels.ForeignKey(User)
    aborted_on = dbmodels.DateTimeField()

    objects = model_logic.ExtendedManager()


    def save(self, *args, **kwargs):
        """Save the record, stamping aborted_on with the current time.

        NOTE: any caller-supplied aborted_on value is always overwritten
        here (e.g. the value passed by abort_host_queue_entries).
        """
        self.aborted_on = datetime.now()
        super(AbortedHostQueueEntry, self).save(*args, **kwargs)

    class Meta:
        """Metadata for class AbortedHostQueueEntry."""
        db_table = 'afe_aborted_host_queue_entries'
1975
1976
class RecurringRun(dbmodels.Model, model_logic.ModelExtensions):
    """\
    job: job to use as a template
    owner: owner of the instantiated template
    start_date: Run the job at scheduled date
    loop_period: Re-run (loop) the job periodically
                 (in every loop_period seconds)
    loop_count: Re-run (loop) count
    """

    job = dbmodels.ForeignKey(Job)
    owner = dbmodels.ForeignKey(User)
    start_date = dbmodels.DateTimeField()
    loop_period = dbmodels.IntegerField(blank=True)
    loop_count = dbmodels.IntegerField(blank=True)

    objects = model_logic.ExtendedManager()

    class Meta:
        """Metadata for class RecurringRun."""
        db_table = 'afe_recurring_run'

    def __unicode__(self):
        """Human-readable description of this recurring run."""
        return u'RecurringRun(job %s, start %s, period %s, count %s)' % (
            self.job.id, self.start_date, self.loop_period, self.loop_count)
2002
2003
class SpecialTask(dbmodels.Model, model_logic.ModelExtensions):
    """\
    Tasks to run on hosts at the next time they are in the Ready state. Use this
    for high-priority tasks, such as forced repair or forced reinstall.

    host: host to run this task on
    task: special task to run
    time_requested: date and time the request for this task was made
    is_active: task is currently running
    is_complete: task has finished running
    is_aborted: task was aborted
    time_started: date and time the task started
    time_finished: date and time the task finished
    queue_entry: Host queue entry waiting on this task (or None, if task was not
                 started in preparation of a job)
    """
    # The closed set of task types; stored as strings in the 'task' column.
    Task = enum.Enum('Verify', 'Cleanup', 'Repair', 'Reset', 'Provision',
                     string_values=True)

    host = dbmodels.ForeignKey(Host, blank=False, null=False)
    task = dbmodels.CharField(max_length=64, choices=Task.choices(),
                              blank=False, null=False)
    requested_by = dbmodels.ForeignKey(User)
    time_requested = dbmodels.DateTimeField(auto_now_add=True, blank=False,
                                            null=False)
    is_active = dbmodels.BooleanField(default=False, blank=False, null=False)
    is_complete = dbmodels.BooleanField(default=False, blank=False, null=False)
    is_aborted = dbmodels.BooleanField(default=False, blank=False, null=False)
    time_started = dbmodels.DateTimeField(null=True, blank=True)
    queue_entry = dbmodels.ForeignKey(HostQueueEntry, blank=True, null=True)
    success = dbmodels.BooleanField(default=False, blank=False, null=False)
    time_finished = dbmodels.DateTimeField(null=True, blank=True)

    objects = model_logic.ExtendedManager()


    def save(self, **kwargs):
        """Save the task.

        If the task is attached to a queue entry, requested_by is always
        (re)derived from that entry's job owner, overriding any value set by
        the caller.
        """
        if self.queue_entry:
            self.requested_by = User.objects.get(
                    login=self.queue_entry.job.owner)
        super(SpecialTask, self).save(**kwargs)


    def execution_path(self):
        """Returns the execution path for a special task."""
        return server_utils.get_special_task_exec_path(
                self.host.hostname, self.id, self.task, self.time_requested)


    # property to emulate HostQueueEntry.status
    @property
    def status(self):
        """Returns a host queue entry status appropriate for a special task."""
        return server_utils.get_special_task_status(
                self.is_complete, self.success, self.is_active)


    # property to emulate HostQueueEntry.started_on
    @property
    def started_on(self):
        """Returns the time at which this special task started."""
        return self.time_started


    @classmethod
    def schedule_special_task(cls, host, task):
        """Schedules a special task on a host if not already scheduled.

        If a matching pending task (not active, not complete) already exists
        for the host, it is returned instead of creating a duplicate.

        @param cls: Implicit class object.
        @param host: The host to use.
        @param task: The task to schedule.
        """
        existing_tasks = SpecialTask.objects.filter(host__id=host.id, task=task,
                                                    is_active=False,
                                                    is_complete=False)
        if existing_tasks:
            return existing_tasks[0]

        special_task = SpecialTask(host=host, task=task,
                                   requested_by=User.current_user())
        special_task.save()
        return special_task


    def abort(self):
        """ Abort this special task."""
        self.is_aborted = True
        self.save()


    def activate(self):
        """
        Sets a task as active and sets the time started to the current time.
        """
        logging.info('Starting: %s', self)
        self.is_active = True
        self.time_started = datetime.now()
        self.save()


    def finish(self, success):
        """Sets a task as completed.

        @param success: Whether or not the task was successful.
        """
        logging.info('Finished: %s', self)
        self.is_active = False
        self.is_complete = True
        self.success = success
        # Only stamp a finish time for tasks that actually started.
        if self.time_started:
            self.time_finished = datetime.now()
        self.save()


    class Meta:
        """Metadata for class SpecialTask."""
        db_table = 'afe_special_tasks'


    def __unicode__(self):
        """Human-readable description, annotated with completion state."""
        result = u'Special Task %s (host %s, task %s, time %s)' % (
            self.id, self.host, self.task, self.time_requested)
        if self.is_complete:
            result += u' (completed)'
        elif self.is_active:
            result += u' (active)'

        return result
2132
2133
class StableVersion(dbmodels.Model, model_logic.ModelExtensions):
    """Maps a board name to a version string.

    NOTE(review): presumably this records the stable build version per
    board; the model itself only stores the (board, version) pair — confirm
    semantics against callers.
    """

    board = dbmodels.CharField(max_length=255, unique=True)
    version = dbmodels.CharField(max_length=255)

    class Meta:
        """Metadata for class StableVersion."""
        db_table = 'afe_stable_versions'
2142