rpc_interface.py revision 4e545a5ef263106c2d5316ad02c6c14b8d28e5b2
1"""\
2Functions to expose over the RPC interface.
3
4For all modify* and delete* functions that ask for an 'id' parameter to
5identify the object to operate on, the id may be either
6 * the database row ID
7 * the name of the object (label name, hostname, user login, etc.)
8 * a dictionary containing uniquely identifying field (this option should seldom
9   be used)
10
11When specifying foreign key fields (i.e. adding hosts to a label, or adding
12users to an ACL group), the given value may be either the database row ID or the
13name of the object.
14
15All get* functions return lists of dictionaries.  Each dictionary represents one
16object and maps field names to values.
17
18Some examples:
19modify_host(2, hostname='myhost') # modify hostname of host with database ID 2
20modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
21modify_test('sleeptest', test_type='Client', params=', seconds=60')
22delete_acl_group(1) # delete by ID
23delete_acl_group('Everyone') # delete by name
24acl_group_add_users('Everyone', ['mbligh', 'showard'])
25get_jobs(owner='showard', status='Queued')
26
27See doctests/001_rpc_test.txt for (lots) more examples.
28"""
29
30__author__ = 'showard@google.com (Steve Howard)'
31
32import datetime
33import common
34from autotest_lib.frontend import thread_local
35from autotest_lib.frontend.afe import models, model_logic
36from autotest_lib.frontend.afe import control_file, rpc_utils
37from autotest_lib.client.common_lib import global_config
38
39
40# labels
41
def add_label(name, kernel_config=None, platform=None, only_if_needed=None):
    """Create a new label and return its database ID."""
    new_label = models.Label.add_object(name=name,
                                        kernel_config=kernel_config,
                                        platform=platform,
                                        only_if_needed=only_if_needed)
    return new_label.id
46
47
def modify_label(id, **data):
    """Update fields of the label identified by id (row ID, name, or dict)."""
    label = models.Label.smart_get(id)
    label.update_object(data)
50
51
def delete_label(id):
    """Delete the label identified by id (row ID, name, or dict)."""
    label = models.Label.smart_get(id)
    label.delete()
54
55
def label_add_hosts(id, hosts):
    """Apply the label identified by id to each of the given hosts."""
    label = models.Label.smart_get(id)
    hosts_to_add = models.Host.smart_get_bulk(hosts)
    # A host may carry at most one platform label; reject the add up front.
    if label.platform:
        models.Host.check_no_platform(hosts_to_add)
    label.host_set.add(*hosts_to_add)
62
63
def label_remove_hosts(id, hosts):
    """Remove the label identified by id from each of the given hosts."""
    hosts_to_remove = models.Host.smart_get_bulk(hosts)
    label = models.Label.smart_get(id)
    label.host_set.remove(*hosts_to_remove)
67
68
def get_labels(**filter_data):
    """\
    @returns A sequence of nested dictionaries of label information.
    """
    label_rows = models.Label.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(label_rows,
                                                  ('atomic_group',))
76
77
78# atomic groups
79
def add_atomic_group(name, max_number_of_machines=None, description=None):
    """Create a new atomic group and return its database ID."""
    group = models.AtomicGroup.add_object(
            name=name, description=description,
            max_number_of_machines=max_number_of_machines)
    return group.id
84
85
def modify_atomic_group(id, **data):
    """Update fields of the atomic group identified by id."""
    group = models.AtomicGroup.smart_get(id)
    group.update_object(data)
88
89
def delete_atomic_group(id):
    """Delete the atomic group identified by id."""
    group = models.AtomicGroup.smart_get(id)
    group.delete()
92
93
def atomic_group_add_labels(id, labels):
    """Associate the given labels with the atomic group identified by id."""
    group = models.AtomicGroup.smart_get(id)
    labels_to_add = models.Label.smart_get_bulk(labels)
    group.label_set.add(*labels_to_add)
97
98
def atomic_group_remove_labels(id, labels):
    """Disassociate the given labels from the atomic group identified by id."""
    group = models.AtomicGroup.smart_get(id)
    labels_to_remove = models.Label.smart_get_bulk(labels)
    group.label_set.remove(*labels_to_remove)
102
103
def get_atomic_groups(**filter_data):
    """Return matching atomic groups as a list of serializable dicts."""
    group_dicts = models.AtomicGroup.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(group_dicts)
107
108
109# hosts
110
def add_host(hostname, status=None, locked=None, protection=None):
    """Create a new host record and return its database ID."""
    host = models.Host.add_object(hostname=hostname, status=status,
                                  locked=locked, protection=protection)
    return host.id
114
115
def modify_host(id, **data):
    """Update fields of the host identified by id, enforcing ACL and
    locking rules before applying the change."""
    rpc_utils.check_modify_host(data)
    target_host = models.Host.smart_get(id)
    rpc_utils.check_modify_host_locking(target_host, data)
    target_host.update_object(data)
121
122
def modify_hosts(host_filter_data, update_data):
    """
    @param host_filter_data: Filters out which hosts to modify.
    @param update_data: A dictionary with the changes to make to the hosts.
    """
    rpc_utils.check_modify_host(update_data)
    for matching_host in models.Host.query_objects(host_filter_data):
        matching_host.update_object(update_data)
132
133
def host_add_labels(id, labels):
    """Apply the given labels to the host identified by id.

    Rejects the request if it would leave the host with more than one
    platform label.
    """
    label_objects = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)

    platform_names = [label.name for label in label_objects if label.platform]
    if len(platform_names) > 1:
        raise model_logic.ValidationError(
            {'labels': 'Adding more than one platform label: %s' %
                       ', '.join(platform_names)})
    if len(platform_names) == 1:
        # Adding exactly one platform: the host must not already have one.
        models.Host.check_no_platform([host])
    host.labels.add(*label_objects)
146
147
def host_remove_labels(id, labels):
    """Remove the given labels from the host identified by id."""
    label_objects = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)
    host.labels.remove(*label_objects)
151
152
def set_host_attribute(attribute, value, **host_filter_data):
    """
    @param attribute string name of attribute
    @param value string, or None to delete an attribute
    @param host_filter_data filter data to apply to Hosts to choose hosts to act
    upon
    """
    # An empty filter would act on every host in the database; refuse it.
    assert host_filter_data
    matching_hosts = models.Host.query_objects(host_filter_data)
    models.AclGroup.check_for_acl_violation_hosts(matching_hosts)

    for matching_host in matching_hosts:
        matching_host.set_or_delete_attribute(attribute, value)
166
167
def delete_host(id):
    """Delete the host identified by id (row ID, hostname, or dict)."""
    host = models.Host.smart_get(id)
    host.delete()
170
171
def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
              exclude_atomic_group_hosts=False, valid_only=True, **filter_data):
    """
    @param multiple_labels: match hosts in all of the labels given.  Should
            be a list of label names.
    @param exclude_only_if_needed_labels: Exclude hosts with at least one
            "only_if_needed" label applied.
    @param exclude_atomic_group_hosts: Exclude hosts that have one or more
            atomic group labels associated with them.
    """
    host_list = list(rpc_utils.get_host_query(multiple_labels,
                                              exclude_only_if_needed_labels,
                                              exclude_atomic_group_hosts,
                                              valid_only, filter_data))
    # Bulk-load related objects so we don't issue a query per host.
    populate = models.Host.objects.populate_relationships
    populate(host_list, models.Label, 'label_list')
    populate(host_list, models.AclGroup, 'acl_list')
    populate(host_list, models.HostAttribute, 'attribute_list')

    host_dicts = []
    for host in host_list:
        info = host.get_object_dict()
        info['labels'] = [label.name for label in host.label_list]
        info['platform'], info['atomic_group'] = (
                rpc_utils.find_platform_and_atomic_group(host))
        info['acls'] = [acl.name for acl in host.acl_list]
        info['attributes'] = dict((attr.attribute, attr.value)
                                  for attr in host.attribute_list)
        host_dicts.append(info)
    return rpc_utils.prepare_for_serialization(host_dicts)
204
205
def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
                  exclude_atomic_group_hosts=False, valid_only=True,
                  **filter_data):
    """
    Same parameters as get_hosts().

    @returns The number of matching hosts.
    """
    host_query = rpc_utils.get_host_query(multiple_labels,
                                          exclude_only_if_needed_labels,
                                          exclude_atomic_group_hosts,
                                          valid_only, filter_data)
    return host_query.count()
219
220
221# tests
222
def add_test(name, test_type, path, author=None, dependencies=None,
             experimental=True, run_verify=None, test_class=None,
             test_time=None, test_category=None, description=None,
             sync_count=1):
    """Create a new test record and return its database ID."""
    new_test = models.Test.add_object(name=name, test_type=test_type,
                                      path=path, author=author,
                                      dependencies=dependencies,
                                      experimental=experimental,
                                      run_verify=run_verify,
                                      test_time=test_time,
                                      test_category=test_category,
                                      sync_count=sync_count,
                                      test_class=test_class,
                                      description=description)
    return new_test.id
235
236
def modify_test(id, **data):
    """Update fields of the test identified by id (row ID, name, or dict)."""
    test = models.Test.smart_get(id)
    test.update_object(data)
239
240
def delete_test(id):
    """Delete the test identified by id (row ID, name, or dict)."""
    test = models.Test.smart_get(id)
    test.delete()
243
244
def get_tests(**filter_data):
    """Return matching tests as a list of serializable dicts."""
    test_dicts = models.Test.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(test_dicts)
248
249
250# profilers
251
def add_profiler(name, description=None):
    """Create a new profiler record and return its database ID."""
    profiler = models.Profiler.add_object(name=name, description=description)
    return profiler.id
254
255
def modify_profiler(id, **data):
    """Update fields of the profiler identified by id."""
    profiler = models.Profiler.smart_get(id)
    profiler.update_object(data)
258
259
def delete_profiler(id):
    """Delete the profiler identified by id."""
    profiler = models.Profiler.smart_get(id)
    profiler.delete()
262
263
def get_profilers(**filter_data):
    """Return matching profilers as a list of serializable dicts."""
    profiler_dicts = models.Profiler.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(profiler_dicts)
267
268
269# users
270
def add_user(login, access_level=None):
    """Create a new user record and return its database ID."""
    user = models.User.add_object(login=login, access_level=access_level)
    return user.id
273
274
def modify_user(id, **data):
    """Update fields of the user identified by id (row ID, login, or dict)."""
    user = models.User.smart_get(id)
    user.update_object(data)
277
278
def delete_user(id):
    """Delete the user identified by id (row ID, login, or dict)."""
    user = models.User.smart_get(id)
    user.delete()
281
282
def get_users(**filter_data):
    """Return matching users as a list of serializable dicts."""
    user_dicts = models.User.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(user_dicts)
286
287
288# acl groups
289
def add_acl_group(name, description=None):
    """Create a new ACL group, add the calling user to it, and return its
    database ID."""
    new_group = models.AclGroup.add_object(name=name, description=description)
    # The creator is always a member of the new group.
    new_group.users.add(thread_local.get_user())
    return new_group.id
294
295
def modify_acl_group(id, **data):
    """Update fields of the ACL group identified by id, after an ACL check.

    Ensures the group is never left without members.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    acl_group.update_object(data)
    acl_group.add_current_user_if_empty()
301
302
def acl_group_add_users(id, users):
    """Add the given users to the ACL group identified by id."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objects = models.User.smart_get_bulk(users)
    acl_group.users.add(*user_objects)
308
309
def acl_group_remove_users(id, users):
    """Remove the given users from the ACL group identified by id.

    Ensures the group is never left without members.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objects = models.User.smart_get_bulk(users)
    acl_group.users.remove(*user_objects)
    acl_group.add_current_user_if_empty()
316
317
def acl_group_add_hosts(id, hosts):
    """Add the given hosts to the ACL group identified by id."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objects = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.add(*host_objects)
    # Let the model fix up per-host ACL bookkeeping.
    acl_group.on_host_membership_change()
324
325
def acl_group_remove_hosts(id, hosts):
    """Remove the given hosts from the ACL group identified by id."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objects = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.remove(*host_objects)
    # Let the model fix up per-host ACL bookkeeping.
    acl_group.on_host_membership_change()
332
333
def delete_acl_group(id):
    """Delete the ACL group identified by id (row ID, name, or dict)."""
    acl_group = models.AclGroup.smart_get(id)
    acl_group.delete()
336
337
def get_acl_groups(**filter_data):
    """Return matching ACL groups, each dict augmented with 'users' (logins)
    and 'hosts' (hostnames) membership lists."""
    group_dicts = models.AclGroup.list_objects(filter_data)
    for group_dict in group_dicts:
        group = models.AclGroup.objects.get(id=group_dict['id'])
        group_dict['users'] = [member.login for member in group.users.all()]
        group_dict['hosts'] = [host.hostname for host in group.hosts.all()]
    return rpc_utils.prepare_for_serialization(group_dicts)
347
348
349# jobs
350
def generate_control_file(tests=(), kernel=None, label=None, profilers=(),
                          client_control_file='', use_container=False,
                          profile_only=None, upload_kernel_config=False):
    """
    Generates a client-side control file to load a kernel and run tests.

    @param tests List of tests to run.
    @param kernel A list of kernel info dictionaries configuring which kernels
        to boot for this job and other options for them
    @param label Name of label to grab kernel config from.
    @param profilers List of profilers to activate during the job.
    @param client_control_file The contents of a client-side control file to
        run at the end of all tests.  If this is supplied, all tests must be
        client side.
        TODO: in the future we should support server control files directly
        to wrap with a kernel.  That'll require changing the parameter
        name and adding a boolean to indicate if it is a client or server
        control file.
    @param use_container unused argument today.  TODO: Enable containers
        on the host during a client side test.
    @param profile_only A boolean that indicates what default profile_only
        mode to use in the control file. Passing None will generate a
        control file that does not explicitly set the default mode at all.
    @param upload_kernel_config: if enabled it will generate server control
            file code that uploads the kernel config file to the client and
            tells the client of the new (local) path when compiling the kernel;
            the tests must be server side tests

    @returns a dict with the following keys:
        control_file: str, The control file text.
        is_server: bool, is the control file a server-side control file?
        synch_count: How many machines the job uses per autoserv execution.
            synch_count == 1 means the job is asynchronous.
        dependencies: A list of the names of labels on which the job depends.
    """
    # Nothing to run at all: return an empty (but well-formed) result.
    if not tests and not client_control_file:
        return dict(control_file='', is_server=False, synch_count=1,
                    dependencies=[])

    # Resolve names to model objects and compute is_server/synch_count/
    # dependencies; note 'label' is rebound from a name to a Label object.
    cf_info, test_objects, profiler_objects, label = (
        rpc_utils.prepare_generate_control_file(tests, kernel, label,
                                                profilers))
    cf_info['control_file'] = control_file.generate_control(
        tests=test_objects, kernels=kernel, platform=label,
        profilers=profiler_objects, is_server=cf_info['is_server'],
        client_control_file=client_control_file, profile_only=profile_only,
        upload_kernel_config=upload_kernel_config)
    return cf_info
399
400
def create_job(name, priority, control_file, control_type,
               hosts=(), meta_hosts=(), one_time_hosts=(),
               atomic_group_name=None, synch_count=None, is_template=False,
               timeout=None, max_runtime_hrs=None, run_verify=True,
               email_list='', dependencies=(), reboot_before=None,
               reboot_after=None, parse_failed_repair=None, hostless=False):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Low, Medium, High, Urgent
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param synch_count How many machines the job uses per autoserv execution.
    synch_count == 1 means the job is asynchronous.  If an atomic group is
    given this value is treated as a minimum.
    @param is_template If true then create a template job.
    @param timeout Hours after this call returns until the job times out.
    @param max_runtime_hrs Hours from job starting time until job times out
    @param run_verify Should the host be verified before running the test?
    @param email_list String containing emails to mail when the job is done
    @param dependencies List of label names on which this job depends
    @param reboot_before Never, If dirty, or Always
    @param reboot_after Never, If all tests passed, or Always
    @param parse_failed_repair if true, results of failed repairs launched by
    this job will be parsed as part of the job.
    @param hostless if true, create a hostless job

    @param hosts List of hosts to run job on.
    @param meta_hosts List where each entry is a label name, and for each entry
    one host will be chosen from that label to run the job on.
    @param one_time_hosts List of hosts not in the database to run the job on.
    @param atomic_group_name The name of an atomic group to schedule the job on.


    @returns The created Job id number.
    """
    user = thread_local.get_user()
    owner = user.login
    # input validation
    if not (hosts or meta_hosts or one_time_hosts or atomic_group_name
            or hostless):
        raise model_logic.ValidationError({
            'arguments' : "You must pass at least one of 'hosts', "
                          "'meta_hosts', 'one_time_hosts', "
                          "'atomic_group_name', or 'hostless'"
            })

    if hostless:
        if hosts or meta_hosts or one_time_hosts or atomic_group_name:
            raise model_logic.ValidationError({
                    'hostless': 'Hostless jobs cannot include any hosts!'})
        server_type = models.Job.ControlType.get_string(
                models.Job.ControlType.SERVER)
        if control_type != server_type:
            raise model_logic.ValidationError({
                    'control_type': 'Hostless jobs cannot use client-side '
                                    'control files'})

    labels_by_name = dict((label.name, label)
                          for label in models.Label.objects.all())
    atomic_groups_by_name = dict((ag.name, ag)
                                 for ag in models.AtomicGroup.objects.all())

    # Schedule on an atomic group automagically if one of the labels given
    # is an atomic group label and no explicit atomic_group_name was supplied.
    if not atomic_group_name:
        for label_name in meta_hosts or []:
            label = labels_by_name.get(label_name)
            if label and label.atomic_group:
                atomic_group_name = label.atomic_group.name
                break

    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    metahost_objects = []
    for label_name in meta_hosts or []:
        if label_name in labels_by_name:
            label = labels_by_name[label_name]
            metahost_objects.append(label)
        elif label_name in atomic_groups_by_name:
            # If given a metahost name that isn't a Label, check to
            # see if the user was specifying an Atomic Group instead.
            atomic_group = atomic_groups_by_name[label_name]
            if atomic_group_name and atomic_group_name != atomic_group.name:
                raise model_logic.ValidationError({
                        'meta_hosts': (
                                'Label "%s" not found.  If assumed to be an '
                                'atomic group it would conflict with the '
                                'supplied atomic group "%s".' % (
                                        label_name, atomic_group_name))})
            atomic_group_name = atomic_group.name
        else:
            # Bug fix: report the unknown label by its name.  Previously this
            # interpolated the 'label' variable, which holds a stale Label
            # object (or is unbound) from an earlier loop iteration.
            raise model_logic.ValidationError(
                {'meta_hosts' : 'Label "%s" not found' % label_name})

    # Create and sanity check an AtomicGroup object if requested.
    if atomic_group_name:
        if one_time_hosts:
            raise model_logic.ValidationError(
                    {'one_time_hosts':
                     'One time hosts cannot be used with an Atomic Group.'})
        atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
        if synch_count and synch_count > atomic_group.max_number_of_machines:
            raise model_logic.ValidationError(
                    {'atomic_group_name' :
                     'You have requested a synch_count (%d) greater than the '
                     'maximum machines in the requested Atomic Group (%d).' %
                     (synch_count, atomic_group.max_number_of_machines)})
    else:
        atomic_group = None

    for host in one_time_hosts or []:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)

    # Fall back to the user's configured reboot preferences.
    if reboot_before is None:
        reboot_before = user.get_reboot_before_display()
    if reboot_after is None:
        reboot_after = user.get_reboot_after_display()

    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   max_runtime_hrs=max_runtime_hrs,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair)
    return rpc_utils.create_new_job(owner=owner,
                                    options=options,
                                    host_objects=host_objects,
                                    metahost_objects=metahost_objects,
                                    atomic_group=atomic_group)
541
542
def abort_host_queue_entries(**filter_data):
    """\
    Abort a set of host queue entries.
    """
    # Only incomplete entries can be aborted.
    entry_query = models.HostQueueEntry.query_objects(filter_data).filter(
            complete=False)
    models.AclGroup.check_abort_permissions(entry_query)
    entries = list(entry_query.select_related())
    rpc_utils.check_abort_synchronous_jobs(entries)

    aborting_user = thread_local.get_user()
    for entry in entries:
        entry.abort(aborting_user)
556
557
def reverify_hosts(**filter_data):
    """\
    Schedules a set of hosts for verify.

    @returns A list of hostnames that a verify task was created for.
    """
    hosts = models.Host.query_objects(filter_data)
    models.AclGroup.check_for_acl_violation_hosts(hosts)
    models.SpecialTask.schedule_special_task(hosts,
                                             models.SpecialTask.Task.VERIFY)
    # sorted() already returns a list; the old list(sorted(...)) wrapper
    # was redundant.
    return sorted(host.hostname for host in hosts)
569
570
def get_jobs(not_yet_run=False, running=False, finished=False, **filter_data):
    """\
    Extra filter args for get_jobs:
    -not_yet_run: Include only jobs that have not yet started running.
    -running: Include only jobs that have start running but for which not
    all hosts have completed.
    -finished: Include only jobs for which all hosts have completed (or
    aborted).
    At most one of these three fields should be specified.
    """
    filter_data['extra_args'] = rpc_utils.extra_job_filters(not_yet_run,
                                                            running,
                                                            finished)
    jobs = list(models.Job.query_objects(filter_data))
    # Bulk-load label dependencies to avoid a query per job.
    models.Job.objects.populate_relationships(jobs, models.Label,
                                              'dependencies')
    job_dicts = []
    for job in jobs:
        job_info = job.get_object_dict()
        job_info['dependencies'] = ','.join(dep.name
                                            for dep in job.dependencies)
        job_dicts.append(job_info)
    return rpc_utils.prepare_for_serialization(job_dicts)
594
595
def get_num_jobs(not_yet_run=False, running=False, finished=False,
                 **filter_data):
    """\
    See get_jobs() for documentation of extra filter parameters.
    """
    extra = rpc_utils.extra_job_filters(not_yet_run, running, finished)
    filter_data['extra_args'] = extra
    return models.Job.query_count(filter_data)
605
606
def get_jobs_summary(**filter_data):
    """\
    Like get_jobs(), but adds a 'status_counts' field, which is a dictionary
    mapping status strings to the number of hosts currently with that
    status, i.e. {'Queued' : 4, 'Running' : 2}.
    """
    job_dicts = get_jobs(**filter_data)
    job_ids = [job_dict['id'] for job_dict in job_dicts]
    status_counts_by_id = models.Job.objects.get_status_counts(job_ids)
    for job_dict in job_dicts:
        job_dict['status_counts'] = status_counts_by_id[job_dict['id']]
    return rpc_utils.prepare_for_serialization(job_dicts)
619
620
def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
    """\
    Retrieves all the information needed to clone a job.

    @param id: Database ID of the job to clone.
    @param preserve_metahosts: Passed through to rpc_utils.get_job_info to
            control whether metahost entries are kept as metahosts.
    @param queue_entry_filter_data: Optional extra filtering applied to the
            job's queue entries.

    @returns A serializable dict with 'job', 'meta_host_counts' and 'hosts'
            keys, plus 'atomic_group_name' (or None).
    """
    job = models.Job.objects.get(id=id)
    job_info = rpc_utils.get_job_info(job,
                                      preserve_metahosts,
                                      queue_entry_filter_data)

    host_dicts = []
    for host in job_info['hosts']:
        # Reuse the get_hosts() RPC so each host dict has the same shape
        # the frontend expects (labels, platform, acls, attributes, ...).
        host_dict = get_hosts(id=host.id)[0]
        other_labels = host_dict['labels']
        if host_dict['platform']:
            # 'labels' includes the platform label; list only the others.
            other_labels.remove(host_dict['platform'])
        host_dict['other_labels'] = ', '.join(other_labels)
        host_dicts.append(host_dict)

    # One-time hosts aren't regular Host rows; synthesize minimal dicts.
    for host in job_info['one_time_hosts']:
        host_dict = dict(hostname=host.hostname,
                         id=host.id,
                         platform='(one-time host)',
                         locked_text='')
        host_dicts.append(host_dict)

    # convert keys from Label objects to strings (names of labels)
    meta_host_counts = dict((meta_host.name, count) for meta_host, count
                            in job_info['meta_host_counts'].iteritems())

    info = dict(job=job.get_object_dict(),
                meta_host_counts=meta_host_counts,
                hosts=host_dicts)
    info['job']['dependencies'] = job_info['dependencies']
    if job_info['atomic_group']:
        info['atomic_group_name'] = (job_info['atomic_group']).name
    else:
        info['atomic_group_name'] = None

    return rpc_utils.prepare_for_serialization(info)
660
661
662# host queue entries
663
def get_host_queue_entries(**filter_data):
    """\
    @returns A sequence of nested dictionaries of host and job information.
    """
    entry_rows = models.HostQueueEntry.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(
            entry_rows, ('host', 'atomic_group', 'job'))
671
672
def get_num_host_queue_entries(**filter_data):
    """\
    Get the number of host queue entries associated with this job.
    """
    return models.HostQueueEntry.query_count(filter_data)
678
679
def get_hqe_percentage_complete(**filter_data):
    """
    Computes the fraction of host queue entries matching the given filter data
    that are complete.
    """
    matching_entries = models.HostQueueEntry.query_objects(filter_data)
    total = matching_entries.count()
    # No matching entries counts as fully complete.
    if total == 0:
        return 1
    completed = matching_entries.filter(complete=True).count()
    return float(completed) / total
691
692
693# special tasks
694
def get_special_tasks(**filter_data):
    """Return matching special tasks with nested host/queue_entry dicts."""
    task_rows = models.SpecialTask.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(task_rows,
                                                  ('host', 'queue_entry'))
699
700
701# support for host detail view
702
def get_host_queue_entries_and_special_tasks(hostname, query_start=None,
                                             query_limit=None):
    """
    @param hostname: hostname whose history to fetch.
    @param query_start: optional offset into the interleaved list.
    @param query_limit: optional maximum number of entries to return.

    @returns an interleaved list of HostQueueEntries and SpecialTasks,
            in approximate run order.  each dict contains keys for type, host,
            job, status, started_on, execution_path, and ID.
    """
    total_limit = None
    if query_limit is not None:
        # Bug fix: a limit without an explicit start used to raise a
        # TypeError (None + int); treat a missing start as 0.
        total_limit = (query_start or 0) + query_limit
    # Over-fetch both lists up to start+limit so the interleaved slice
    # below is correct; sort newest-first by id.
    filter_data = {'host__hostname': hostname,
                   'query_limit': total_limit,
                   'sort_by': ['-id']}

    queue_entries = list(models.HostQueueEntry.query_objects(filter_data))
    special_tasks = list(models.SpecialTask.query_objects(filter_data))

    interleaved_entries = rpc_utils.interleave_entries(queue_entries,
                                                       special_tasks)
    if query_start is not None:
        interleaved_entries = interleaved_entries[query_start:]
    if query_limit is not None:
        interleaved_entries = interleaved_entries[:query_limit]
    return rpc_utils.prepare_for_serialization(interleaved_entries)
727
728
def get_num_host_queue_entries_and_special_tasks(hostname):
    """Return the combined count of queue entries and special tasks for the
    given hostname."""
    filter_data = {'host__hostname': hostname}
    num_entries = models.HostQueueEntry.query_count(filter_data)
    num_tasks = models.SpecialTask.query_count(filter_data)
    return num_entries + num_tasks
733
734
735# recurring run
736
def get_recurring(**filter_data):
    """Return matching recurring runs with nested job/owner dicts."""
    recurring_rows = models.RecurringRun.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(recurring_rows,
                                                  ('job', 'owner'))
741
742
def get_num_recurring(**filter_data):
    """Return the number of recurring runs matching the filter."""
    return models.RecurringRun.query_count(filter_data)
745
746
def delete_recurring_runs(**filter_data):
    """Delete all recurring runs matching the filter."""
    models.RecurringRun.query_objects(filter_data).delete()
750
751
def create_recurring_run(job_id, start_date, loop_period, loop_count):
    """Create a recurring run of the given job, owned by the calling user."""
    requesting_owner = thread_local.get_user().login
    source_job = models.Job.objects.get(id=job_id)
    return source_job.create_recurring_job(start_date=start_date,
                                           loop_period=loop_period,
                                           loop_count=loop_count,
                                           owner=requesting_owner)
759
760
761# other
762
def echo(data=""):
    """\
    Returns a passed in string. For doing a basic test to see if RPC calls
    can successfully be made.
    """
    # Intentionally trivial: the round-trip itself is the test.
    return data
769
770
def get_motd():
    """\
    Returns the message of the day as a string.
    """
    return rpc_utils.get_motd()
776
777
def get_static_data():
    """\
    Returns a dictionary containing a bunch of data that shouldn't change
    often and is otherwise inaccessible.  This includes:

    priorities: List of job priority choices.
    default_priority: Default priority value for new jobs.
    users: Sorted list of all users.
    labels: Sorted list of all labels.
    atomic_groups: Sorted list of all atomic groups.
    tests: Sorted list of all tests.
    profilers: Sorted list of all profilers.
    current_user: Logged-in username.
    host_statuses: Sorted list of possible Host statuses.
    job_statuses: Sorted list of possible HostQueueEntry statuses.
    job_timeout_default: The default job timeout length in hours.
    parse_failed_repair_default: Default value for the parse_failed_repair job
    option.
    reboot_before_options: A list of valid RebootBefore string enums.
    reboot_after_options: A list of valid RebootAfter string enums.
    motd: Server's message of the day.
    status_dictionary: A mapping from one word job status names to a more
            informative description.
    """
    job_fields = models.Job.get_field_dict()
    default_priority = models.Job.Priority.get_string(
            job_fields['priority'].default)

    data = {}
    data['priorities'] = models.Job.Priority.choices()
    data['default_priority'] = default_priority
    data['users'] = get_users(sort_by=['login'])
    data['labels'] = get_labels(sort_by=['-platform', 'name'])
    data['atomic_groups'] = get_atomic_groups(sort_by=['name'])
    data['tests'] = get_tests(sort_by=['name'])
    data['profilers'] = get_profilers(sort_by=['name'])
    data['current_user'] = rpc_utils.prepare_for_serialization(
        thread_local.get_user().get_object_dict())
    data['host_statuses'] = sorted(models.Host.Status.names)
    data['job_statuses'] = sorted(models.HostQueueEntry.Status.names)
    data['job_timeout_default'] = models.Job.DEFAULT_TIMEOUT
    data['job_max_runtime_hrs_default'] = models.Job.DEFAULT_MAX_RUNTIME_HRS
    data['parse_failed_repair_default'] = bool(
        models.Job.DEFAULT_PARSE_FAILED_REPAIR)
    data['reboot_before_options'] = models.RebootBefore.names
    data['reboot_after_options'] = models.RebootAfter.names
    data['motd'] = rpc_utils.get_motd()

    # Human-readable descriptions for the one-word status names.
    data['status_dictionary'] = {"Aborted": "Aborted",
                                 "Verifying": "Verifying Host",
                                 "Pending": "Waiting on other hosts",
                                 "Running": "Running autoserv",
                                 "Completed": "Autoserv completed",
                                 "Failed": "Failed to complete",
                                 "Queued": "Queued",
                                 "Starting": "Next in host's queue",
                                 "Stopped": "Other host(s) failed verify",
                                 "Parsing": "Awaiting parse of final results",
                                 "Gathering": "Gathering log files",
                                 "Template": "Template job for recurring run",
                                 "Waiting": "Waiting for scheduler action"}
    return data
841
842
def get_server_time():
    """Return the server's current local time as a 'YYYY-MM-DD HH:MM' string."""
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M")
845