# rpc_interface.py revision 6d7b2ff05b2232b1b225a4cb3521d76c0152cad9
1"""\
2Functions to expose over the RPC interface.
3
4For all modify* and delete* functions that ask for an 'id' parameter to
5identify the object to operate on, the id may be either
6 * the database row ID
7 * the name of the object (label name, hostname, user login, etc.)
8 * a dictionary containing uniquely identifying field (this option should seldom
9   be used)
10
11When specifying foreign key fields (i.e. adding hosts to a label, or adding
12users to an ACL group), the given value may be either the database row ID or the
13name of the object.
14
15All get* functions return lists of dictionaries.  Each dictionary represents one
16object and maps field names to values.
17
18Some examples:
19modify_host(2, hostname='myhost') # modify hostname of host with database ID 2
20modify_host('ipaj2', hostname='myhost') # modify hostname of host 'ipaj2'
21modify_test('sleeptest', test_type='Client', params=', seconds=60')
22delete_acl_group(1) # delete by ID
23delete_acl_group('Everyone') # delete by name
24acl_group_add_users('Everyone', ['mbligh', 'showard'])
25get_jobs(owner='showard', status='Queued')
26
27See doctests/001_rpc_test.txt for (lots) more examples.
28"""
29
30__author__ = 'showard@google.com (Steve Howard)'
31
32import datetime
33import common
34from autotest_lib.frontend import thread_local
35from autotest_lib.frontend.afe import models, model_logic
36from autotest_lib.frontend.afe import control_file, rpc_utils
37from autotest_lib.client.common_lib import global_config
38
39
40# labels
41
def add_label(name, kernel_config=None, platform=None, only_if_needed=None):
    """\
    Create a new label and return its database row ID.
    """
    label = models.Label.add_object(name=name,
                                    kernel_config=kernel_config,
                                    platform=platform,
                                    only_if_needed=only_if_needed)
    return label.id
46
47
def modify_label(id, **data):
    """\
    Update fields of the label identified by id (row ID, name, or dict).
    """
    label = models.Label.smart_get(id)
    label.update_object(data)
50
51
def delete_label(id):
    """\
    Delete the label identified by id (row ID, name, or dict).
    """
    label = models.Label.smart_get(id)
    label.delete()
54
55
def label_add_hosts(id, hosts):
    """\
    Apply the given label to each of the given hosts.
    """
    label = models.Label.smart_get(id)
    hosts_to_label = models.Host.smart_get_bulk(hosts)
    # A host may carry at most one platform label; reject the add if any
    # of these hosts already has one.
    if label.platform:
        models.Host.check_no_platform(hosts_to_label)
    label.host_set.add(*hosts_to_label)
62
63
def label_remove_hosts(id, hosts):
    """\
    Remove the given label from each of the given hosts.
    """
    hosts_to_unlabel = models.Host.smart_get_bulk(hosts)
    label = models.Label.smart_get(id)
    label.host_set.remove(*hosts_to_unlabel)
67
68
def get_labels(**filter_data):
    """\
    @returns A sequence of nested dictionaries of label information.
    """
    label_rows = models.Label.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(label_rows,
                                                  ('atomic_group',))
76
77
78# atomic groups
79
def add_atomic_group(name, max_number_of_machines, description=None):
    """\
    Create a new atomic group and return its database row ID.
    """
    group = models.AtomicGroup.add_object(
            name=name,
            max_number_of_machines=max_number_of_machines,
            description=description)
    return group.id
84
85
def modify_atomic_group(id, **data):
    """\
    Update fields of the atomic group identified by id.
    """
    group = models.AtomicGroup.smart_get(id)
    group.update_object(data)
88
89
def delete_atomic_group(id):
    """\
    Delete the atomic group identified by id.
    """
    group = models.AtomicGroup.smart_get(id)
    group.delete()
92
93
def atomic_group_add_labels(id, labels):
    """\
    Associate the given labels with the given atomic group.
    """
    group = models.AtomicGroup.smart_get(id)
    group.label_set.add(*models.Label.smart_get_bulk(labels))
97
98
def atomic_group_remove_labels(id, labels):
    """\
    Disassociate the given labels from the given atomic group.
    """
    group = models.AtomicGroup.smart_get(id)
    group.label_set.remove(*models.Label.smart_get_bulk(labels))
102
103
def get_atomic_groups(**filter_data):
    """\
    @returns A list of dictionaries of atomic group information.
    """
    groups = models.AtomicGroup.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(groups)
107
108
109# hosts
110
def add_host(hostname, status=None, locked=None, protection=None):
    """\
    Create a new host and return its database row ID.
    """
    host = models.Host.add_object(hostname=hostname,
                                  status=status,
                                  locked=locked,
                                  protection=protection)
    return host.id
114
115
def modify_host(id, **data):
    """\
    Update fields of the host identified by id (row ID, hostname, or dict).
    """
    host = models.Host.smart_get(id)
    host.update_object(data)
118
119
def modify_hosts(host_filter_data, update_data):
    """\
    Apply the same update to every host matching a filter.

    @param host_filter_data filters out which hosts to modify
    @param update_data dictionary with the changes to make to the hosts
    """
    for matching_host in models.Host.query_objects(host_filter_data):
        matching_host.update_object(update_data)
128
129
def host_add_labels(id, labels):
    """\
    Apply the given labels to the given host.

    Raises ValidationError if more than one platform label is supplied, or
    if one is supplied and the host already has a platform label.
    """
    label_objs = models.Label.smart_get_bulk(labels)
    host = models.Host.smart_get(id)

    platform_names = [label.name for label in label_objs if label.platform]
    if len(platform_names) > 1:
        raise model_logic.ValidationError(
            {'labels': 'Adding more than one platform label: %s' %
                       ', '.join(platform_names)})
    if len(platform_names) == 1:
        # Adding a platform label is only allowed if the host has none yet.
        models.Host.check_no_platform([host])
    host.labels.add(*label_objs)
142
143
def host_remove_labels(id, labels):
    """\
    Remove the given labels from the given host.
    """
    host = models.Host.smart_get(id)
    host.labels.remove(*models.Label.smart_get_bulk(labels))
147
148
def set_host_attribute(attribute, value, **host_filter_data):
    """\
    Set (or delete) an attribute on every host matching a filter.

    @param attribute string name of attribute
    @param value string, or None to delete an attribute
    @param host_filter_data filter data to apply to Hosts to choose hosts to act
    upon
    """
    assert host_filter_data # disallow accidental actions on all hosts
    matching_hosts = models.Host.query_objects(host_filter_data)
    # Caller must have ACL access to every affected host.
    models.AclGroup.check_for_acl_violation_hosts(matching_hosts)

    for matching_host in matching_hosts:
        matching_host.set_or_delete_attribute(attribute, value)
162
163
def delete_host(id):
    """\
    Delete the host identified by id (row ID, hostname, or dict).
    """
    host = models.Host.smart_get(id)
    host.delete()
166
167
def get_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
              **filter_data):
    """\
    Get a list of host dictionaries, annotated with labels, platform, ACLs
    and attributes.

    multiple_labels: match hosts in all of the labels given.  Should be a
    list of label names.
    exclude_only_if_needed_labels: exclude hosts with at least one
    "only_if_needed" label applied.
    """
    # Note: the default for multiple_labels was changed from [] to () to
    # avoid the shared-mutable-default-argument pitfall; callers see no
    # difference since the value is only iterated.
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     filter_data)
    hosts = list(hosts)
    # Batch-fetch related rows up front rather than querying per host.
    models.Host.objects.populate_relationships(hosts, models.Label,
                                               'label_list')
    models.Host.objects.populate_relationships(hosts, models.AclGroup,
                                               'acl_list')
    models.Host.objects.populate_relationships(hosts, models.HostAttribute,
                                               'attribute_list')
    host_dicts = []
    for host_obj in hosts:
        host_dict = host_obj.get_object_dict()
        host_dict['labels'] = [label.name for label in host_obj.label_list]
        host_dict['platform'] = rpc_utils.find_platform(host_obj)
        host_dict['acls'] = [acl.name for acl in host_obj.acl_list]
        host_dict['attributes'] = dict((attribute.attribute, attribute.value)
                                       for attribute in host_obj.attribute_list)
        host_dicts.append(host_dict)
    return rpc_utils.prepare_for_serialization(host_dicts)
196
197
def get_num_hosts(multiple_labels=(), exclude_only_if_needed_labels=False,
                  **filter_data):
    """\
    Count the hosts matching the given filters; see get_hosts() for the
    meaning of the extra filter parameters.
    """
    # Default changed from [] to () to avoid a mutable default argument;
    # behavior is unchanged since the value is only read.
    hosts = rpc_utils.get_host_query(multiple_labels,
                                     exclude_only_if_needed_labels,
                                     filter_data)
    return hosts.count()
204
205
206# tests
207
def add_test(name, test_type, path, author=None, dependencies=None,
             experimental=True, run_verify=None, test_class=None,
             test_time=None, test_category=None, description=None,
             sync_count=1):
    """\
    Create a new test entry and return its database row ID.
    """
    test = models.Test.add_object(name=name,
                                  test_type=test_type,
                                  path=path,
                                  author=author,
                                  dependencies=dependencies,
                                  experimental=experimental,
                                  run_verify=run_verify,
                                  test_class=test_class,
                                  test_time=test_time,
                                  test_category=test_category,
                                  description=description,
                                  sync_count=sync_count)
    return test.id
220
221
def modify_test(id, **data):
    """\
    Update fields of the test identified by id (row ID, name, or dict).
    """
    test = models.Test.smart_get(id)
    test.update_object(data)
224
225
def delete_test(id):
    """\
    Delete the test identified by id (row ID, name, or dict).
    """
    test = models.Test.smart_get(id)
    test.delete()
228
229
def get_tests(**filter_data):
    """\
    @returns A list of dictionaries of test information.
    """
    tests = models.Test.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(tests)
233
234
235# profilers
236
def add_profiler(name, description=None):
    """\
    Create a new profiler entry and return its database row ID.
    """
    profiler = models.Profiler.add_object(name=name, description=description)
    return profiler.id
239
240
def modify_profiler(id, **data):
    """\
    Update fields of the profiler identified by id.
    """
    profiler = models.Profiler.smart_get(id)
    profiler.update_object(data)
243
244
def delete_profiler(id):
    """\
    Delete the profiler identified by id.
    """
    profiler = models.Profiler.smart_get(id)
    profiler.delete()
247
248
def get_profilers(**filter_data):
    """\
    @returns A list of dictionaries of profiler information.
    """
    profilers = models.Profiler.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(profilers)
252
253
254# users
255
def add_user(login, access_level=None):
    """\
    Create a new user entry and return its database row ID.
    """
    user = models.User.add_object(login=login, access_level=access_level)
    return user.id
258
259
def modify_user(id, **data):
    """\
    Update fields of the user identified by id (row ID, login, or dict).
    """
    user = models.User.smart_get(id)
    user.update_object(data)
262
263
def delete_user(id):
    """\
    Delete the user identified by id (row ID, login, or dict).
    """
    user = models.User.smart_get(id)
    user.delete()
266
267
def get_users(**filter_data):
    """\
    @returns A list of dictionaries of user information.
    """
    users = models.User.list_objects(filter_data)
    return rpc_utils.prepare_for_serialization(users)
271
272
273# acl groups
274
def add_acl_group(name, description=None):
    """\
    Create a new ACL group, seed it with the calling user as its first
    member, and return its database row ID.
    """
    new_group = models.AclGroup.add_object(name=name, description=description)
    new_group.users.add(thread_local.get_user())
    return new_group.id
279
280
def modify_acl_group(id, **data):
    """\
    Update fields of the ACL group identified by id; the caller must be a
    member (or otherwise authorized).
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    acl_group.update_object(data)
    # Never leave a group with zero members.
    acl_group.add_current_user_if_empty()
286
287
def acl_group_add_users(id, users):
    """\
    Add the given users to the given ACL group.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objs = models.User.smart_get_bulk(users)
    acl_group.users.add(*user_objs)
293
294
def acl_group_remove_users(id, users):
    """\
    Remove the given users from the given ACL group.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    user_objs = models.User.smart_get_bulk(users)
    acl_group.users.remove(*user_objs)
    # Never leave a group with zero members.
    acl_group.add_current_user_if_empty()
301
302
def acl_group_add_hosts(id, hosts):
    """\
    Add the given hosts to the given ACL group.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objs = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.add(*host_objs)
    acl_group.on_host_membership_change()
309
310
def acl_group_remove_hosts(id, hosts):
    """\
    Remove the given hosts from the given ACL group.
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.check_for_acl_violation_acl_group()
    host_objs = models.Host.smart_get_bulk(hosts)
    acl_group.hosts.remove(*host_objs)
    acl_group.on_host_membership_change()
317
318
def delete_acl_group(id):
    """\
    Delete the ACL group identified by id (row ID, name, or dict).
    """
    acl_group = models.AclGroup.smart_get(id)
    acl_group.delete()
321
322
def get_acl_groups(**filter_data):
    """\
    @returns A list of ACL group dictionaries, each annotated with the
    logins of its member users and the hostnames of its member hosts.
    """
    group_dicts = models.AclGroup.list_objects(filter_data)
    for group_dict in group_dicts:
        group = models.AclGroup.objects.get(id=group_dict['id'])
        group_dict['users'] = [member.login for member in group.users.all()]
        group_dict['hosts'] = [member.hostname
                               for member in group.hosts.all()]
    return rpc_utils.prepare_for_serialization(group_dicts)
332
333
334# jobs
335
def generate_control_file(tests=(), kernel=None, label=None, profilers=(),
                          client_control_file='', use_container=False):
    """
    Generates a client-side control file to load a kernel and run tests.

    @param tests List of tests to run.
    @param kernel Kernel to install in generated control file.
    @param label Name of label to grab kernel config from.
    @param profilers List of profilers to activate during the job.
    @param client_control_file The contents of a client-side control file to
        run at the end of all tests.  If this is supplied, all tests must be
        client side.
        TODO: in the future we should support server control files directly
        to wrap with a kernel.  That'll require changing the parameter
        name and adding a boolean to indicate if it is a client or server
        control file.
    @param use_container unused argument today.  TODO: Enable containers
        on the host during a client side test.

    @returns a dict with the following keys:
        control_file: str, The control file text.
        is_server: bool, is the control file a server-side control file?
        synch_count: How many machines the job uses per autoserv execution.
            synch_count == 1 means the job is asynchronous.
        dependencies: A list of the names of labels on which the job depends.
    """
    # Bug fix: the original condition was "not tests and not control_file",
    # but control_file names the imported *module* (always truthy), so the
    # early return for empty input could never trigger.  The intended check
    # is the client_control_file parameter.
    if not tests and not client_control_file:
        return dict(control_file='', is_server=False, synch_count=1,
                    dependencies=[])

    cf_info, test_objects, profiler_objects, label = (
        rpc_utils.prepare_generate_control_file(tests, kernel, label,
                                                profilers))
    cf_info['control_file'] = control_file.generate_control(
        tests=test_objects, kernel=kernel, platform=label,
        profilers=profiler_objects, is_server=cf_info['is_server'],
        client_control_file=client_control_file)
    return cf_info
374
375
def create_job(name, priority, control_file, control_type,
               hosts=(), meta_hosts=(), one_time_hosts=(),
               atomic_group_name=None, synch_count=None, is_template=False,
               timeout=None, max_runtime_hrs=None, run_verify=True,
               email_list='', dependencies=(), reboot_before=None,
               reboot_after=None, parse_failed_repair=None):
    """\
    Create and enqueue a job.

    @param name name of this job
    @param priority Low, Medium, High, Urgent
    @param control_file String contents of the control file.
    @param control_type Type of control file, Client or Server.
    @param synch_count How many machines the job uses per autoserv execution.
    synch_count == 1 means the job is asynchronous.  If an atomic group is
    given this value is treated as a minimum.
    @param is_template If true then create a template job.
    @param timeout Hours after this call returns until the job times out.
    @param max_runtime_hrs Hours from job starting time until job times out
    @param run_verify Should the host be verified before running the test?
    @param email_list String containing emails to mail when the job is done
    @param dependencies List of label names on which this job depends
    @param reboot_before Never, If dirty, or Always
    @param reboot_after Never, If all tests passed, or Always
    @param parse_failed_repair if true, results of failed repairs launched by
    this job will be parsed as part of the job.

    @param hosts List of hosts to run job on.
    @param meta_hosts List where each entry is a label name, and for each entry
    one host will be chosen from that label to run the job on.
    @param one_time_hosts List of hosts not in the database to run the job on.
    @param atomic_group_name The name of an atomic group to schedule the job on.


    @returns The created Job id number.
    """
    user = thread_local.get_user()
    owner = user.login
    # input validation
    if not (hosts or meta_hosts or one_time_hosts or atomic_group_name):
        raise model_logic.ValidationError({
            'arguments' : "You must pass at least one of 'hosts', "
                          "'meta_hosts', 'one_time_hosts', "
                          "or 'atomic_group_name'"
            })

    # Snapshot name -> object maps once so each meta_host lookup below is a
    # dict hit instead of a database query.
    labels_by_name = dict((label.name, label)
                          for label in models.Label.objects.all())
    atomic_groups_by_name = dict((ag.name, ag)
                                 for ag in models.AtomicGroup.objects.all())

    # convert hostnames & meta hosts to host/label objects
    host_objects = models.Host.smart_get_bulk(hosts)
    metahost_objects = []
    for label in meta_hosts or []:
        if label in labels_by_name:
            this_label = labels_by_name[label]
            metahost_objects.append(this_label)
        elif label in atomic_groups_by_name:
            # If a given metahost (Label) name that isn't a label, check to
            # see if the user was specifying an atomic group instead.
            atomic_group = atomic_groups_by_name[label]
            # A different atomic group explicitly supplied by the caller
            # conflicts with the one implied by this meta_host entry.
            if atomic_group_name and atomic_group_name != atomic_group.name:
                raise model_logic.ValidationError({
                        'meta_hosts': (
                                'Label "%s" not found.  If assumed to be an '
                                'atomic group it would conflict with the '
                                'supplied atomic group "%s".' % (
                                        label, atomic_group_name))})
            atomic_group_name = atomic_group.name
        else:
            raise model_logic.ValidationError(
                {'meta_hosts' : 'Label "%s" not found' % label})

    # Create and sanity check an AtomicGroup object if requested.
    if atomic_group_name:
        if one_time_hosts:
            raise model_logic.ValidationError(
                    {'one_time_hosts':
                     'One time hosts cannot be used with an Atomic Group.'})
        atomic_group = models.AtomicGroup.smart_get(atomic_group_name)
        # synch_count acts as a minimum here; it cannot exceed the group's
        # machine capacity.
        if synch_count and synch_count > atomic_group.max_number_of_machines:
            raise model_logic.ValidationError(
                    {'atomic_group_name' :
                     'You have requested a synch_count (%d) greater than the '
                     'maximum machines in the requested Atomic Group (%d).' %
                     (synch_count, atomic_group.max_number_of_machines)})
    else:
        atomic_group = None

    # One-time hosts are not in the database yet; create them on the fly.
    for host in one_time_hosts or []:
        this_host = models.Host.create_one_time_host(host)
        host_objects.append(this_host)

    # Fall back to the submitting user's personal reboot preferences when
    # the caller did not specify them.
    if reboot_before is None:
        reboot_before = user.get_reboot_before_display()
    if reboot_after is None:
        reboot_after = user.get_reboot_after_display()

    options = dict(name=name,
                   priority=priority,
                   control_file=control_file,
                   control_type=control_type,
                   is_template=is_template,
                   timeout=timeout,
                   max_runtime_hrs=max_runtime_hrs,
                   synch_count=synch_count,
                   run_verify=run_verify,
                   email_list=email_list,
                   dependencies=dependencies,
                   reboot_before=reboot_before,
                   reboot_after=reboot_after,
                   parse_failed_repair=parse_failed_repair)
    return rpc_utils.create_new_job(owner=owner,
                                    options=options,
                                    host_objects=host_objects,
                                    metahost_objects=metahost_objects,
                                    atomic_group=atomic_group)
494
495
def abort_host_queue_entries(**filter_data):
    """\
    Abort a set of host queue entries.
    """
    # Only incomplete entries can be aborted.
    entries_query = models.HostQueueEntry.query_objects(filter_data)
    entries_query = entries_query.filter(complete=False)
    models.AclGroup.check_abort_permissions(entries_query)
    entries = list(entries_query.select_related())
    rpc_utils.check_abort_synchronous_jobs(entries)

    aborting_user = thread_local.get_user()
    for entry in entries:
        entry.abort(aborting_user)
509
510
def reverify_hosts(**filter_data):
    """\
    Schedules a set of hosts for verify.
    """
    matching_hosts = models.Host.query_objects(filter_data)
    models.AclGroup.check_for_acl_violation_hosts(matching_hosts)
    models.SpecialTask.schedule_special_task(
            matching_hosts, models.SpecialTask.Task.REVERIFY)
519
520
def get_jobs(not_yet_run=False, running=False, finished=False, **filter_data):
    """\
    Extra filter args for get_jobs:
    -not_yet_run: Include only jobs that have not yet started running.
    -running: Include only jobs that have start running but for which not
    all hosts have completed.
    -finished: Include only jobs for which all hosts have completed (or
    aborted).
    At most one of these three fields should be specified.
    """
    filter_data['extra_args'] = rpc_utils.extra_job_filters(
            not_yet_run, running, finished)
    job_objs = list(models.Job.query_objects(filter_data))
    # Batch-fetch label dependencies instead of one query per job.
    models.Job.objects.populate_relationships(job_objs, models.Label,
                                              'dependencies')
    serialized_jobs = []
    for job_obj in job_objs:
        entry = job_obj.get_object_dict()
        entry['dependencies'] = ','.join(dep.name
                                         for dep in job_obj.dependencies)
        serialized_jobs.append(entry)
    return rpc_utils.prepare_for_serialization(serialized_jobs)
544
545
def get_num_jobs(not_yet_run=False, running=False, finished=False,
                 **filter_data):
    """\
    See get_jobs() for documentation of extra filter parameters.
    """
    filter_data['extra_args'] = rpc_utils.extra_job_filters(
            not_yet_run, running, finished)
    return models.Job.query_count(filter_data)
555
556
def get_jobs_summary(**filter_data):
    """\
    Like get_jobs(), but adds a 'status_counts' field, which is a dictionary
    mapping status strings to the number of hosts currently with that
    status, i.e. {'Queued' : 4, 'Running' : 2}.
    """
    job_dicts = get_jobs(**filter_data)
    counts_by_job_id = models.Job.objects.get_status_counts(
            [job_dict['id'] for job_dict in job_dicts])
    for job_dict in job_dicts:
        job_dict['status_counts'] = counts_by_job_id[job_dict['id']]
    return rpc_utils.prepare_for_serialization(job_dicts)
569
570
def get_info_for_clone(id, preserve_metahosts, queue_entry_filter_data=None):
    """\
    Retrieves all the information needed to clone a job.

    @param id database ID of the job to clone.
    @param preserve_metahosts passed through to rpc_utils.get_job_info;
        presumably controls whether metahost entries stay metahosts rather
        than being resolved to concrete hosts -- confirm against that helper.
    @param queue_entry_filter_data optional filter applied to the job's
        queue entries when gathering host info.
    """
    job = models.Job.objects.get(id=id)
    job_info = rpc_utils.get_job_info(job,
                                      preserve_metahosts,
                                      queue_entry_filter_data)

    host_dicts = []
    for host in job_info['hosts']:
        # Reuse get_hosts() so each host dict carries labels/platform/acls.
        host_dict = get_hosts(id=host.id)[0]
        other_labels = host_dict['labels']
        # The platform label is reported separately, so drop it from the
        # "other" labels list.
        if host_dict['platform']:
            other_labels.remove(host_dict['platform'])
        host_dict['other_labels'] = ', '.join(other_labels)
        host_dicts.append(host_dict)

    # One-time hosts get a minimal synthetic dict with a placeholder
    # platform string.
    for host in job_info['one_time_hosts']:
        host_dict = dict(hostname=host.hostname,
                         id=host.id,
                         platform='(one-time host)',
                         locked_text='')
        host_dicts.append(host_dict)

    # convert keys from Label objects to strings (names of labels)
    meta_host_counts = dict((meta_host.name, count) for meta_host, count
                            in job_info['meta_host_counts'].iteritems())

    info = dict(job=job.get_object_dict(),
                meta_host_counts=meta_host_counts,
                hosts=host_dicts)
    info['job']['dependencies'] = job_info['dependencies']
    if job_info['atomic_group']:
        info['atomic_group_name'] = (job_info['atomic_group']).name
    else:
        info['atomic_group_name'] = None

    return rpc_utils.prepare_for_serialization(info)
610
611
612# host queue entries
613
def get_host_queue_entries(**filter_data):
    """\
    @returns A sequence of nested dictionaries of host and job information.
    """
    entries = models.HostQueueEntry.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(
            entries, ('host', 'atomic_group', 'job'))
621
622
def get_num_host_queue_entries(**filter_data):
    """\
    Get the number of host queue entries associated with this job.
    """
    return models.HostQueueEntry.query_count(filter_data)
628
629
def get_hqe_percentage_complete(**filter_data):
    """
    Computes the fraction of host queue entries matching the given filter data
    that are complete.
    """
    entries = models.HostQueueEntry.query_objects(filter_data)
    total = entries.count()
    # An empty result set is reported as fully complete.
    if not total:
        return 1
    completed = entries.filter(complete=True).count()
    return float(completed) / total
641
642
643# recurring run
644
def get_recurring(**filter_data):
    """\
    @returns A sequence of nested dictionaries of recurring-run information.
    """
    recurring_rows = models.RecurringRun.query_objects(filter_data)
    return rpc_utils.prepare_rows_as_nested_dicts(recurring_rows,
                                                  ('job', 'owner'))
649
650
def get_num_recurring(**filter_data):
    """\
    Count the recurring runs matching the given filters.
    """
    return models.RecurringRun.query_count(filter_data)
653
654
def delete_recurring_runs(**filter_data):
    """\
    Delete all recurring runs matching the given filters.
    """
    models.RecurringRun.query_objects(filter_data).delete()
658
659
def create_recurring_run(job_id, start_date, loop_period, loop_count):
    """\
    Schedule an existing job to recur, owned by the calling user.
    """
    template_job = models.Job.objects.get(id=job_id)
    return template_job.create_recurring_job(
            start_date=start_date,
            loop_period=loop_period,
            loop_count=loop_count,
            owner=thread_local.get_user().login)
667
668
669# other
670
def echo(data=""):
    """\
    Returns a passed in string. For doing a basic test to see if RPC calls
    can successfully be made.
    """
    # Intentionally a pure pass-through.
    return data
677
678
def get_motd():
    """\
    Returns the message of the day as a string.
    """
    motd = rpc_utils.get_motd()
    return motd
684
685
def get_static_data():
    """\
    Returns a dictionary containing a bunch of data that shouldn't change
    often and is otherwise inaccessible.  This includes:

    priorities: List of job priority choices.
    default_priority: Default priority value for new jobs.
    users: Sorted list of all users.
    labels: Sorted list of all labels.
    atomic_groups: Sorted list of all atomic groups.
    tests: Sorted list of all tests.
    profilers: Sorted list of all profilers.
    current_user: Logged-in username.
    host_statuses: Sorted list of possible Host statuses.
    job_statuses: Sorted list of possible HostQueueEntry statuses.
    job_timeout_default: The default job timeout length in hours.
    parse_failed_repair_default: Default value for the parse_failed_repair job
    option.
    reboot_before_options: A list of valid RebootBefore string enums.
    reboot_after_options: A list of valid RebootAfter string enums.
    motd: Server's message of the day.
    status_dictionary: A mapping from one word job status names to a more
            informative description.
    """
    # Translate the model's default priority value into its display string.
    priority_field = models.Job.get_field_dict()['priority']
    default_priority = models.Job.Priority.get_string(priority_field.default)

    status_descriptions = {"Aborted": "Aborted",
                           "Verifying": "Verifying Host",
                           "Pending": "Waiting on other hosts",
                           "Running": "Running autoserv",
                           "Completed": "Autoserv completed",
                           "Failed": "Failed to complete",
                           "Queued": "Queued",
                           "Starting": "Next in host's queue",
                           "Stopped": "Other host(s) failed verify",
                           "Parsing": "Awaiting parse of final results",
                           "Gathering": "Gathering log files",
                           "Template": "Template job for recurring run"}

    return {
        'priorities': models.Job.Priority.choices(),
        'default_priority': default_priority,
        'users': get_users(sort_by=['login']),
        'labels': get_labels(sort_by=['-platform', 'name']),
        'atomic_groups': get_atomic_groups(sort_by=['name']),
        'tests': get_tests(sort_by=['name']),
        'profilers': get_profilers(sort_by=['name']),
        'current_user': rpc_utils.prepare_for_serialization(
                thread_local.get_user().get_object_dict()),
        'host_statuses': sorted(models.Host.Status.names),
        'job_statuses': sorted(models.HostQueueEntry.Status.names),
        'job_timeout_default': models.Job.DEFAULT_TIMEOUT,
        'job_max_runtime_hrs_default': models.Job.DEFAULT_MAX_RUNTIME_HRS,
        'parse_failed_repair_default': bool(
                models.Job.DEFAULT_PARSE_FAILED_REPAIR),
        'reboot_before_options': models.RebootBefore.names,
        'reboot_after_options': models.RebootAfter.names,
        'motd': rpc_utils.get_motd(),
        'status_dictionary': status_descriptions,
    }
748
749
def get_server_time():
    """\
    Returns the server's current local time as a 'YYYY-MM-DD HH:MM' string.
    """
    now = datetime.datetime.now()
    return now.strftime("%Y-%m-%d %H:%M")
752