/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2014, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#ifdef ACPI_DISASSEMBLER
#include "acdisasm.h"
#endif
#include "acparser.h"
#include "amlcode.h"

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")

/* Local prototypes */
static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op);

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_auto_serialize_method
 *
 * PARAMETERS:  node                        - Namespace Node of the method
 *              obj_desc                    - Method object attached to node
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse a control method's AML to determine whether the method
 *              creates any named objects and therefore must be marked
 *              serialized.
 *
 * NOTE: It is a bit of overkill to mark all such methods serialized, since
 * there is only a problem if the method actually blocks during execution.
 * A blocking operation is, for example, a Sleep() operation or any access
 * to an operation region. However, it is probably not possible to easily
 * detect whether a method will block, so we simply mark all suspicious
 * methods as serialized.
 *
 * NOTE2: This code is essentially a generic routine for parsing a single
 * control method.
 *
 ******************************************************************************/
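
/*
 * Illustrative ASL sketch (hypothetical method, not taken from any real
 * table) of the case described above: a NotSerialized method that creates
 * a named object and then blocks. If a second thread enters while the
 * first is blocked, the second Name() fails with AE_ALREADY_EXISTS.
 *
 *     Method (EXM0, 0, NotSerialized)
 *     {
 *         Name (TMP0, 0)      // Creates a named object
 *         Sleep (10)          // Blocks; another thread may now enter
 *     }
 *
 * Auto-serialization marks such methods serialized up front, so a second
 * thread waits on the method mutex instead of colliding on TMP0.
 */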

acpi_status
acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
			      union acpi_operand_object *obj_desc)
{
	acpi_status status;
	union acpi_parse_object *op = NULL;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Method auto-serialization parse [%4.4s] %p\n",
			  acpi_ut_get_node_name(node), node));

	/* Create/Init a root op for the method parse tree */

	op = acpi_ps_alloc_op(AML_METHOD_OP);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_ps_set_name(op, node->name.integer);
	op->common.node = node;

	/* Create and initialize a new walk state */

	walk_state =
	    acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
	if (!walk_state) {
		acpi_ps_free_op(op);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	status = acpi_ds_init_aml_walk(walk_state, op, node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, NULL, 0);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		acpi_ps_free_op(op);
		return_ACPI_STATUS(status);
	}

	walk_state->descending_callback = acpi_ds_detect_named_opcodes;

	/* Parse the method, scan for creation of named objects */

	status = acpi_ps_parse_aml(walk_state);

	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_detect_named_opcodes
 *
 * PARAMETERS:  walk_state      - Current state of the parse tree walk
 *              out_op          - Unused, required for parser interface
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Descending callback used during the loading of ACPI tables.
 *              Currently used to detect methods that must be marked serialized
 *              in order to avoid problems with the creation of named objects.
 *
 ******************************************************************************/
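
/*
 * For reference (a sketch; the authoritative list is the AML_NAMED,
 * AML_CREATE, and AML_FIELD flag assignments in the opcode table in
 * psopcode.c), the opcode classes checked below include, for example:
 *
 *   AML_NAMED  - Name, Method, Device, Mutex, OperationRegion, ...
 *   AML_CREATE - CreateField, CreateByteField, CreateDWordField, ...
 *   AML_FIELD  - Field, IndexField, BankField
 */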

static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op)
{

	ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);

	/* We are only interested in opcodes that create a new name */

	if (!(walk_state->op_info->flags &
	      (AML_NAMED | AML_CREATE | AML_FIELD))) {
		return (AE_OK);
	}

	/*
	 * At this point, we know we have a Named object opcode.
	 * Mark the method as serialized. Later code will create a mutex for
	 * this method to enforce serialization.
	 *
	 * Note: the ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will
	 * ignore the Sync Level mechanism for this method, even though it is
	 * now serialized. Otherwise, there can be conflicts with existing ASL
	 * code that actually uses sync levels.
	 */
	walk_state->method_desc->method.sync_level = 0;
	walk_state->method_desc->method.info_flags |=
	    (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
			  walk_state->method_node->name.ascii,
			  walk_state->method_node, walk_state->op_info->name,
			  walk_state->opcode));

	/* Abort the parse, no need to examine this method any further */

	return (AE_CTRL_TERMINATE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  status          - Execution status
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error. Invokes the global exception handler
 *              if present, and dumps the method data if the disassembler is
 *              configured.
 *
 *              Note: Allows the exception handler to change the status code.
 *
 ******************************************************************************/
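
/*
 * A minimal sketch (hypothetical handler name) of how a host registers the
 * global exception handler invoked below, using the public ACPICA interface
 * acpi_install_exception_handler():
 *
 *     static acpi_status my_aml_handler(acpi_status aml_status,
 *                                       acpi_name name, u16 opcode,
 *                                       u32 aml_offset, void *context)
 *     {
 *         // Return AE_OK to let the method continue, or return
 *         // aml_status unchanged to keep the abort.
 *         return (aml_status);
 *     }
 *
 *     status = acpi_install_exception_handler(my_aml_handler);
 */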

acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
	ACPI_FUNCTION_ENTRY();

	/* Ignore AE_OK and control exception codes */

	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
		return (status);
	}

	/* Invoke the global exception handler */

	if (acpi_gbl_exception_handler) {

		/* Exit the interpreter, allow handler to execute methods */

		acpi_ex_exit_interpreter();

		/*
		 * Handler can map the exception code to anything it wants, including
		 * AE_OK, in which case the executing method will not be aborted.
		 */
		status = acpi_gbl_exception_handler(status,
						    walk_state->method_node ?
						    walk_state->method_node->
						    name.integer : 0,
						    walk_state->opcode,
						    walk_state->aml_offset,
						    NULL);
		acpi_ex_enter_interpreter();
	}

	acpi_ds_clear_implicit_return(walk_state);

#ifdef ACPI_DISASSEMBLER
	if (ACPI_FAILURE(status)) {

		/* Display method locals/args if disassembler is present */

		acpi_dm_dump_method_info(status, walk_state, walk_state->op);
	}
#endif

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  method_desc         - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/
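
/*
 * Background sketch: the sync level used below originates from the ASL
 * Method declaration, which may specify Serialized and an optional
 * SyncLevel (0-15), for example (hypothetical method):
 *
 *     Method (EXM1, 1, Serialized, 4) { ... }
 *
 * The value is carried in the method's AML flags and stored in
 * method_desc->method.sync_level, from which it is copied into the mutex
 * object created here.
 */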

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {
		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              walk_state          - Current state, NULL if not yet executing
 *                                    a method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method mutex
 *              for clearance to execute.
 *
 ******************************************************************************/

acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Prevent wraparound of the thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex
		 * creation until a method is actually executed, to minimize the
		 * object count.
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention.
		 *
		 * If the method was auto-serialized, we just ignore the sync level
		 * mechanism, because auto-serialization of methods can interfere
		 * with ASL code that actually uses sync levels.
		 *
		 * A top-level method invocation has no walk state at this point.
		 */
		if (walk_state &&
		    (!(obj_desc->method.info_flags &
		       ACPI_METHOD_IGNORE_SYNC_LEVEL)) &&
		    (walk_state->thread->current_sync_level >
		     obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s], current SyncLevel is too large (%u)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}
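
		/*
		 * Worked example (illustrative): a thread whose
		 * current_sync_level is 5 (e.g., it already holds a SyncLevel 5
		 * mutex) that invokes a Serialized method declared with
		 * SyncLevel 2 is rejected above with AE_AML_MUTEX_ORDER,
		 * because acquiring a lower-level mutex while holding a
		 * higher-level one could deadlock.
		 */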

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;
				walk_state->thread->current_sync_level =
				    obj_desc->method.sync_level;
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	acpi_method_count++;
	return_ACPI_STATUS(status);

cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/
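
/*
 * Rough sequence of the transfer implemented below (a summary of the code,
 * not additional behavior):
 *
 *   1. Look up the callee's namespace node and attached method object.
 *   2. acpi_ds_begin_method_execution() - serialization and thread count.
 *   3. Create next_walk_state and call acpi_ds_init_aml_walk() with the
 *      arguments taken from the caller's operand stack.
 *   4. Release the caller's operand references; the interpreter then
 *      continues execution in next_walk_state (the callee).
 */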

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on the method mutex */

	status = acpi_ds_begin_method_execution(method_node, obj_desc,
						this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state = acpi_ds_create_walk_state(obj_desc->method.owner_id,
						    NULL, obj_desc, thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always start at
	 * index 0. Null-terminate the list of arguments.
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;
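
	/*
	 * At this point (illustrative layout), operands[0] through
	 * operands[num_operands - 1] on the caller's stack hold the resolved
	 * argument objects (Arg0, Arg1, ...), and the element just stored
	 * above is the NULL terminator.
	 */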

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	info->parameters = &this_walk_state->operands[0];

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Delete the operands on the previous walk state's operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	/* Invoke an internal method if necessary */

	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    obj_desc->method.dispatch.implementation(next_walk_state);
		if (status == AE_OK) {
			status = AE_CTRL_TERMINATE;
		}
	}

	return_ACPI_STATUS(status);

cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	if (next_walk_state) {
		acpi_ds_delete_walk_state(next_walk_state);
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/
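
/*
 * Illustrative ASL fragment (hypothetical methods) for the "implicit return"
 * handling below: with implicit return support enabled, the caller of M1
 * receives 42 even though M1 contains no explicit Return statement, because
 * the last value computed by the method is preserved as its return value.
 *
 *     Method (M1, 0) { Store (42, Local0) }
 *     Method (M2, 0) { Store (M1 (), Local1) }
 */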

acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "    ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value
		 * computed by the method is "implicitly" returned to the caller,
		 * in the absence of an explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method, or remove one reference if the explicit
			 * return is the same object as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/
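
/*
 * Cleanup performed below, in order (a summary of the code that follows):
 *
 *   1. Delete the method's locals and arguments.
 *   2. If serialized, decrement the mutex acquisition depth; when it reaches
 *      zero, restore the caller's sync level and release the OS mutex.
 *   3. If this is the last executing thread and the method is not
 *      module-level code, delete the namespace objects the method created.
 *   4. Decrement the thread count; when no threads remain, apply the
 *      "serialized pending" workaround if needed and release the owner_id.
 */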

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * If the method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method, unless:
		 * 1) This method is a module-level executable code method, in which
		 *    case we want to make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 *    will wait until the last thread has completed.
		 */
		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
		    && (method_desc->method.thread_count == 1)) {

			/* Delete any direct children of (created by) this method */

			acpi_ns_delete_namespace_subtree(walk_state->
							 method_node);

			/*
			 * Delete any objects that were created by this method
			 * elsewhere in the namespace (if any were created).
			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE flag optimizes
			 * the deletion such that we don't have to perform an entire
			 * namespace walk for every control method execution.
			 */
			if (method_desc->method.info_flags &
			    ACPI_METHOD_MODIFIED_NAMESPACE) {
				acpi_ns_delete_namespace_by_owner(method_desc->
								  method.
								  owner_id);
				method_desc->method.info_flags &=
				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
			}
		}
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case;
		 * we immediately reuse it for the next thread executing this method.
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %u threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written
		 * and does not support multiple thread execution. The best example
		 * of this is if such a method creates namespace objects and blocks.
		 * A second thread will fail with an AE_ALREADY_EXISTS exception.
		 *
		 * This code is here because we must wait until the last thread exits
		 * before marking the method as serialized.
		 */
		if (method_desc->method.info_flags &
		    ACPI_METHOD_SERIALIZED_PENDING) {
			if (walk_state) {
				ACPI_INFO((AE_INFO,
					   "Marking method %4.4s as Serialized because of AE_ALREADY_EXISTS error",
					   walk_state->method_node->name.
					   ascii));
			}

			/*
			 * The method tried to create an object twice and was marked as
			 * "pending serialized". The probable cause is that the method
			 * cannot handle reentrancy.
			 *
			 * The method was created as not_serialized, but it tried to
			 * create a named object and then blocked, causing the second
			 * thread entrance to begin and then fail. Work around this
			 * problem by marking the method permanently as Serialized when
			 * the last thread exits here.
			 */
			method_desc->method.info_flags &=
			    ~ACPI_METHOD_SERIALIZED_PENDING;
			method_desc->method.info_flags |=
			    (ACPI_METHOD_SERIALIZED |
			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
			method_desc->method.sync_level = 0;
		}

		/* No more threads, we can free the owner_id */

		if (!(method_desc->method.info_flags &
		      ACPI_METHOD_MODULE_LEVEL)) {
			acpi_ut_release_owner_id(&method_desc->method.owner_id);
		}
	}

	return_VOID;
}