nldr.c revision e436d07db9db374105daa6c15d853e3f13a37104
1/*
2 * nldr.c
3 *
4 * DSP-BIOS Bridge driver support functions for TI OMAP processors.
5 *
6 * DSP/BIOS Bridge dynamic + overlay Node loader.
7 *
8 * Copyright (C) 2005-2006 Texas Instruments, Inc.
9 *
10 * This package is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
15 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
16 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
17 */
18
19#include <dspbridge/host_os.h>
20
21#include <dspbridge/std.h>
22#include <dspbridge/dbdefs.h>
23
24#include <dspbridge/dbc.h>
25
26/* Platform manager */
27#include <dspbridge/cod.h>
28#include <dspbridge/dev.h>
29
30/* Resource manager */
31#include <dspbridge/dbll.h>
32#include <dspbridge/dbdcd.h>
33#include <dspbridge/rmm.h>
34#include <dspbridge/uuidutil.h>
35
36#include <dspbridge/nldr.h>
37
38/* Name of section containing dynamic load mem */
39#define DYNMEMSECT  ".dspbridge_mem"
40
41/* Name of section containing dependent library information */
42#define DEPLIBSECT  ".dspbridge_deplibs"
43
44/* Max depth of recursion for loading node's dependent libraries */
45#define MAXDEPTH	    5
46
47/* Max number of persistent libraries kept by a node */
48#define MAXLIBS	 5
49
50/*
51 *  Defines for extracting packed dynamic load memory requirements from two
52 *  masks.
53 *  These defines must match node.cdb and dynm.cdb
54 *  Format of data/code mask is:
55 *   uuuuuuuu|fueeeeee|fudddddd|fucccccc|
56 *  where
57 *      u = unused
 *      cccccc = preferred/required dynamic mem segid for create phase data/code
 *      dddddd = preferred/required dynamic mem segid for delete phase data/code
 *      eeeeee = preferred/req. dynamic mem segid for execute phase data/code
61 *      f = flag indicating if memory is preferred or required:
62 *	  f = 1 if required, f = 0 if preferred.
63 *
64 *  The 6 bits of the segid are interpreted as follows:
65 *
66 *  If the 6th bit (bit 5) is not set, then this specifies a memory segment
67 *  between 0 and 31 (a maximum of 32 dynamic loading memory segments).
68 *  If the 6th bit (bit 5) is set, segid has the following interpretation:
69 *      segid = 32 - Any internal memory segment can be used.
70 *      segid = 33 - Any external memory segment can be used.
71 *      segid = 63 - Any memory segment can be used (in this case the
72 *		   required/preferred flag is irrelevant).
73 *
74 */
75/* Maximum allowed dynamic loading memory segments */
76#define MAXMEMSEGS      32
77
78#define MAXSEGID	3	/* Largest possible (real) segid */
79#define MEMINTERNALID   32	/* Segid meaning use internal mem */
80#define MEMEXTERNALID   33	/* Segid meaning use external mem */
81#define NULLID	  63		/* Segid meaning no memory req/pref */
82#define FLAGBIT	 7		/* 7th bit is pref./req. flag */
83#define SEGMASK	 0x3f		/* Bits 0 - 5 */
84
85#define CREATEBIT	0	/* Create segid starts at bit 0 */
86#define DELETEBIT	8	/* Delete segid starts at bit 8 */
87#define EXECUTEBIT      16	/* Execute segid starts at bit 16 */
88
89/*
90 *  Masks that define memory type.  Must match defines in dynm.cdb.
91 */
92#define DYNM_CODE	0x2
93#define DYNM_DATA	0x4
94#define DYNM_CODEDATA   (DYNM_CODE | DYNM_DATA)
95#define DYNM_INTERNAL   0x8
96#define DYNM_EXTERNAL   0x10
97
98/*
99 *  Defines for packing memory requirement/preference flags for code and
100 *  data of each of the node's phases into one mask.
101 *  The bit is set if the segid is required for loading code/data of the
102 *  given phase. The bit is not set, if the segid is preferred only.
103 *
 *  These defines are also used as indices into a segid array for the node.
105 *  eg node's segid[CREATEDATAFLAGBIT] is the memory segment id that the
106 *  create phase data is required or preferred to be loaded into.
107 */
108#define CREATEDATAFLAGBIT   0
109#define CREATECODEFLAGBIT   1
110#define EXECUTEDATAFLAGBIT  2
111#define EXECUTECODEFLAGBIT  3
112#define DELETEDATAFLAGBIT   4
113#define DELETECODEFLAGBIT   5
114#define MAXFLAGS	    6
115
116#define IS_INTERNAL(nldr_obj, segid) (((segid) <= MAXSEGID && \
117	    nldr_obj->seg_table[(segid)] & DYNM_INTERNAL) || \
118	    (segid) == MEMINTERNALID)
119
120#define IS_EXTERNAL(nldr_obj, segid) (((segid) <= MAXSEGID && \
121	    nldr_obj->seg_table[(segid)] & DYNM_EXTERNAL) || \
122	    (segid) == MEMEXTERNALID)
123
124#define SWAPLONG(x) ((((x) << 24) & 0xFF000000) | (((x) << 8) & 0xFF0000L) | \
125	(((x) >> 8) & 0xFF00L) | (((x) >> 24) & 0xFF))
126
127#define SWAPWORD(x) ((((x) << 8) & 0xFF00) | (((x) >> 8) & 0xFF))
128
129    /*
130     *  These names may be embedded in overlay sections to identify which
131     *  node phase the section should be overlayed.
132 */
133#define PCREATE	 "create"
134#define PDELETE	 "delete"
135#define PEXECUTE	"execute"
136
137#define IS_EQUAL_UUID(uuid1, uuid2) (\
138	((uuid1).ul_data1 == (uuid2).ul_data1) && \
139	((uuid1).us_data2 == (uuid2).us_data2) && \
140	((uuid1).us_data3 == (uuid2).us_data3) && \
141	((uuid1).uc_data4 == (uuid2).uc_data4) && \
142	((uuid1).uc_data5 == (uuid2).uc_data5) && \
143	(strncmp((void *)(uuid1).uc_data6, (void *)(uuid2).uc_data6, 6)) == 0)
144
145    /*
146     *  ======== mem_seg_info ========
147     *  Format of dynamic loading memory segment info in coff file.
148     *  Must match dynm.h55.
149 */
struct mem_seg_info {
	u32 segid;		/* Dynamic loading memory segment number */
	u32 base;		/* Segment base address (copied to rmm_segment) */
	u32 len;		/* Segment length (copied to rmm_segment) */
	u32 type;		/* Mask of DYNM_CODE, DYNM_INTERNAL, etc. */
};
156
157/*
158 *  ======== lib_node ========
159 *  For maintaining a tree of library dependencies.
160 */
struct lib_node {
	struct dbll_library_obj *lib;	/* The library */
	u16 dep_libs;		/* Number of dependent libraries */
	/* Array of dep_libs entries, one per dependent library of lib */
	struct lib_node *dep_libs_tree;
};
166
167/*
168 *  ======== ovly_sect ========
169 *  Information needed to overlay a section.
170 */
struct ovly_sect {
	struct ovly_sect *next_sect;	/* Singly-linked list link */
	u32 sect_load_addr;	/* Load address of section */
	u32 sect_run_addr;	/* Run address of section */
	u32 size;		/* Size of section */
	u16 page;		/* DBL_CODE, DBL_DATA */
};
178
179/*
180 *  ======== ovly_node ========
181 *  For maintaining a list of overlay nodes, with sections that need to be
182 *  overlayed for each of the nodes phases.
183 */
struct ovly_node {
	struct dsp_uuid uuid;	/* Node UUID from the DCD */
	char *node_name;	/* Heap-allocated copy of the node name */
	/* Per-phase lists of sections that must be overlaid for the phase */
	struct ovly_sect *create_sects_list;
	struct ovly_sect *delete_sects_list;
	struct ovly_sect *execute_sects_list;
	struct ovly_sect *other_sects_list;
	/* Number of sections in each of the lists above */
	u16 create_sects;
	u16 delete_sects;
	u16 execute_sects;
	u16 other_sects;
	/* Per-phase reference counts — presumably how many times each phase
	 * has been overlaid; maintained outside this chunk (TODO confirm) */
	u16 create_ref;
	u16 delete_ref;
	u16 execute_ref;
	u16 other_ref;
};
200
201/*
202 *  ======== nldr_object ========
203 *  Overlay loader object.
204 */
struct nldr_object {
	struct dev_object *hdev_obj;	/* Device object */
	struct dcd_manager *hdcd_mgr;	/* Proc/Node data manager */
	struct dbll_tar_obj *dbll;	/* The DBL loader */
	struct dbll_library_obj *base_lib;	/* Base image library */
	struct rmm_target_obj *rmm;	/* Remote memory manager for DSP */
	struct dbll_fxns ldr_fxns;	/* Loader function table */
	struct dbll_attrs ldr_attrs;	/* attrs to pass to loader functions */
	nldr_ovlyfxn ovly_fxn;	/* "write" for overlay nodes */
	nldr_writefxn write_fxn;	/* "write" for dynamic nodes */
	struct ovly_node *ovly_table;	/* Table of overlay nodes */
	u16 ovly_nodes;		/* Number of overlay nodes in base */
	u16 ovly_nid;		/* Next free slot while filling ovly_table */
	u16 dload_segs;		/* Number of dynamic load mem segs */
	u32 *seg_table;		/* memtypes of dynamic memory segs
				 * indexed by segid
				 */
	u16 us_dsp_mau_size;	/* Size of DSP MAU */
	u16 us_dsp_word_size;	/* Size of DSP word */
};
225
226/*
227 *  ======== nldr_nodeobject ========
228 *  Dynamic node object. This object is created when a node is allocated.
229 */
struct nldr_nodeobject {
	struct nldr_object *nldr_obj;	/* Dynamic loader handle */
	void *priv_ref;		/* Handle to pass to dbl_write_fxn */
	struct dsp_uuid uuid;	/* Node's UUID */
	bool dynamic;		/* Dynamically loaded node? */
	bool overlay;		/* Overlay node? */
	bool *pf_phase_split;	/* Multiple phase libraries? (caller-owned) */
	struct lib_node root;	/* Library containing node phase */
	struct lib_node create_lib;	/* Library with create phase lib */
	struct lib_node execute_lib;	/* Library with execute phase lib */
	struct lib_node delete_lib;	/* Library with delete phase lib */
	/* libs remain loaded until Delete */
	struct lib_node pers_lib_table[MAXLIBS];
	s32 pers_libs;		/* Number of persistent libraries */
	/* Path in lib dependency tree (bounded by MAXDEPTH recursion) */
	struct dbll_library_obj *lib_path[MAXDEPTH + 1];
	enum nldr_phase phase;	/* Node phase currently being loaded */

	/*
	 *  Dynamic loading memory segments for data and code of each phase,
	 *  indexed by the *FLAGBIT defines above.
	 */
	u16 seg_id[MAXFLAGS];

	/*
	 *  Mask indicating whether each mem segment specified in seg_id[]
	 *  is preferred or required.
	 *  For example
	 *  	if (code_data_flag_mask & (1 << EXECUTEDATAFLAGBIT)) != 0,
	 *  then it is required to load execute phase data into the memory
	 *  specified by seg_id[EXECUTEDATAFLAGBIT].
	 */
	u32 code_data_flag_mask;
};
263
/*
 *  Dynamic loader function table.  Positional initializer: the entry order
 *  must match the member order of struct dbll_fxns.
 */
static struct dbll_fxns ldr_fxns = {
	(dbll_close_fxn) dbll_close,
	(dbll_create_fxn) dbll_create,
	(dbll_delete_fxn) dbll_delete,
	(dbll_exit_fxn) dbll_exit,
	(dbll_get_attrs_fxn) dbll_get_attrs,
	(dbll_get_addr_fxn) dbll_get_addr,
	(dbll_get_c_addr_fxn) dbll_get_c_addr,
	(dbll_get_sect_fxn) dbll_get_sect,
	(dbll_init_fxn) dbll_init,
	(dbll_load_fxn) dbll_load,
	(dbll_load_sect_fxn) dbll_load_sect,
	(dbll_open_fxn) dbll_open,
	(dbll_read_sect_fxn) dbll_read_sect,
	(dbll_set_attrs_fxn) dbll_set_attrs,
	(dbll_unload_fxn) dbll_unload,
	(dbll_unload_sect_fxn) dbll_unload_sect,
};
283
284static u32 refs;		/* module reference count */
285
286static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
287				u32 addr, u32 bytes);
288static int add_ovly_node(struct dsp_uuid *uuid_obj,
289				enum dsp_dcdobjtype obj_type, IN void *handle);
290static int add_ovly_sect(struct nldr_object *nldr_obj,
291				struct ovly_sect **pList,
292				struct dbll_sect_info *pSectInfo,
293				bool *exists, u32 addr, u32 bytes);
294static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
295			   s32 mtype);
296static void free_sects(struct nldr_object *nldr_obj,
297		       struct ovly_sect *phase_sects, u16 alloc_num);
298static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
299			     char *symName, struct dbll_sym_val **sym);
300static int load_lib(struct nldr_nodeobject *nldr_node_obj,
301			   struct lib_node *root, struct dsp_uuid uuid,
302			   bool rootPersistent,
303			   struct dbll_library_obj **lib_path,
304			   enum nldr_phase phase, u16 depth);
305static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
306			    enum nldr_phase phase);
307static int remote_alloc(void **pRef, u16 mem_sect_type, u32 size,
308			       u32 align, u32 *dsp_address,
309			       OPTIONAL s32 segmentId,
310			       OPTIONAL s32 req, bool reserve);
311static int remote_free(void **pRef, u16 space, u32 dsp_address, u32 size,
312			      bool reserve);
313
314static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
315		       struct lib_node *root);
316static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
317			enum nldr_phase phase);
318static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
319					 struct dbll_library_obj *lib);
320static u32 find_lcm(u32 a, u32 b);
321static u32 find_gcf(u32 a, u32 b);
322
323/*
324 *  ======== nldr_allocate ========
325 */
326int nldr_allocate(struct nldr_object *nldr_obj, void *priv_ref,
327			 IN CONST struct dcd_nodeprops *node_props,
328			 OUT struct nldr_nodeobject **nldr_nodeobj,
329			 IN bool *pf_phase_split)
330{
331	struct nldr_nodeobject *nldr_node_obj = NULL;
332	int status = 0;
333
334	DBC_REQUIRE(refs > 0);
335	DBC_REQUIRE(node_props != NULL);
336	DBC_REQUIRE(nldr_nodeobj != NULL);
337	DBC_REQUIRE(nldr_obj);
338
339	/* Initialize handle in case of failure */
340	*nldr_nodeobj = NULL;
341	/* Allocate node object */
342	nldr_node_obj = kzalloc(sizeof(struct nldr_nodeobject), GFP_KERNEL);
343
344	if (nldr_node_obj == NULL) {
345		status = -ENOMEM;
346	} else {
347		nldr_node_obj->pf_phase_split = pf_phase_split;
348		nldr_node_obj->pers_libs = 0;
349		nldr_node_obj->nldr_obj = nldr_obj;
350		nldr_node_obj->priv_ref = priv_ref;
351		/* Save node's UUID. */
352		nldr_node_obj->uuid = node_props->ndb_props.ui_node_id;
353		/*
354		 *  Determine if node is a dynamically loaded node from
355		 *  ndb_props.
356		 */
357		if (node_props->us_load_type == NLDR_DYNAMICLOAD) {
358			/* Dynamic node */
359			nldr_node_obj->dynamic = true;
360			/*
361			 *  Extract memory requirements from ndb_props masks
362			 */
363			/* Create phase */
364			nldr_node_obj->seg_id[CREATEDATAFLAGBIT] = (u16)
365			    (node_props->ul_data_mem_seg_mask >> CREATEBIT) &
366			    SEGMASK;
367			nldr_node_obj->code_data_flag_mask |=
368			    ((node_props->ul_data_mem_seg_mask >>
369			      (CREATEBIT + FLAGBIT)) & 1) << CREATEDATAFLAGBIT;
370			nldr_node_obj->seg_id[CREATECODEFLAGBIT] = (u16)
371			    (node_props->ul_code_mem_seg_mask >>
372			     CREATEBIT) & SEGMASK;
373			nldr_node_obj->code_data_flag_mask |=
374			    ((node_props->ul_code_mem_seg_mask >>
375			      (CREATEBIT + FLAGBIT)) & 1) << CREATECODEFLAGBIT;
376			/* Execute phase */
377			nldr_node_obj->seg_id[EXECUTEDATAFLAGBIT] = (u16)
378			    (node_props->ul_data_mem_seg_mask >>
379			     EXECUTEBIT) & SEGMASK;
380			nldr_node_obj->code_data_flag_mask |=
381			    ((node_props->ul_data_mem_seg_mask >>
382			      (EXECUTEBIT + FLAGBIT)) & 1) <<
383			    EXECUTEDATAFLAGBIT;
384			nldr_node_obj->seg_id[EXECUTECODEFLAGBIT] = (u16)
385			    (node_props->ul_code_mem_seg_mask >>
386			     EXECUTEBIT) & SEGMASK;
387			nldr_node_obj->code_data_flag_mask |=
388			    ((node_props->ul_code_mem_seg_mask >>
389			      (EXECUTEBIT + FLAGBIT)) & 1) <<
390			    EXECUTECODEFLAGBIT;
391			/* Delete phase */
392			nldr_node_obj->seg_id[DELETEDATAFLAGBIT] = (u16)
393			    (node_props->ul_data_mem_seg_mask >> DELETEBIT) &
394			    SEGMASK;
395			nldr_node_obj->code_data_flag_mask |=
396			    ((node_props->ul_data_mem_seg_mask >>
397			      (DELETEBIT + FLAGBIT)) & 1) << DELETEDATAFLAGBIT;
398			nldr_node_obj->seg_id[DELETECODEFLAGBIT] = (u16)
399			    (node_props->ul_code_mem_seg_mask >>
400			     DELETEBIT) & SEGMASK;
401			nldr_node_obj->code_data_flag_mask |=
402			    ((node_props->ul_code_mem_seg_mask >>
403			      (DELETEBIT + FLAGBIT)) & 1) << DELETECODEFLAGBIT;
404		} else {
405			/* Non-dynamically loaded nodes are part of the
406			 * base image */
407			nldr_node_obj->root.lib = nldr_obj->base_lib;
408			/* Check for overlay node */
409			if (node_props->us_load_type == NLDR_OVLYLOAD)
410				nldr_node_obj->overlay = true;
411
412		}
413		*nldr_nodeobj = (struct nldr_nodeobject *)nldr_node_obj;
414	}
415	/* Cleanup on failure */
416	if (DSP_FAILED(status) && nldr_node_obj)
417		kfree(nldr_node_obj);
418
419	DBC_ENSURE((DSP_SUCCEEDED(status) && *nldr_nodeobj)
420		   || (DSP_FAILED(status) && *nldr_nodeobj == NULL));
421	return status;
422}
423
424/*
425 *  ======== nldr_create ========
426 */
427int nldr_create(OUT struct nldr_object **phNldr,
428		       struct dev_object *hdev_obj,
429		       IN CONST struct nldr_attrs *pattrs)
430{
431	struct cod_manager *cod_mgr;	/* COD manager */
432	char *psz_coff_buf = NULL;
433	char sz_zl_file[COD_MAXPATHLENGTH];
434	struct nldr_object *nldr_obj = NULL;
435	struct dbll_attrs save_attrs;
436	struct dbll_attrs new_attrs;
437	dbll_flags flags;
438	u32 ul_entry;
439	u16 dload_segs = 0;
440	struct mem_seg_info *mem_info_obj;
441	u32 ul_len = 0;
442	u32 ul_addr;
443	struct rmm_segment *rmm_segs = NULL;
444	u16 i;
445	int status = 0;
446	DBC_REQUIRE(refs > 0);
447	DBC_REQUIRE(phNldr != NULL);
448	DBC_REQUIRE(hdev_obj != NULL);
449	DBC_REQUIRE(pattrs != NULL);
450	DBC_REQUIRE(pattrs->pfn_ovly != NULL);
451	DBC_REQUIRE(pattrs->pfn_write != NULL);
452
453	/* Allocate dynamic loader object */
454	nldr_obj = kzalloc(sizeof(struct nldr_object), GFP_KERNEL);
455	if (nldr_obj) {
456		nldr_obj->hdev_obj = hdev_obj;
457		/* warning, lazy status checking alert! */
458		dev_get_cod_mgr(hdev_obj, &cod_mgr);
459		if (cod_mgr) {
460			status = cod_get_loader(cod_mgr, &nldr_obj->dbll);
461			DBC_ASSERT(DSP_SUCCEEDED(status));
462			status = cod_get_base_lib(cod_mgr, &nldr_obj->base_lib);
463			DBC_ASSERT(DSP_SUCCEEDED(status));
464			status =
465			    cod_get_base_name(cod_mgr, sz_zl_file,
466							COD_MAXPATHLENGTH);
467			DBC_ASSERT(DSP_SUCCEEDED(status));
468		}
469		status = 0;
470		/* end lazy status checking */
471		nldr_obj->us_dsp_mau_size = pattrs->us_dsp_mau_size;
472		nldr_obj->us_dsp_word_size = pattrs->us_dsp_word_size;
473		nldr_obj->ldr_fxns = ldr_fxns;
474		if (!(nldr_obj->ldr_fxns.init_fxn()))
475			status = -ENOMEM;
476
477	} else {
478		status = -ENOMEM;
479	}
480	/* Create the DCD Manager */
481	if (DSP_SUCCEEDED(status))
482		status = dcd_create_manager(NULL, &nldr_obj->hdcd_mgr);
483
484	/* Get dynamic loading memory sections from base lib */
485	if (DSP_SUCCEEDED(status)) {
486		status =
487		    nldr_obj->ldr_fxns.get_sect_fxn(nldr_obj->base_lib,
488						    DYNMEMSECT, &ul_addr,
489						    &ul_len);
490		if (DSP_SUCCEEDED(status)) {
491			psz_coff_buf =
492				kzalloc(ul_len * nldr_obj->us_dsp_mau_size,
493								GFP_KERNEL);
494			if (!psz_coff_buf)
495				status = -ENOMEM;
496		} else {
497			/* Ok to not have dynamic loading memory */
498			status = 0;
499			ul_len = 0;
500			dev_dbg(bridge, "%s: failed - no dynamic loading mem "
501				"segments: 0x%x\n", __func__, status);
502		}
503	}
504	if (DSP_SUCCEEDED(status) && ul_len > 0) {
505		/* Read section containing dynamic load mem segments */
506		status =
507		    nldr_obj->ldr_fxns.read_sect_fxn(nldr_obj->base_lib,
508						     DYNMEMSECT, psz_coff_buf,
509						     ul_len);
510	}
511	if (DSP_SUCCEEDED(status) && ul_len > 0) {
512		/* Parse memory segment data */
513		dload_segs = (u16) (*((u32 *) psz_coff_buf));
514		if (dload_segs > MAXMEMSEGS)
515			status = -EBADF;
516	}
517	/* Parse dynamic load memory segments */
518	if (DSP_SUCCEEDED(status) && dload_segs > 0) {
519		rmm_segs = kzalloc(sizeof(struct rmm_segment) * dload_segs,
520								GFP_KERNEL);
521		nldr_obj->seg_table =
522				kzalloc(sizeof(u32) * dload_segs, GFP_KERNEL);
523		if (rmm_segs == NULL || nldr_obj->seg_table == NULL) {
524			status = -ENOMEM;
525		} else {
526			nldr_obj->dload_segs = dload_segs;
527			mem_info_obj = (struct mem_seg_info *)(psz_coff_buf +
528							       sizeof(u32));
529			for (i = 0; i < dload_segs; i++) {
530				rmm_segs[i].base = (mem_info_obj + i)->base;
531				rmm_segs[i].length = (mem_info_obj + i)->len;
532				rmm_segs[i].space = 0;
533				nldr_obj->seg_table[i] =
534				    (mem_info_obj + i)->type;
535				dev_dbg(bridge,
536					"(proc) DLL MEMSEGMENT: %d, "
537					"Base: 0x%x, Length: 0x%x\n", i,
538					rmm_segs[i].base, rmm_segs[i].length);
539			}
540		}
541	}
542	/* Create Remote memory manager */
543	if (DSP_SUCCEEDED(status))
544		status = rmm_create(&nldr_obj->rmm, rmm_segs, dload_segs);
545
546	if (DSP_SUCCEEDED(status)) {
547		/* set the alloc, free, write functions for loader */
548		nldr_obj->ldr_fxns.get_attrs_fxn(nldr_obj->dbll, &save_attrs);
549		new_attrs = save_attrs;
550		new_attrs.alloc = (dbll_alloc_fxn) remote_alloc;
551		new_attrs.free = (dbll_free_fxn) remote_free;
552		new_attrs.sym_lookup = (dbll_sym_lookup) get_symbol_value;
553		new_attrs.sym_handle = nldr_obj;
554		new_attrs.write = (dbll_write_fxn) pattrs->pfn_write;
555		nldr_obj->ovly_fxn = pattrs->pfn_ovly;
556		nldr_obj->write_fxn = pattrs->pfn_write;
557		nldr_obj->ldr_attrs = new_attrs;
558	}
559	kfree(rmm_segs);
560
561	kfree(psz_coff_buf);
562
563	/* Get overlay nodes */
564	if (DSP_SUCCEEDED(status)) {
565		status =
566		    cod_get_base_name(cod_mgr, sz_zl_file, COD_MAXPATHLENGTH);
567		/* lazy check */
568		DBC_ASSERT(DSP_SUCCEEDED(status));
569		/* First count number of overlay nodes */
570		status =
571		    dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
572				    add_ovly_node, (void *)nldr_obj);
573		/* Now build table of overlay nodes */
574		if (DSP_SUCCEEDED(status) && nldr_obj->ovly_nodes > 0) {
575			/* Allocate table for overlay nodes */
576			nldr_obj->ovly_table =
577					kzalloc(sizeof(struct ovly_node) *
578					nldr_obj->ovly_nodes, GFP_KERNEL);
579			/* Put overlay nodes in the table */
580			nldr_obj->ovly_nid = 0;
581			status = dcd_get_objects(nldr_obj->hdcd_mgr, sz_zl_file,
582						 add_ovly_node,
583						 (void *)nldr_obj);
584		}
585	}
586	/* Do a fake reload of the base image to get overlay section info */
587	if (DSP_SUCCEEDED(status) && nldr_obj->ovly_nodes > 0) {
588		save_attrs.write = fake_ovly_write;
589		save_attrs.log_write = add_ovly_info;
590		save_attrs.log_write_handle = nldr_obj;
591		flags = DBLL_CODE | DBLL_DATA | DBLL_SYMB;
592		status = nldr_obj->ldr_fxns.load_fxn(nldr_obj->base_lib, flags,
593						     &save_attrs, &ul_entry);
594	}
595	if (DSP_SUCCEEDED(status)) {
596		*phNldr = (struct nldr_object *)nldr_obj;
597	} else {
598		if (nldr_obj)
599			nldr_delete((struct nldr_object *)nldr_obj);
600
601		*phNldr = NULL;
602	}
603	/* FIXME:Temp. Fix. Must be removed */
604	DBC_ENSURE((DSP_SUCCEEDED(status) && *phNldr)
605		   || (DSP_FAILED(status) && (*phNldr == NULL)));
606	return status;
607}
608
609/*
610 *  ======== nldr_delete ========
611 */
612void nldr_delete(struct nldr_object *nldr_obj)
613{
614	struct ovly_sect *ovly_section;
615	struct ovly_sect *next;
616	u16 i;
617	DBC_REQUIRE(refs > 0);
618	DBC_REQUIRE(nldr_obj);
619
620	nldr_obj->ldr_fxns.exit_fxn();
621	if (nldr_obj->rmm)
622		rmm_delete(nldr_obj->rmm);
623
624	kfree(nldr_obj->seg_table);
625
626	if (nldr_obj->hdcd_mgr)
627		dcd_destroy_manager(nldr_obj->hdcd_mgr);
628
629	/* Free overlay node information */
630	if (nldr_obj->ovly_table) {
631		for (i = 0; i < nldr_obj->ovly_nodes; i++) {
632			ovly_section =
633			    nldr_obj->ovly_table[i].create_sects_list;
634			while (ovly_section) {
635				next = ovly_section->next_sect;
636				kfree(ovly_section);
637				ovly_section = next;
638			}
639			ovly_section =
640			    nldr_obj->ovly_table[i].delete_sects_list;
641			while (ovly_section) {
642				next = ovly_section->next_sect;
643				kfree(ovly_section);
644				ovly_section = next;
645			}
646			ovly_section =
647			    nldr_obj->ovly_table[i].execute_sects_list;
648			while (ovly_section) {
649				next = ovly_section->next_sect;
650				kfree(ovly_section);
651				ovly_section = next;
652			}
653			ovly_section = nldr_obj->ovly_table[i].other_sects_list;
654			while (ovly_section) {
655				next = ovly_section->next_sect;
656				kfree(ovly_section);
657				ovly_section = next;
658			}
659		}
660		kfree(nldr_obj->ovly_table);
661	}
662	kfree(nldr_obj);
663}
664
665/*
666 *  ======== nldr_exit ========
667 *  Discontinue usage of NLDR module.
668 */
669void nldr_exit(void)
670{
671	DBC_REQUIRE(refs > 0);
672
673	refs--;
674
675	if (refs == 0)
676		rmm_exit();
677
678	DBC_ENSURE(refs >= 0);
679}
680
681/*
682 *  ======== nldr_get_fxn_addr ========
683 */
/*
 *  ======== nldr_get_fxn_addr ========
 *  Resolve the DSP address of function pstrFxn for this node and return it
 *  through pulAddr.  Search order: the phase's root library (C-mangled
 *  lookup first, then plain), then each dependent library (plain lookup
 *  first, then C-mangled), then the node's persistent libraries.
 *  Returns 0 on success, -ESPIPE if the symbol is not found anywhere.
 */
int nldr_get_fxn_addr(struct nldr_nodeobject *nldr_node_obj,
			     char *pstrFxn, u32 * pulAddr)
{
	struct dbll_sym_val *dbll_sym;
	struct nldr_object *nldr_obj;
	int status = 0;
	bool status1 = false;	/* true once the symbol has been found */
	s32 i = 0;
	struct lib_node root = { NULL, 0, NULL };
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(nldr_node_obj);
	DBC_REQUIRE(pulAddr != NULL);
	DBC_REQUIRE(pstrFxn != NULL);

	nldr_obj = nldr_node_obj->nldr_obj;
	/* Called from node_create(), node_delete(), or node_run(). */
	if (nldr_node_obj->dynamic && *nldr_node_obj->pf_phase_split) {
		/* Phase-split dynamic node: pick the library for the phase
		 * currently being loaded */
		switch (nldr_node_obj->phase) {
		case NLDR_CREATE:
			root = nldr_node_obj->create_lib;
			break;
		case NLDR_EXECUTE:
			root = nldr_node_obj->execute_lib;
			break;
		case NLDR_DELETE:
			root = nldr_node_obj->delete_lib;
			break;
		default:
			DBC_ASSERT(false);
			break;
		}
	} else {
		/* for Overlay nodes or non-split Dynamic nodes */
		root = nldr_node_obj->root;
	}
	status1 =
	    nldr_obj->ldr_fxns.get_c_addr_fxn(root.lib, pstrFxn, &dbll_sym);
	if (!status1)
		status1 =
		    nldr_obj->ldr_fxns.get_addr_fxn(root.lib, pstrFxn,
						    &dbll_sym);

	/* If symbol not found, check dependent libraries */
	if (!status1) {
		for (i = 0; i < root.dep_libs; i++) {
			status1 =
			    nldr_obj->ldr_fxns.get_addr_fxn(root.dep_libs_tree
							    [i].lib, pstrFxn,
							    &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(root.dep_libs_tree[i].lib,
						   pstrFxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}
	/* Check persistent libraries */
	if (!status1) {
		for (i = 0; i < nldr_node_obj->pers_libs; i++) {
			status1 =
			    nldr_obj->ldr_fxns.
			    get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
					 pstrFxn, &dbll_sym);
			if (!status1) {
				status1 =
				    nldr_obj->ldr_fxns.
				    get_c_addr_fxn(nldr_node_obj->pers_lib_table
						   [i].lib, pstrFxn, &dbll_sym);
			}
			if (status1) {
				/* Symbol found */
				break;
			}
		}
	}

	if (status1)
		*pulAddr = dbll_sym->value;
	else
		status = -ESPIPE;

	return status;
}
772
773/*
774 *  ======== nldr_get_rmm_manager ========
775 *  Given a NLDR object, retrieve RMM Manager Handle
776 */
777int nldr_get_rmm_manager(struct nldr_object *nldr,
778				OUT struct rmm_target_obj **phRmmMgr)
779{
780	int status = 0;
781	struct nldr_object *nldr_obj = nldr;
782	DBC_REQUIRE(phRmmMgr != NULL);
783
784	if (nldr) {
785		*phRmmMgr = nldr_obj->rmm;
786	} else {
787		*phRmmMgr = NULL;
788		status = -EFAULT;
789	}
790
791	DBC_ENSURE(DSP_SUCCEEDED(status) || ((phRmmMgr != NULL) &&
792					     (*phRmmMgr == NULL)));
793
794	return status;
795}
796
797/*
798 *  ======== nldr_init ========
799 *  Initialize the NLDR module.
800 */
801bool nldr_init(void)
802{
803	DBC_REQUIRE(refs >= 0);
804
805	if (refs == 0)
806		rmm_init();
807
808	refs++;
809
810	DBC_ENSURE(refs > 0);
811	return true;
812}
813
814/*
815 *  ======== nldr_load ========
816 */
817int nldr_load(struct nldr_nodeobject *nldr_node_obj,
818		     enum nldr_phase phase)
819{
820	struct nldr_object *nldr_obj;
821	struct dsp_uuid lib_uuid;
822	int status = 0;
823
824	DBC_REQUIRE(refs > 0);
825	DBC_REQUIRE(nldr_node_obj);
826
827	nldr_obj = nldr_node_obj->nldr_obj;
828
829	if (nldr_node_obj->dynamic) {
830		nldr_node_obj->phase = phase;
831
832		lib_uuid = nldr_node_obj->uuid;
833
834		/* At this point, we may not know if node is split into
835		 * different libraries. So we'll go ahead and load the
836		 * library, and then save the pointer to the appropriate
837		 * location after we know. */
838
839		status =
840		    load_lib(nldr_node_obj, &nldr_node_obj->root, lib_uuid,
841			     false, nldr_node_obj->lib_path, phase, 0);
842
843		if (DSP_SUCCEEDED(status)) {
844			if (*nldr_node_obj->pf_phase_split) {
845				switch (phase) {
846				case NLDR_CREATE:
847					nldr_node_obj->create_lib =
848					    nldr_node_obj->root;
849					break;
850
851				case NLDR_EXECUTE:
852					nldr_node_obj->execute_lib =
853					    nldr_node_obj->root;
854					break;
855
856				case NLDR_DELETE:
857					nldr_node_obj->delete_lib =
858					    nldr_node_obj->root;
859					break;
860
861				default:
862					DBC_ASSERT(false);
863					break;
864				}
865			}
866		}
867	} else {
868		if (nldr_node_obj->overlay)
869			status = load_ovly(nldr_node_obj, phase);
870
871	}
872
873	return status;
874}
875
876/*
877 *  ======== nldr_unload ========
878 */
879int nldr_unload(struct nldr_nodeobject *nldr_node_obj,
880		       enum nldr_phase phase)
881{
882	int status = 0;
883	struct lib_node *root_lib = NULL;
884	s32 i = 0;
885
886	DBC_REQUIRE(refs > 0);
887	DBC_REQUIRE(nldr_node_obj);
888
889	if (nldr_node_obj != NULL) {
890		if (nldr_node_obj->dynamic) {
891			if (*nldr_node_obj->pf_phase_split) {
892				switch (phase) {
893				case NLDR_CREATE:
894					root_lib = &nldr_node_obj->create_lib;
895					break;
896				case NLDR_EXECUTE:
897					root_lib = &nldr_node_obj->execute_lib;
898					break;
899				case NLDR_DELETE:
900					root_lib = &nldr_node_obj->delete_lib;
901					/* Unload persistent libraries */
902					for (i = 0;
903					     i < nldr_node_obj->pers_libs;
904					     i++) {
905						unload_lib(nldr_node_obj,
906							   &nldr_node_obj->
907							   pers_lib_table[i]);
908					}
909					nldr_node_obj->pers_libs = 0;
910					break;
911				default:
912					DBC_ASSERT(false);
913					break;
914				}
915			} else {
916				/* Unload main library */
917				root_lib = &nldr_node_obj->root;
918			}
919			if (root_lib)
920				unload_lib(nldr_node_obj, root_lib);
921		} else {
922			if (nldr_node_obj->overlay)
923				unload_ovly(nldr_node_obj, phase);
924
925		}
926	}
927	return status;
928}
929
930/*
931 *  ======== add_ovly_info ========
932 */
933static int add_ovly_info(void *handle, struct dbll_sect_info *sect_info,
934				u32 addr, u32 bytes)
935{
936	char *node_name;
937	char *sect_name = (char *)sect_info->name;
938	bool sect_exists = false;
939	char seps = ':';
940	char *pch;
941	u16 i;
942	struct nldr_object *nldr_obj = (struct nldr_object *)handle;
943	int status = 0;
944
945	/* Is this an overlay section (load address != run address)? */
946	if (sect_info->sect_load_addr == sect_info->sect_run_addr)
947		goto func_end;
948
949	/* Find the node it belongs to */
950	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
951		node_name = nldr_obj->ovly_table[i].node_name;
952		DBC_REQUIRE(node_name);
953		if (strncmp(node_name, sect_name + 1, strlen(node_name)) == 0) {
954			/* Found the node */
955			break;
956		}
957	}
958	if (!(i < nldr_obj->ovly_nodes))
959		goto func_end;
960
961	/* Determine which phase this section belongs to */
962	for (pch = sect_name + 1; *pch && *pch != seps; pch++)
963		;;
964
965	if (*pch) {
966		pch++;		/* Skip over the ':' */
967		if (strncmp(pch, PCREATE, strlen(PCREATE)) == 0) {
968			status =
969			    add_ovly_sect(nldr_obj,
970					  &nldr_obj->
971					  ovly_table[i].create_sects_list,
972					  sect_info, &sect_exists, addr, bytes);
973			if (DSP_SUCCEEDED(status) && !sect_exists)
974				nldr_obj->ovly_table[i].create_sects++;
975
976		} else if (strncmp(pch, PDELETE, strlen(PDELETE)) == 0) {
977			status =
978			    add_ovly_sect(nldr_obj,
979					  &nldr_obj->
980					  ovly_table[i].delete_sects_list,
981					  sect_info, &sect_exists, addr, bytes);
982			if (DSP_SUCCEEDED(status) && !sect_exists)
983				nldr_obj->ovly_table[i].delete_sects++;
984
985		} else if (strncmp(pch, PEXECUTE, strlen(PEXECUTE)) == 0) {
986			status =
987			    add_ovly_sect(nldr_obj,
988					  &nldr_obj->
989					  ovly_table[i].execute_sects_list,
990					  sect_info, &sect_exists, addr, bytes);
991			if (DSP_SUCCEEDED(status) && !sect_exists)
992				nldr_obj->ovly_table[i].execute_sects++;
993
994		} else {
995			/* Put in "other" sectins */
996			status =
997			    add_ovly_sect(nldr_obj,
998					  &nldr_obj->
999					  ovly_table[i].other_sects_list,
1000					  sect_info, &sect_exists, addr, bytes);
1001			if (DSP_SUCCEEDED(status) && !sect_exists)
1002				nldr_obj->ovly_table[i].other_sects++;
1003
1004		}
1005	}
1006func_end:
1007	return status;
1008}
1009
1010/*
1011 *  ======== add_ovly_node =========
1012 *  Callback function passed to dcd_get_objects.
1013 */
1014static int add_ovly_node(struct dsp_uuid *uuid_obj,
1015				enum dsp_dcdobjtype obj_type, IN void *handle)
1016{
1017	struct nldr_object *nldr_obj = (struct nldr_object *)handle;
1018	char *node_name = NULL;
1019	char *pbuf = NULL;
1020	u32 len;
1021	struct dcd_genericobj obj_def;
1022	int status = 0;
1023
1024	if (obj_type != DSP_DCDNODETYPE)
1025		goto func_end;
1026
1027	status =
1028	    dcd_get_object_def(nldr_obj->hdcd_mgr, uuid_obj, obj_type,
1029			       &obj_def);
1030	if (DSP_FAILED(status))
1031		goto func_end;
1032
1033	/* If overlay node, add to the list */
1034	if (obj_def.obj_data.node_obj.us_load_type == NLDR_OVLYLOAD) {
1035		if (nldr_obj->ovly_table == NULL) {
1036			nldr_obj->ovly_nodes++;
1037		} else {
1038			/* Add node to table */
1039			nldr_obj->ovly_table[nldr_obj->ovly_nid].uuid =
1040			    *uuid_obj;
1041			DBC_REQUIRE(obj_def.obj_data.node_obj.ndb_props.
1042				    ac_name);
1043			len =
1044			    strlen(obj_def.obj_data.node_obj.ndb_props.ac_name);
1045			node_name = obj_def.obj_data.node_obj.ndb_props.ac_name;
1046			pbuf = kzalloc(len + 1, GFP_KERNEL);
1047			if (pbuf == NULL) {
1048				status = -ENOMEM;
1049			} else {
1050				strncpy(pbuf, node_name, len);
1051				nldr_obj->ovly_table[nldr_obj->ovly_nid].
1052				    node_name = pbuf;
1053				nldr_obj->ovly_nid++;
1054			}
1055		}
1056	}
1057	/* These were allocated in dcd_get_object_def */
1058	kfree(obj_def.obj_data.node_obj.pstr_create_phase_fxn);
1059
1060	kfree(obj_def.obj_data.node_obj.pstr_execute_phase_fxn);
1061
1062	kfree(obj_def.obj_data.node_obj.pstr_delete_phase_fxn);
1063
1064	kfree(obj_def.obj_data.node_obj.pstr_i_alg_name);
1065
1066func_end:
1067	return status;
1068}
1069
1070/*
1071 *  ======== add_ovly_sect ========
1072 */
1073static int add_ovly_sect(struct nldr_object *nldr_obj,
1074				struct ovly_sect **pList,
1075				struct dbll_sect_info *pSectInfo,
1076				bool *exists, u32 addr, u32 bytes)
1077{
1078	struct ovly_sect *new_sect = NULL;
1079	struct ovly_sect *last_sect;
1080	struct ovly_sect *ovly_section;
1081	int status = 0;
1082
1083	ovly_section = last_sect = *pList;
1084	*exists = false;
1085	while (ovly_section) {
1086		/*
1087		 *  Make sure section has not already been added. Multiple
1088		 *  'write' calls may be made to load the section.
1089		 */
1090		if (ovly_section->sect_load_addr == addr) {
1091			/* Already added */
1092			*exists = true;
1093			break;
1094		}
1095		last_sect = ovly_section;
1096		ovly_section = ovly_section->next_sect;
1097	}
1098
1099	if (!ovly_section) {
1100		/* New section */
1101		new_sect = kzalloc(sizeof(struct ovly_sect), GFP_KERNEL);
1102		if (new_sect == NULL) {
1103			status = -ENOMEM;
1104		} else {
1105			new_sect->sect_load_addr = addr;
1106			new_sect->sect_run_addr = pSectInfo->sect_run_addr +
1107			    (addr - pSectInfo->sect_load_addr);
1108			new_sect->size = bytes;
1109			new_sect->page = pSectInfo->type;
1110		}
1111
1112		/* Add to the list */
1113		if (DSP_SUCCEEDED(status)) {
1114			if (*pList == NULL) {
1115				/* First in the list */
1116				*pList = new_sect;
1117			} else {
1118				last_sect->next_sect = new_sect;
1119			}
1120		}
1121	}
1122
1123	return status;
1124}
1125
1126/*
1127 *  ======== fake_ovly_write ========
1128 */
static s32 fake_ovly_write(void *handle, u32 dsp_address, void *buf, u32 bytes,
			   s32 mtype)
{
	/*
	 * No-op write callback: reports the whole request as written
	 * without transferring any data.  Presumably used where only the
	 * size bookkeeping of a "write" is needed -- confirm at call sites.
	 */
	return (s32) bytes;
}
1134
1135/*
1136 *  ======== free_sects ========
1137 */
1138static void free_sects(struct nldr_object *nldr_obj,
1139		       struct ovly_sect *phase_sects, u16 alloc_num)
1140{
1141	struct ovly_sect *ovly_section = phase_sects;
1142	u16 i = 0;
1143	bool ret;
1144
1145	while (ovly_section && i < alloc_num) {
1146		/* 'Deallocate' */
1147		/* segid - page not supported yet */
1148		/* Reserved memory */
1149		ret =
1150		    rmm_free(nldr_obj->rmm, 0, ovly_section->sect_run_addr,
1151			     ovly_section->size, true);
1152		DBC_ASSERT(ret);
1153		ovly_section = ovly_section->next_sect;
1154		i++;
1155	}
1156}
1157
1158/*
1159 *  ======== get_symbol_value ========
1160 *  Find symbol in library's base image.  If not there, check dependent
1161 *  libraries.
1162 */
1163static bool get_symbol_value(void *handle, void *parg, void *rmm_handle,
1164			     char *name, struct dbll_sym_val **sym)
1165{
1166	struct nldr_object *nldr_obj = (struct nldr_object *)handle;
1167	struct nldr_nodeobject *nldr_node_obj =
1168	    (struct nldr_nodeobject *)rmm_handle;
1169	struct lib_node *root = (struct lib_node *)parg;
1170	u16 i;
1171	bool status = false;
1172
1173	/* check the base image */
1174	status = nldr_obj->ldr_fxns.get_addr_fxn(nldr_obj->base_lib, name, sym);
1175	if (!status)
1176		status =
1177		    nldr_obj->ldr_fxns.get_c_addr_fxn(nldr_obj->base_lib, name,
1178						      sym);
1179
1180	/*
1181	 *  Check in root lib itself. If the library consists of
1182	 *  multiple object files linked together, some symbols in the
1183	 *  library may need to be resolved.
1184	 */
1185	if (!status) {
1186		status = nldr_obj->ldr_fxns.get_addr_fxn(root->lib, name, sym);
1187		if (!status) {
1188			status =
1189			    nldr_obj->ldr_fxns.get_c_addr_fxn(root->lib, name,
1190							      sym);
1191		}
1192	}
1193
1194	/*
1195	 *  Check in root lib's dependent libraries, but not dependent
1196	 *  libraries' dependents.
1197	 */
1198	if (!status) {
1199		for (i = 0; i < root->dep_libs; i++) {
1200			status =
1201			    nldr_obj->ldr_fxns.get_addr_fxn(root->dep_libs_tree
1202							    [i].lib, name, sym);
1203			if (!status) {
1204				status =
1205				    nldr_obj->ldr_fxns.
1206				    get_c_addr_fxn(root->dep_libs_tree[i].lib,
1207						   name, sym);
1208			}
1209			if (status) {
1210				/* Symbol found */
1211				break;
1212			}
1213		}
1214	}
1215	/*
1216	 * Check in persistent libraries
1217	 */
1218	if (!status) {
1219		for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1220			status =
1221			    nldr_obj->ldr_fxns.
1222			    get_addr_fxn(nldr_node_obj->pers_lib_table[i].lib,
1223					 name, sym);
1224			if (!status) {
1225				status = nldr_obj->ldr_fxns.get_c_addr_fxn
1226				    (nldr_node_obj->pers_lib_table[i].lib, name,
1227				     sym);
1228			}
1229			if (status) {
1230				/* Symbol found */
1231				break;
1232			}
1233		}
1234	}
1235
1236	return status;
1237}
1238
1239/*
1240 *  ======== load_lib ========
1241 *  Recursively load library and all its dependent libraries. The library
1242 *  we're loading is specified by a uuid.
1243 */
static int load_lib(struct nldr_nodeobject *nldr_node_obj,
			   struct lib_node *root, struct dsp_uuid uuid,
			   bool rootPersistent,
			   struct dbll_library_obj **lib_path,
			   enum nldr_phase phase, u16 depth)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	u16 nd_libs = 0;	/* Number of dependent libraries */
	u16 np_libs = 0;	/* Number of persistent libraries */
	u16 nd_libs_loaded = 0;	/* Number of dep. libraries loaded */
	u16 i;
	u32 entry;
	u32 dw_buf_size = NLDR_MAXPATHLENGTH;
	dbll_flags flags = DBLL_SYMB | DBLL_CODE | DBLL_DATA | DBLL_DYNAMIC;
	struct dbll_attrs new_attrs;
	char *psz_file_name = NULL;
	struct dsp_uuid *dep_lib_uui_ds = NULL;
	bool *persistent_dep_libs = NULL;
	int status = 0;
	bool lib_status = false;
	struct lib_node *dep_lib;

	/* Recursion is bounded by MAXDEPTH; exceeding it is a build-time
	 * dependency-graph error, flagged but not otherwise handled here. */
	if (depth > MAXDEPTH) {
		/* Error */
		DBC_ASSERT(false);
	}
	root->lib = NULL;
	/* Allocate a buffer for library file name of size DBL_MAXPATHLENGTH */
	psz_file_name = kzalloc(DBLL_MAXPATHLENGTH, GFP_KERNEL);
	if (psz_file_name == NULL)
		status = -ENOMEM;

	if (DSP_SUCCEEDED(status)) {
		/* Get the name of the library */
		if (depth == 0) {
			/* Top-level library: look it up by the node's phase */
			status =
			    dcd_get_library_name(nldr_node_obj->nldr_obj->
						 hdcd_mgr, &uuid, psz_file_name,
						 &dw_buf_size, phase,
						 nldr_node_obj->pf_phase_split);
		} else {
			/* Dependent libraries are registered with a phase */
			status =
			    dcd_get_library_name(nldr_node_obj->nldr_obj->
						 hdcd_mgr, &uuid, psz_file_name,
						 &dw_buf_size, NLDR_NOPHASE,
						 NULL);
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Open the library, don't load symbols */
		status =
		    nldr_obj->ldr_fxns.open_fxn(nldr_obj->dbll, psz_file_name,
						DBLL_NOLOAD, &root->lib);
	}
	/* Done with file name */
	kfree(psz_file_name);

	/* Check to see if library not already loaded: a persistent library
	 * already present in the node's table needs no further work. */
	if (DSP_SUCCEEDED(status) && rootPersistent) {
		lib_status =
		    find_in_persistent_lib_array(nldr_node_obj, root->lib);
		/* Close library */
		if (lib_status) {
			nldr_obj->ldr_fxns.close_fxn(root->lib);
			return 0;
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Check for circular dependencies: the library must not
		 * already appear on the current path of the tree. */
		for (i = 0; i < depth; i++) {
			if (root->lib == lib_path[i]) {
				/* This condition could be checked by a
				 * tool at build time. */
				status = -EILSEQ;
			}
		}
	}
	if (DSP_SUCCEEDED(status)) {
		/* Add library to current path in dependency tree */
		lib_path[depth] = root->lib;
		depth++;
		/* Get number of dependent libraries */
		status =
		    dcd_get_num_dep_libs(nldr_node_obj->nldr_obj->hdcd_mgr,
					 &uuid, &nd_libs, &np_libs, phase);
	}
	DBC_ASSERT(nd_libs >= np_libs);
	if (DSP_SUCCEEDED(status)) {
		/* Without phase splitting, persistent libs are treated the
		 * same as ordinary dependents. */
		if (!(*nldr_node_obj->pf_phase_split))
			np_libs = 0;

		/* nd_libs = #of dependent libraries */
		root->dep_libs = nd_libs - np_libs;
		if (nd_libs > 0) {
			dep_lib_uui_ds = kzalloc(sizeof(struct dsp_uuid) *
							nd_libs, GFP_KERNEL);
			persistent_dep_libs =
				kzalloc(sizeof(bool) * nd_libs, GFP_KERNEL);
			if (!dep_lib_uui_ds || !persistent_dep_libs)
				status = -ENOMEM;

			if (root->dep_libs > 0) {
				/* Allocate arrays for dependent lib UUIDs,
				 * lib nodes */
				root->dep_libs_tree = kzalloc
						(sizeof(struct lib_node) *
						(root->dep_libs), GFP_KERNEL);
				if (!(root->dep_libs_tree))
					status = -ENOMEM;

			}

			if (DSP_SUCCEEDED(status)) {
				/* Get the dependent library UUIDs */
				status =
				    dcd_get_dep_libs(nldr_node_obj->
						     nldr_obj->hdcd_mgr, &uuid,
						     nd_libs, dep_lib_uui_ds,
						     persistent_dep_libs,
						     phase);
			}
		}
	}

	/*
	 *  Recursively load dependent libraries.
	 */
	if (DSP_SUCCEEDED(status)) {
		for (i = 0; i < nd_libs; i++) {
			/* If root library is NOT persistent, and dep library
			 * is, then record it.  If root library IS persistent,
			 * the deplib is already included */
			if (!rootPersistent && persistent_dep_libs[i] &&
			    *nldr_node_obj->pf_phase_split) {
				if ((nldr_node_obj->pers_libs) >= MAXLIBS) {
					status = -EILSEQ;
					break;
				}

				/* Allocate library outside of phase */
				dep_lib =
				    &nldr_node_obj->pers_lib_table
				    [nldr_node_obj->pers_libs];
			} else {
				if (rootPersistent)
					persistent_dep_libs[i] = true;

				/* Allocate library within phase */
				dep_lib = &root->dep_libs_tree[nd_libs_loaded];
			}

			status = load_lib(nldr_node_obj, dep_lib,
					  dep_lib_uui_ds[i],
					  persistent_dep_libs[i], lib_path,
					  phase, depth);

			if (DSP_SUCCEEDED(status)) {
				/* NOTE(review): with DSP_SUCCEEDED(status)
				 * just checked, (status != 0) below looks
				 * unreachable for the 0-or-negative codes
				 * load_lib returns, so pers_libs may never
				 * be incremented here -- confirm intent. */
				if ((status != 0) &&
				    !rootPersistent && persistent_dep_libs[i] &&
				    *nldr_node_obj->pf_phase_split) {
					(nldr_node_obj->pers_libs)++;
				} else {
					if (!persistent_dep_libs[i] ||
					    !(*nldr_node_obj->pf_phase_split)) {
						nd_libs_loaded++;
					}
				}
			} else {
				break;
			}
		}
	}

	/* Now we can load the root library */
	if (DSP_SUCCEEDED(status)) {
		new_attrs = nldr_obj->ldr_attrs;
		new_attrs.sym_arg = root;
		new_attrs.rmm_handle = nldr_node_obj;
		new_attrs.input_params = nldr_node_obj->priv_ref;
		new_attrs.base_image = false;

		status =
		    nldr_obj->ldr_fxns.load_fxn(root->lib, flags, &new_attrs,
						&entry);
	}

	/*
	 *  In case of failure, unload any dependent libraries that
	 *  were loaded, and close the root library.
	 *  (Persistent libraries are unloaded from the very top)
	 */
	if (DSP_FAILED(status)) {
		if (phase != NLDR_EXECUTE) {
			for (i = 0; i < nldr_node_obj->pers_libs; i++)
				unload_lib(nldr_node_obj,
					   &nldr_node_obj->pers_lib_table[i]);

			nldr_node_obj->pers_libs = 0;
		}
		for (i = 0; i < nd_libs_loaded; i++)
			unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);

		if (root->lib)
			nldr_obj->ldr_fxns.close_fxn(root->lib);

	}

	/* Going up one node in the dependency tree */
	depth--;

	kfree(dep_lib_uui_ds);
	dep_lib_uui_ds = NULL;

	kfree(persistent_dep_libs);
	persistent_dep_libs = NULL;

	return status;
}
1463
1464/*
1465 *  ======== load_ovly ========
1466 */
static int load_ovly(struct nldr_nodeobject *nldr_node_obj,
			    enum nldr_phase phase)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	struct ovly_node *po_node = NULL;
	struct ovly_sect *phase_sects = NULL;
	struct ovly_sect *other_sects_list = NULL;
	u16 i;
	u16 alloc_num = 0;	/* sections 'allocated' so far (for rollback) */
	u16 other_alloc = 0;	/* 'other' sections allocated (for rollback) */
	u16 *ref_count = NULL;
	u16 *other_ref = NULL;
	u32 bytes;
	struct ovly_sect *ovly_section;
	int status = 0;

	/* Find the node in the table by UUID */
	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
		if (IS_EQUAL_UUID
		    (nldr_node_obj->uuid, nldr_obj->ovly_table[i].uuid)) {
			/* Found it */
			po_node = &(nldr_obj->ovly_table[i]);
			break;
		}
	}

	DBC_ASSERT(i < nldr_obj->ovly_nodes);

	if (!po_node) {
		status = -ENOENT;
		goto func_end;
	}

	/* Pick the section list and reference counters for this phase.
	 * 'Other' sections are handled together with the create phase. */
	switch (phase) {
	case NLDR_CREATE:
		ref_count = &(po_node->create_ref);
		other_ref = &(po_node->other_ref);
		phase_sects = po_node->create_sects_list;
		other_sects_list = po_node->other_sects_list;
		break;

	case NLDR_EXECUTE:
		ref_count = &(po_node->execute_ref);
		phase_sects = po_node->execute_sects_list;
		break;

	case NLDR_DELETE:
		ref_count = &(po_node->delete_ref);
		phase_sects = po_node->delete_sects_list;
		break;

	default:
		DBC_ASSERT(false);
		break;
	}

	if (ref_count == NULL)
		goto func_end;

	/* Already loaded for this phase: only the ref count is bumped
	 * (at func_end below) */
	if (*ref_count != 0)
		goto func_end;

	/* 'Allocate' memory for overlay sections of this phase */
	ovly_section = phase_sects;
	while (ovly_section) {
		/* allocate *//* page not supported yet */
		/* reserve *//* align */
		status = rmm_alloc(nldr_obj->rmm, 0, ovly_section->size, 0,
				   &(ovly_section->sect_run_addr), true);
		if (DSP_SUCCEEDED(status)) {
			ovly_section = ovly_section->next_sect;
			alloc_num++;
		} else {
			break;
		}
	}
	if (other_ref && *other_ref == 0) {
		/* 'Allocate' memory for other overlay sections
		 * (create phase) */
		if (DSP_SUCCEEDED(status)) {
			ovly_section = other_sects_list;
			while (ovly_section) {
				/* page not supported *//* align */
				/* reserve */
				status =
				    rmm_alloc(nldr_obj->rmm, 0,
					      ovly_section->size, 0,
					      &(ovly_section->sect_run_addr),
					      true);
				if (DSP_SUCCEEDED(status)) {
					ovly_section = ovly_section->next_sect;
					other_alloc++;
				} else {
					break;
				}
			}
		}
	}
	if (*ref_count == 0) {
		if (DSP_SUCCEEDED(status)) {
			/* Load sections for this phase: copy each section
			 * to its run address via the overlay callback; a
			 * short write is treated as failure. */
			ovly_section = phase_sects;
			while (ovly_section && DSP_SUCCEEDED(status)) {
				bytes =
				    (*nldr_obj->ovly_fxn) (nldr_node_obj->
							   priv_ref,
							   ovly_section->
							   sect_run_addr,
							   ovly_section->
							   sect_load_addr,
							   ovly_section->size,
							   ovly_section->page);
				if (bytes != ovly_section->size)
					status = -EPERM;

				ovly_section = ovly_section->next_sect;
			}
		}
	}
	if (other_ref && *other_ref == 0) {
		if (DSP_SUCCEEDED(status)) {
			/* Load other sections (create phase) */
			ovly_section = other_sects_list;
			while (ovly_section && DSP_SUCCEEDED(status)) {
				bytes =
				    (*nldr_obj->ovly_fxn) (nldr_node_obj->
							   priv_ref,
							   ovly_section->
							   sect_run_addr,
							   ovly_section->
							   sect_load_addr,
							   ovly_section->size,
							   ovly_section->page);
				if (bytes != ovly_section->size)
					status = -EPERM;

				ovly_section = ovly_section->next_sect;
			}
		}
	}
	if (DSP_FAILED(status)) {
		/* 'Deallocate' only the sections allocated above */
		free_sects(nldr_obj, phase_sects, alloc_num);
		free_sects(nldr_obj, other_sects_list, other_alloc);
	}
func_end:
	/* On success, record the additional reference(s) to this phase */
	if (DSP_SUCCEEDED(status) && (ref_count != NULL)) {
		*ref_count += 1;
		if (other_ref)
			*other_ref += 1;

	}

	return status;
}
1622
1623/*
1624 *  ======== remote_alloc ========
1625 */
static int remote_alloc(void **pRef, u16 space, u32 size,
			       u32 align, u32 *dsp_address,
			       OPTIONAL s32 segmentId, OPTIONAL s32 req,
			       bool reserve)
{
	struct nldr_nodeobject *hnode = (struct nldr_nodeobject *)pRef;
	struct nldr_object *nldr_obj;
	struct rmm_target_obj *rmm;
	u16 mem_phase_bit = MAXFLAGS;
	u16 segid = 0;
	u16 i;
	u16 mem_sect_type;
	u32 word_size;	/* requested size converted to DSP words */
	struct rmm_addr *rmm_addr_obj = (struct rmm_addr *)dsp_address;
	bool mem_load_req = false;	/* segment is required, not preferred */
	int status = -ENOMEM;	/* Set to fail */
	DBC_REQUIRE(hnode);
	DBC_REQUIRE(space == DBLL_CODE || space == DBLL_DATA ||
		    space == DBLL_BSS);
	nldr_obj = hnode->nldr_obj;
	rmm = nldr_obj->rmm;
	/* Convert size to DSP words (round up) */
	word_size =
	    (size + nldr_obj->us_dsp_word_size -
	     1) / nldr_obj->us_dsp_word_size;
	/* Modify memory 'align' to account for DSP cache line size */
	align = find_lcm(GEM_CACHE_LINE_SIZE, align);
	dev_dbg(bridge, "%s: memory align to 0x%x\n", __func__, align);
	if (segmentId != -1) {
		/* Caller specified an explicit segment */
		rmm_addr_obj->segid = segmentId;
		segid = segmentId;
		mem_load_req = req;
	} else {
		/* Derive the segment from the per-phase bits packed in the
		 * node's dynamic-load masks (see file-header format note) */
		switch (hnode->phase) {
		case NLDR_CREATE:
			mem_phase_bit = CREATEDATAFLAGBIT;
			break;
		case NLDR_DELETE:
			mem_phase_bit = DELETEDATAFLAGBIT;
			break;
		case NLDR_EXECUTE:
			mem_phase_bit = EXECUTEDATAFLAGBIT;
			break;
		default:
			DBC_ASSERT(false);
			break;
		}
		/* code flag bit follows the corresponding data flag bit */
		if (space == DBLL_CODE)
			mem_phase_bit++;

		if (mem_phase_bit < MAXFLAGS)
			segid = hnode->seg_id[mem_phase_bit];

		/* Determine if there is a memory loading requirement */
		if ((hnode->code_data_flag_mask >> mem_phase_bit) & 0x1)
			mem_load_req = true;

	}
	mem_sect_type = (space == DBLL_CODE) ? DYNM_CODE : DYNM_DATA;

	/* Find an appropriate segment based on space */
	if (segid == NULLID) {
		/* No memory requirements of preferences */
		DBC_ASSERT(!mem_load_req);
		goto func_cont;
	}
	if (segid <= MAXSEGID) {
		DBC_ASSERT(segid < nldr_obj->dload_segs);
		/* Attempt to allocate from segid first. */
		rmm_addr_obj->segid = segid;
		status =
		    rmm_alloc(rmm, segid, word_size, align, dsp_address, false);
		if (DSP_FAILED(status)) {
			dev_dbg(bridge, "%s: Unable allocate from segment %d\n",
				__func__, segid);
		}
	} else {
		/* segid > MAXSEGID ==> Internal or external memory */
		DBC_ASSERT(segid == MEMINTERNALID || segid == MEMEXTERNALID);
		/*  Check for any internal or external memory segment,
		 *  depending on segid. */
		mem_sect_type |= segid == MEMINTERNALID ?
		    DYNM_INTERNAL : DYNM_EXTERNAL;
		for (i = 0; i < nldr_obj->dload_segs; i++) {
			if ((nldr_obj->seg_table[i] & mem_sect_type) !=
			    mem_sect_type)
				continue;

			status = rmm_alloc(rmm, i, word_size, align,
					   dsp_address, false);
			if (DSP_SUCCEEDED(status)) {
				/* Save segid for freeing later */
				rmm_addr_obj->segid = i;
				break;
			}
		}
	}
func_cont:
	/* Haven't found memory yet, attempt to find any segment that works,
	 * but only when the preference was not a hard requirement */
	if (status == -ENOMEM && !mem_load_req) {
		dev_dbg(bridge, "%s: Preferred segment unavailable, trying "
			"another\n", __func__);
		for (i = 0; i < nldr_obj->dload_segs; i++) {
			/* All bits of mem_sect_type must be set */
			if ((nldr_obj->seg_table[i] & mem_sect_type) !=
			    mem_sect_type)
				continue;

			status = rmm_alloc(rmm, i, word_size, align,
					   dsp_address, false);
			if (DSP_SUCCEEDED(status)) {
				/* Save segid */
				rmm_addr_obj->segid = i;
				break;
			}
		}
	}

	return status;
}
1746
1747static int remote_free(void **pRef, u16 space, u32 dsp_address,
1748			      u32 size, bool reserve)
1749{
1750	struct nldr_object *nldr_obj = (struct nldr_object *)pRef;
1751	struct rmm_target_obj *rmm;
1752	u32 word_size;
1753	int status = -ENOMEM;	/* Set to fail */
1754
1755	DBC_REQUIRE(nldr_obj);
1756
1757	rmm = nldr_obj->rmm;
1758
1759	/* Convert size to DSP words */
1760	word_size =
1761	    (size + nldr_obj->us_dsp_word_size -
1762	     1) / nldr_obj->us_dsp_word_size;
1763
1764	if (rmm_free(rmm, space, dsp_address, word_size, reserve))
1765		status = 0;
1766
1767	return status;
1768}
1769
1770/*
1771 *  ======== unload_lib ========
1772 */
1773static void unload_lib(struct nldr_nodeobject *nldr_node_obj,
1774		       struct lib_node *root)
1775{
1776	struct dbll_attrs new_attrs;
1777	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
1778	u16 i;
1779
1780	DBC_ASSERT(root != NULL);
1781
1782	/* Unload dependent libraries */
1783	for (i = 0; i < root->dep_libs; i++)
1784		unload_lib(nldr_node_obj, &root->dep_libs_tree[i]);
1785
1786	root->dep_libs = 0;
1787
1788	new_attrs = nldr_obj->ldr_attrs;
1789	new_attrs.rmm_handle = nldr_obj->rmm;
1790	new_attrs.input_params = nldr_node_obj->priv_ref;
1791	new_attrs.base_image = false;
1792	new_attrs.sym_arg = root;
1793
1794	if (root->lib) {
1795		/* Unload the root library */
1796		nldr_obj->ldr_fxns.unload_fxn(root->lib, &new_attrs);
1797		nldr_obj->ldr_fxns.close_fxn(root->lib);
1798	}
1799
1800	/* Free dependent library list */
1801	kfree(root->dep_libs_tree);
1802	root->dep_libs_tree = NULL;
1803}
1804
1805/*
1806 *  ======== unload_ovly ========
1807 */
static void unload_ovly(struct nldr_nodeobject *nldr_node_obj,
			enum nldr_phase phase)
{
	struct nldr_object *nldr_obj = nldr_node_obj->nldr_obj;
	struct ovly_node *po_node = NULL;
	struct ovly_sect *phase_sects = NULL;
	struct ovly_sect *other_sects_list = NULL;
	u16 i;
	u16 alloc_num = 0;	/* number of phase sections to release */
	u16 other_alloc = 0;	/* number of 'other' sections to release */
	u16 *ref_count = NULL;
	u16 *other_ref = NULL;

	/* Find the node in the table by UUID */
	for (i = 0; i < nldr_obj->ovly_nodes; i++) {
		if (IS_EQUAL_UUID
		    (nldr_node_obj->uuid, nldr_obj->ovly_table[i].uuid)) {
			/* Found it */
			po_node = &(nldr_obj->ovly_table[i]);
			break;
		}
	}

	DBC_ASSERT(i < nldr_obj->ovly_nodes);

	if (!po_node)
		/* TODO: Should we print warning here? */
		return;

	/* Select the section list and ref counters for this phase */
	switch (phase) {
	case NLDR_CREATE:
		ref_count = &(po_node->create_ref);
		phase_sects = po_node->create_sects_list;
		alloc_num = po_node->create_sects;
		break;
	case NLDR_EXECUTE:
		ref_count = &(po_node->execute_ref);
		phase_sects = po_node->execute_sects_list;
		alloc_num = po_node->execute_sects;
		break;
	case NLDR_DELETE:
		ref_count = &(po_node->delete_ref);
		other_ref = &(po_node->other_ref);
		phase_sects = po_node->delete_sects_list;
		/* 'Other' overlay sections are unloaded in the delete phase */
		other_sects_list = po_node->other_sects_list;
		alloc_num = po_node->delete_sects;
		other_alloc = po_node->other_sects;
		break;
	default:
		DBC_ASSERT(false);
		break;
	}
	/* Drop one reference; sections are only released when the last
	 * reference for the phase goes away */
	DBC_ASSERT(ref_count && (*ref_count > 0));
	if (ref_count && (*ref_count > 0)) {
		*ref_count -= 1;
		if (other_ref) {
			DBC_ASSERT(*other_ref > 0);
			*other_ref -= 1;
		}
	}

	if (ref_count && *ref_count == 0) {
		/* 'Deallocate' memory */
		free_sects(nldr_obj, phase_sects, alloc_num);
	}
	if (other_ref && *other_ref == 0)
		free_sects(nldr_obj, other_sects_list, other_alloc);
}
1877
1878/*
1879 *  ======== find_in_persistent_lib_array ========
1880 */
1881static bool find_in_persistent_lib_array(struct nldr_nodeobject *nldr_node_obj,
1882					 struct dbll_library_obj *lib)
1883{
1884	s32 i = 0;
1885
1886	for (i = 0; i < nldr_node_obj->pers_libs; i++) {
1887		if (lib == nldr_node_obj->pers_lib_table[i].lib)
1888			return true;
1889
1890	}
1891
1892	return false;
1893}
1894
1895/*
1896 * ================ Find LCM (Least Common Multiplier ===
1897 */
1898static u32 find_lcm(u32 a, u32 b)
1899{
1900	u32 ret;
1901
1902	ret = a * b / find_gcf(a, b);
1903
1904	return ret;
1905}
1906
1907/*
1908 * ================ Find GCF (Greatest Common Factor ) ===
1909 */
1910static u32 find_gcf(u32 a, u32 b)
1911{
1912	u32 c;
1913
1914	/* Get the GCF (Greatest common factor between the numbers,
1915	 * using Euclidian Algo */
1916	while ((c = (a % b))) {
1917		a = b;
1918		b = c;
1919	}
1920	return b;
1921}
1922
1923#ifdef CONFIG_TIDSPBRIDGE_BACKTRACE
1924/**
1925 * nldr_find_addr() - Find the closest symbol to the given address based on
1926 *		dynamic node object.
1927 *
1928 * @nldr_node:		Dynamic node object
1929 * @sym_addr:		Given address to find the dsp symbol
1930 * @offset_range:		offset range to look for dsp symbol
1931 * @offset_output:		Symbol Output address
1932 * @sym_name:		String with the dsp symbol
1933 *
1934 * 	This function finds the node library for a given address and
1935 *	retrieves the dsp symbol by calling dbll_find_dsp_symbol.
1936 */
int nldr_find_addr(struct nldr_nodeobject *nldr_node, u32 sym_addr,
			u32 offset_range, void *offset_output, char *sym_name)
{
	int status = 0;
	bool status1 = false;
	s32 i = 0;
	struct lib_node root = { NULL, 0, NULL };
	DBC_REQUIRE(refs > 0);
	DBC_REQUIRE(offset_output != NULL);
	DBC_REQUIRE(sym_name != NULL);
	pr_debug("%s(0x%x, 0x%x, 0x%x, 0x%x,  %s)\n", __func__, (u32) nldr_node,
			sym_addr, offset_range, (u32) offset_output, sym_name);

	/* Select the library that corresponds to the node's current phase;
	 * dynamic nodes with split phases keep one library per phase */
	if (nldr_node->dynamic && *nldr_node->pf_phase_split) {
		switch (nldr_node->phase) {
		case NLDR_CREATE:
			root = nldr_node->create_lib;
			break;
		case NLDR_EXECUTE:
			root = nldr_node->execute_lib;
			break;
		case NLDR_DELETE:
			root = nldr_node->delete_lib;
			break;
		default:
			DBC_ASSERT(false);
			break;
		}
	} else {
		/* for Overlay nodes or non-split Dynamic nodes */
		root = nldr_node->root;
	}

	/* Try the phase library itself first */
	status1 = dbll_find_dsp_symbol(root.lib, sym_addr,
			offset_range, offset_output, sym_name);

	/* If symbol not found, check dependent libraries */
	if (!status1)
		for (i = 0; i < root.dep_libs; i++) {
			status1 = dbll_find_dsp_symbol(
				root.dep_libs_tree[i].lib, sym_addr,
				offset_range, offset_output, sym_name);
			if (status1)
				/* Symbol found */
				break;
		}
	/* Check persistent libraries */
	if (!status1)
		for (i = 0; i < nldr_node->pers_libs; i++) {
			status1 = dbll_find_dsp_symbol(
				nldr_node->pers_lib_table[i].lib, sym_addr,
				offset_range, offset_output, sym_name);
			if (status1)
				/* Symbol found */
				break;
		}

	if (!status1) {
		pr_debug("%s: Address 0x%x not found in range %d.\n",
					__func__, sym_addr, offset_range);
		status = -ESPIPE;
	}

	return status;
}
2002#endif
2003