remoteproc_core.c revision fd2c15ec1dd3c2fdfc6ff03bb9644da9d530e3b9
1/* 2 * Remote Processor Framework 3 * 4 * Copyright (C) 2011 Texas Instruments, Inc. 5 * Copyright (C) 2011 Google, Inc. 6 * 7 * Ohad Ben-Cohen <ohad@wizery.com> 8 * Brian Swetland <swetland@google.com> 9 * Mark Grosen <mgrosen@ti.com> 10 * Fernando Guzman Lugo <fernando.lugo@ti.com> 11 * Suman Anna <s-anna@ti.com> 12 * Robert Tivy <rtivy@ti.com> 13 * Armando Uribe De Leon <x0095078@ti.com> 14 * 15 * This program is free software; you can redistribute it and/or 16 * modify it under the terms of the GNU General Public License 17 * version 2 as published by the Free Software Foundation. 18 * 19 * This program is distributed in the hope that it will be useful, 20 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * GNU General Public License for more details. 23 */ 24 25#define pr_fmt(fmt) "%s: " fmt, __func__ 26 27#include <linux/kernel.h> 28#include <linux/module.h> 29#include <linux/device.h> 30#include <linux/slab.h> 31#include <linux/mutex.h> 32#include <linux/dma-mapping.h> 33#include <linux/firmware.h> 34#include <linux/string.h> 35#include <linux/debugfs.h> 36#include <linux/remoteproc.h> 37#include <linux/iommu.h> 38#include <linux/klist.h> 39#include <linux/elf.h> 40#include <linux/virtio_ids.h> 41#include <linux/virtio_ring.h> 42#include <asm/byteorder.h> 43 44#include "remoteproc_internal.h" 45 46static void klist_rproc_get(struct klist_node *n); 47static void klist_rproc_put(struct klist_node *n); 48 49/* 50 * klist of the available remote processors. 51 * 52 * We need this in order to support name-based lookups (needed by the 53 * rproc_get_by_name()). 54 * 55 * That said, we don't use rproc_get_by_name() anymore within the rpmsg 56 * framework. The use cases that do require its existence should be 57 * scrutinized, and hopefully migrated to rproc_boot() using device-based 58 * binding. 
59 * 60 * If/when this materializes, we could drop the klist (and the by_name 61 * API). 62 */ 63static DEFINE_KLIST(rprocs, klist_rproc_get, klist_rproc_put); 64 65typedef int (*rproc_handle_resources_t)(struct rproc *rproc, 66 struct resource_table *table, int len); 67typedef int (*rproc_handle_resource_t)(struct rproc *rproc, void *, int avail); 68 69/* 70 * This is the IOMMU fault handler we register with the IOMMU API 71 * (when relevant; not all remote processors access memory through 72 * an IOMMU). 73 * 74 * IOMMU core will invoke this handler whenever the remote processor 75 * will try to access an unmapped device address. 76 * 77 * Currently this is mostly a stub, but it will be later used to trigger 78 * the recovery of the remote processor. 79 */ 80static int rproc_iommu_fault(struct iommu_domain *domain, struct device *dev, 81 unsigned long iova, int flags) 82{ 83 dev_err(dev, "iommu fault: da 0x%lx flags 0x%x\n", iova, flags); 84 85 /* 86 * Let the iommu core know we're not really handling this fault; 87 * we just plan to use this as a recovery trigger. 88 */ 89 return -ENOSYS; 90} 91 92static int rproc_enable_iommu(struct rproc *rproc) 93{ 94 struct iommu_domain *domain; 95 struct device *dev = rproc->dev; 96 int ret; 97 98 /* 99 * We currently use iommu_present() to decide if an IOMMU 100 * setup is needed. 101 * 102 * This works for simple cases, but will easily fail with 103 * platforms that do have an IOMMU, but not for this specific 104 * rproc. 105 * 106 * This will be easily solved by introducing hw capabilities 107 * that will be set by the remoteproc driver. 
108 */ 109 if (!iommu_present(dev->bus)) { 110 dev_dbg(dev, "iommu not found\n"); 111 return 0; 112 } 113 114 domain = iommu_domain_alloc(dev->bus); 115 if (!domain) { 116 dev_err(dev, "can't alloc iommu domain\n"); 117 return -ENOMEM; 118 } 119 120 iommu_set_fault_handler(domain, rproc_iommu_fault); 121 122 ret = iommu_attach_device(domain, dev); 123 if (ret) { 124 dev_err(dev, "can't attach iommu device: %d\n", ret); 125 goto free_domain; 126 } 127 128 rproc->domain = domain; 129 130 return 0; 131 132free_domain: 133 iommu_domain_free(domain); 134 return ret; 135} 136 137static void rproc_disable_iommu(struct rproc *rproc) 138{ 139 struct iommu_domain *domain = rproc->domain; 140 struct device *dev = rproc->dev; 141 142 if (!domain) 143 return; 144 145 iommu_detach_device(domain, dev); 146 iommu_domain_free(domain); 147 148 return; 149} 150 151/* 152 * Some remote processors will ask us to allocate them physically contiguous 153 * memory regions (which we call "carveouts"), and map them to specific 154 * device addresses (which are hardcoded in the firmware). 155 * 156 * They may then ask us to copy objects into specific device addresses (e.g. 157 * code/data sections) or expose us certain symbols in other device address 158 * (e.g. their trace buffer). 159 * 160 * This function is an internal helper with which we can go over the allocated 161 * carveouts and translate specific device address to kernel virtual addresses 162 * so we can access the referenced memory. 163 * 164 * Note: phys_to_virt(iommu_iova_to_phys(rproc->domain, da)) will work too, 165 * but only on kernel direct mapped RAM memory. Instead, we're just using 166 * here the output of the DMA API, which should be more correct. 
167 */ 168static void *rproc_da_to_va(struct rproc *rproc, u64 da, int len) 169{ 170 struct rproc_mem_entry *carveout; 171 void *ptr = NULL; 172 173 list_for_each_entry(carveout, &rproc->carveouts, node) { 174 int offset = da - carveout->da; 175 176 /* try next carveout if da is too small */ 177 if (offset < 0) 178 continue; 179 180 /* try next carveout if da is too large */ 181 if (offset + len > carveout->len) 182 continue; 183 184 ptr = carveout->va + offset; 185 186 break; 187 } 188 189 return ptr; 190} 191 192/** 193 * rproc_load_segments() - load firmware segments to memory 194 * @rproc: remote processor which will be booted using these fw segments 195 * @elf_data: the content of the ELF firmware image 196 * @len: firmware size (in bytes) 197 * 198 * This function loads the firmware segments to memory, where the remote 199 * processor expects them. 200 * 201 * Some remote processors will expect their code and data to be placed 202 * in specific device addresses, and can't have them dynamically assigned. 203 * 204 * We currently support only those kind of remote processors, and expect 205 * the program header's paddr member to contain those addresses. We then go 206 * through the physically contiguous "carveout" memory regions which we 207 * allocated (and mapped) earlier on behalf of the remote processor, 208 * and "translate" device address to kernel addresses, so we can copy the 209 * segments where they are expected. 210 * 211 * Currently we only support remote processors that required carveout 212 * allocations and got them mapped onto their iommus. Some processors 213 * might be different: they might not have iommus, and would prefer to 214 * directly allocate memory for every segment/resource. This is not yet 215 * supported, though. 
216 */ 217static int 218rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len) 219{ 220 struct device *dev = rproc->dev; 221 struct elf32_hdr *ehdr; 222 struct elf32_phdr *phdr; 223 int i, ret = 0; 224 225 ehdr = (struct elf32_hdr *)elf_data; 226 phdr = (struct elf32_phdr *)(elf_data + ehdr->e_phoff); 227 228 /* go through the available ELF segments */ 229 for (i = 0; i < ehdr->e_phnum; i++, phdr++) { 230 u32 da = phdr->p_paddr; 231 u32 memsz = phdr->p_memsz; 232 u32 filesz = phdr->p_filesz; 233 u32 offset = phdr->p_offset; 234 void *ptr; 235 236 if (phdr->p_type != PT_LOAD) 237 continue; 238 239 dev_dbg(dev, "phdr: type %d da 0x%x memsz 0x%x filesz 0x%x\n", 240 phdr->p_type, da, memsz, filesz); 241 242 if (filesz > memsz) { 243 dev_err(dev, "bad phdr filesz 0x%x memsz 0x%x\n", 244 filesz, memsz); 245 ret = -EINVAL; 246 break; 247 } 248 249 if (offset + filesz > len) { 250 dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n", 251 offset + filesz, len); 252 ret = -EINVAL; 253 break; 254 } 255 256 /* grab the kernel address for this device address */ 257 ptr = rproc_da_to_va(rproc, da, memsz); 258 if (!ptr) { 259 dev_err(dev, "bad phdr da 0x%x mem 0x%x\n", da, memsz); 260 ret = -EINVAL; 261 break; 262 } 263 264 /* put the segment where the remote processor expects it */ 265 if (phdr->p_filesz) 266 memcpy(ptr, elf_data + phdr->p_offset, filesz); 267 268 /* 269 * Zero out remaining memory for this segment. 270 * 271 * This isn't strictly required since dma_alloc_coherent already 272 * did this for us. albeit harmless, we may consider removing 273 * this. 
274 */ 275 if (memsz > filesz) 276 memset(ptr + filesz, 0, memsz - filesz); 277 } 278 279 return ret; 280} 281 282/** 283 * rproc_handle_early_vdev() - early handle a virtio header resource 284 * @rproc: the remote processor 285 * @rsc: the resource descriptor 286 * @avail: size of available data (for sanity checking the image) 287 * 288 * The existence of this virtio hdr resource entry means that the firmware 289 * of this @rproc supports this virtio device. 290 * 291 * Currently we support only a single virtio device of type VIRTIO_ID_RPMSG, 292 * but the plan is to remove this limitation and support any number 293 * of virtio devices (and of any type). We'll also add support for dynamically 294 * adding (and removing) virtio devices over the rpmsg bus, but simple 295 * firmwares that doesn't want to get involved with rpmsg will be able 296 * to simply use the resource table for this. 297 * 298 * Returns 0 on success, or an appropriate error code otherwise 299 */ 300static int rproc_handle_early_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, 301 int avail) 302{ 303 struct rproc_vdev *rvdev; 304 305 /* make sure resource isn't truncated */ 306 if (sizeof(*rsc) > avail) { 307 dev_err(rproc->dev, "vdev rsc is truncated\n"); 308 return -EINVAL; 309 } 310 311 /* we only support VIRTIO_ID_RPMSG devices for now */ 312 if (rsc->id != VIRTIO_ID_RPMSG) { 313 dev_warn(rproc->dev, "unsupported vdev: %d\n", rsc->id); 314 return -EINVAL; 315 } 316 317 /* we only support a single vdev per rproc for now */ 318 if (rproc->rvdev) { 319 dev_warn(rproc->dev, "redundant vdev entry\n"); 320 return -EINVAL; 321 } 322 323 rvdev = kzalloc(sizeof(struct rproc_vdev), GFP_KERNEL); 324 if (!rvdev) 325 return -ENOMEM; 326 327 /* remember the device features */ 328 rvdev->dfeatures = rsc->dfeatures; 329 330 rproc->rvdev = rvdev; 331 rvdev->rproc = rproc; 332 333 return 0; 334} 335 336/** 337 * rproc_handle_vdev() - handle a vdev fw resource 338 * @rproc: the remote processor 339 * @rsc: 
the vring resource descriptor 340 * @avail: size of available data (for sanity checking the image) 341 * 342 * This resource entry requires allocation of non-cacheable memory 343 * for a virtio vring. Currently we only support two vrings per remote 344 * processor, required for the virtio rpmsg device. 345 * 346 * The 'len' member of @rsc should contain the number of buffers this vring 347 * support and 'da' should either contain the device address where 348 * the remote processor is expecting the vring, or indicate that 349 * dynamically allocation of the vring's device address is supported. 350 * 351 * Note: 'da' is currently not handled. This will be revised when the generic 352 * iommu-based DMA API will arrive, or a dynanic & non-iommu use case show 353 * up. Meanwhile, statically-addressed iommu-based images should use 354 * RSC_DEVMEM resource entries to map their require 'da' to the physical 355 * address of their base CMA region. 356 * 357 * Returns 0 on success, or an appropriate error code otherwise 358 */ 359static int rproc_handle_vdev(struct rproc *rproc, struct fw_rsc_vdev *rsc, 360 int avail) 361{ 362 struct device *dev = rproc->dev; 363 struct rproc_vdev *rvdev = rproc->rvdev; 364 int i; 365 366 /* make sure resource isn't truncated */ 367 if (sizeof(*rsc) + rsc->num_of_vrings * sizeof(struct fw_rsc_vdev_vring) 368 + rsc->config_len > avail) { 369 dev_err(rproc->dev, "vdev rsc is truncated\n"); 370 return -EINVAL; 371 } 372 373 /* make sure reserved bytes are zeroes */ 374 if (rsc->reserved[0] || rsc->reserved[1]) { 375 dev_err(dev, "vdev rsc has non zero reserved bytes\n"); 376 return -EINVAL; 377 } 378 379 dev_dbg(dev, "vdev rsc: id %d, dfeatures %x, cfg len %d, %d vrings\n", 380 rsc->id, rsc->dfeatures, rsc->config_len, rsc->num_of_vrings); 381 382 /* no vdev is in place ? 
*/ 383 if (!rvdev) { 384 dev_err(dev, "vring requested without a virtio dev entry\n"); 385 return -EINVAL; 386 } 387 388 /* we currently support two vrings per rproc (for rx and tx) */ 389 if (rsc->num_of_vrings != ARRAY_SIZE(rvdev->vring)) { 390 dev_err(dev, "too many vrings: %d\n", rsc->num_of_vrings); 391 return -EINVAL; 392 } 393 394 /* initialize the vrings */ 395 for (i = 0; i < rsc->num_of_vrings; i++) { 396 struct fw_rsc_vdev_vring *vring = &rsc->vring[i]; 397 dma_addr_t dma; 398 int size; 399 void *va; 400 401 /* make sure reserved bytes are zeroes */ 402 if (vring->reserved) { 403 dev_err(dev, "vring rsc has non zero reserved bytes\n"); 404 return -EINVAL; 405 } 406 407 /* the firmware must provide the expected queue size */ 408 if (!vring->num) { 409 dev_err(dev, "missing expected queue size\n"); 410 /* potential cleanups are taken care of later on */ 411 return -EINVAL; 412 } 413 414 /* actual size of vring (in bytes) */ 415 size = PAGE_ALIGN(vring_size(vring->num, AMP_VRING_ALIGN)); 416 417 /* 418 * Allocate non-cacheable memory for the vring. In the future 419 * this call will also configure the IOMMU for us 420 */ 421 va = dma_alloc_coherent(dev, size, &dma, GFP_KERNEL); 422 if (!va) { 423 dev_err(dev, "dma_alloc_coherent failed\n"); 424 /* potential cleanups are taken care of later on */ 425 return -EINVAL; 426 } 427 428 dev_dbg(dev, "vring%d: va %p dma %x qsz %d ring size %x\n", i, 429 va, dma, vring->num, size); 430 431 rvdev->vring[i].len = vring->num; 432 rvdev->vring[i].va = va; 433 rvdev->vring[i].dma = dma; 434 } 435 436 return 0; 437} 438 439/** 440 * rproc_handle_trace() - handle a shared trace buffer resource 441 * @rproc: the remote processor 442 * @rsc: the trace resource descriptor 443 * @avail: size of available data (for sanity checking the image) 444 * 445 * In case the remote processor dumps trace logs into memory, 446 * export it via debugfs. 
447 * 448 * Currently, the 'da' member of @rsc should contain the device address 449 * where the remote processor is dumping the traces. Later we could also 450 * support dynamically allocating this address using the generic 451 * DMA API (but currently there isn't a use case for that). 452 * 453 * Returns 0 on success, or an appropriate error code otherwise 454 */ 455static int rproc_handle_trace(struct rproc *rproc, struct fw_rsc_trace *rsc, 456 int avail) 457{ 458 struct rproc_mem_entry *trace; 459 struct device *dev = rproc->dev; 460 void *ptr; 461 char name[15]; 462 463 if (sizeof(*rsc) > avail) { 464 dev_err(rproc->dev, "trace rsc is truncated\n"); 465 return -EINVAL; 466 } 467 468 /* make sure reserved bytes are zeroes */ 469 if (rsc->reserved) { 470 dev_err(dev, "trace rsc has non zero reserved bytes\n"); 471 return -EINVAL; 472 } 473 474 /* what's the kernel address of this resource ? */ 475 ptr = rproc_da_to_va(rproc, rsc->da, rsc->len); 476 if (!ptr) { 477 dev_err(dev, "erroneous trace resource entry\n"); 478 return -EINVAL; 479 } 480 481 trace = kzalloc(sizeof(*trace), GFP_KERNEL); 482 if (!trace) { 483 dev_err(dev, "kzalloc trace failed\n"); 484 return -ENOMEM; 485 } 486 487 /* set the trace buffer dma properties */ 488 trace->len = rsc->len; 489 trace->va = ptr; 490 491 /* make sure snprintf always null terminates, even if truncating */ 492 snprintf(name, sizeof(name), "trace%d", rproc->num_traces); 493 494 /* create the debugfs entry */ 495 trace->priv = rproc_create_trace_file(name, rproc, trace); 496 if (!trace->priv) { 497 trace->va = NULL; 498 kfree(trace); 499 return -EINVAL; 500 } 501 502 list_add_tail(&trace->node, &rproc->traces); 503 504 rproc->num_traces++; 505 506 dev_dbg(dev, "%s added: va %p, da 0x%x, len 0x%x\n", name, ptr, 507 rsc->da, rsc->len); 508 509 return 0; 510} 511 512/** 513 * rproc_handle_devmem() - handle devmem resource entry 514 * @rproc: remote processor handle 515 * @rsc: the devmem resource entry 516 * @avail: size of 
available data (for sanity checking the image) 517 * 518 * Remote processors commonly need to access certain on-chip peripherals. 519 * 520 * Some of these remote processors access memory via an iommu device, 521 * and might require us to configure their iommu before they can access 522 * the on-chip peripherals they need. 523 * 524 * This resource entry is a request to map such a peripheral device. 525 * 526 * These devmem entries will contain the physical address of the device in 527 * the 'pa' member. If a specific device address is expected, then 'da' will 528 * contain it (currently this is the only use case supported). 'len' will 529 * contain the size of the physical region we need to map. 530 * 531 * Currently we just "trust" those devmem entries to contain valid physical 532 * addresses, but this is going to change: we want the implementations to 533 * tell us ranges of physical addresses the firmware is allowed to request, 534 * and not allow firmwares to request access to physical addresses that 535 * are outside those ranges. 
536 */ 537static int rproc_handle_devmem(struct rproc *rproc, struct fw_rsc_devmem *rsc, 538 int avail) 539{ 540 struct rproc_mem_entry *mapping; 541 int ret; 542 543 /* no point in handling this resource without a valid iommu domain */ 544 if (!rproc->domain) 545 return -EINVAL; 546 547 if (sizeof(*rsc) > avail) { 548 dev_err(rproc->dev, "devmem rsc is truncated\n"); 549 return -EINVAL; 550 } 551 552 /* make sure reserved bytes are zeroes */ 553 if (rsc->reserved) { 554 dev_err(rproc->dev, "devmem rsc has non zero reserved bytes\n"); 555 return -EINVAL; 556 } 557 558 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); 559 if (!mapping) { 560 dev_err(rproc->dev, "kzalloc mapping failed\n"); 561 return -ENOMEM; 562 } 563 564 ret = iommu_map(rproc->domain, rsc->da, rsc->pa, rsc->len, rsc->flags); 565 if (ret) { 566 dev_err(rproc->dev, "failed to map devmem: %d\n", ret); 567 goto out; 568 } 569 570 /* 571 * We'll need this info later when we'll want to unmap everything 572 * (e.g. on shutdown). 573 * 574 * We can't trust the remote processor not to change the resource 575 * table, so we must maintain this info independently. 576 */ 577 mapping->da = rsc->da; 578 mapping->len = rsc->len; 579 list_add_tail(&mapping->node, &rproc->mappings); 580 581 dev_dbg(rproc->dev, "mapped devmem pa 0x%x, da 0x%x, len 0x%x\n", 582 rsc->pa, rsc->da, rsc->len); 583 584 return 0; 585 586out: 587 kfree(mapping); 588 return ret; 589} 590 591/** 592 * rproc_handle_carveout() - handle phys contig memory allocation requests 593 * @rproc: rproc handle 594 * @rsc: the resource entry 595 * @avail: size of available data (for image validation) 596 * 597 * This function will handle firmware requests for allocation of physically 598 * contiguous memory regions. 599 * 600 * These request entries should come first in the firmware's resource table, 601 * as other firmware entries might request placing other data objects inside 602 * these memory regions (e.g. 
data/code segments, trace resource entries, ...). 603 * 604 * Allocating memory this way helps utilizing the reserved physical memory 605 * (e.g. CMA) more efficiently, and also minimizes the number of TLB entries 606 * needed to map it (in case @rproc is using an IOMMU). Reducing the TLB 607 * pressure is important; it may have a substantial impact on performance. 608 */ 609static int rproc_handle_carveout(struct rproc *rproc, 610 struct fw_rsc_carveout *rsc, int avail) 611{ 612 struct rproc_mem_entry *carveout, *mapping; 613 struct device *dev = rproc->dev; 614 dma_addr_t dma; 615 void *va; 616 int ret; 617 618 if (sizeof(*rsc) > avail) { 619 dev_err(rproc->dev, "carveout rsc is truncated\n"); 620 return -EINVAL; 621 } 622 623 /* make sure reserved bytes are zeroes */ 624 if (rsc->reserved) { 625 dev_err(dev, "carveout rsc has non zero reserved bytes\n"); 626 return -EINVAL; 627 } 628 629 dev_dbg(dev, "carveout rsc: da %x, pa %x, len %x, flags %x\n", 630 rsc->da, rsc->pa, rsc->len, rsc->flags); 631 632 mapping = kzalloc(sizeof(*mapping), GFP_KERNEL); 633 if (!mapping) { 634 dev_err(dev, "kzalloc mapping failed\n"); 635 return -ENOMEM; 636 } 637 638 carveout = kzalloc(sizeof(*carveout), GFP_KERNEL); 639 if (!carveout) { 640 dev_err(dev, "kzalloc carveout failed\n"); 641 ret = -ENOMEM; 642 goto free_mapping; 643 } 644 645 va = dma_alloc_coherent(dev, rsc->len, &dma, GFP_KERNEL); 646 if (!va) { 647 dev_err(dev, "failed to dma alloc carveout: %d\n", rsc->len); 648 ret = -ENOMEM; 649 goto free_carv; 650 } 651 652 dev_dbg(dev, "carveout va %p, dma %x, len 0x%x\n", va, dma, rsc->len); 653 654 /* 655 * Ok, this is non-standard. 656 * 657 * Sometimes we can't rely on the generic iommu-based DMA API 658 * to dynamically allocate the device address and then set the IOMMU 659 * tables accordingly, because some remote processors might 660 * _require_ us to use hard coded device addresses that their 661 * firmware was compiled with. 
662 * 663 * In this case, we must use the IOMMU API directly and map 664 * the memory to the device address as expected by the remote 665 * processor. 666 * 667 * Obviously such remote processor devices should not be configured 668 * to use the iommu-based DMA API: we expect 'dma' to contain the 669 * physical address in this case. 670 */ 671 if (rproc->domain) { 672 ret = iommu_map(rproc->domain, rsc->da, dma, rsc->len, 673 rsc->flags); 674 if (ret) { 675 dev_err(dev, "iommu_map failed: %d\n", ret); 676 goto dma_free; 677 } 678 679 /* 680 * We'll need this info later when we'll want to unmap 681 * everything (e.g. on shutdown). 682 * 683 * We can't trust the remote processor not to change the 684 * resource table, so we must maintain this info independently. 685 */ 686 mapping->da = rsc->da; 687 mapping->len = rsc->len; 688 list_add_tail(&mapping->node, &rproc->mappings); 689 690 dev_dbg(dev, "carveout mapped 0x%x to 0x%x\n", rsc->da, dma); 691 692 /* 693 * Some remote processors might need to know the pa 694 * even though they are behind an IOMMU. E.g., OMAP4's 695 * remote M3 processor needs this so it can control 696 * on-chip hardware accelerators that are not behind 697 * the IOMMU, and therefor must know the pa. 698 * 699 * Generally we don't want to expose physical addresses 700 * if we don't have to (remote processors are generally 701 * _not_ trusted), so we might want to do this only for 702 * remote processor that _must_ have this (e.g. OMAP4's 703 * dual M3 subsystem). 704 */ 705 rsc->pa = dma; 706 } 707 708 carveout->va = va; 709 carveout->len = rsc->len; 710 carveout->dma = dma; 711 carveout->da = rsc->da; 712 713 list_add_tail(&carveout->node, &rproc->carveouts); 714 715 return 0; 716 717dma_free: 718 dma_free_coherent(dev, rsc->len, va, dma); 719free_carv: 720 kfree(carveout); 721free_mapping: 722 kfree(mapping); 723 return ret; 724} 725 726/* 727 * A lookup table for resource handlers. The indices are defined in 728 * enum fw_resource_type. 
729 */ 730static rproc_handle_resource_t rproc_handle_rsc[] = { 731 [RSC_CARVEOUT] = (rproc_handle_resource_t)rproc_handle_carveout, 732 [RSC_DEVMEM] = (rproc_handle_resource_t)rproc_handle_devmem, 733 [RSC_TRACE] = (rproc_handle_resource_t)rproc_handle_trace, 734 [RSC_VDEV] = (rproc_handle_resource_t)rproc_handle_vdev, 735}; 736 737/* handle firmware resource entries before booting the remote processor */ 738static int 739rproc_handle_boot_rsc(struct rproc *rproc, struct resource_table *table, int len) 740{ 741 struct device *dev = rproc->dev; 742 rproc_handle_resource_t handler; 743 int ret = 0, i; 744 745 for (i = 0; i < table->num; i++) { 746 int offset = table->offset[i]; 747 struct fw_rsc_hdr *hdr = (void *)table + offset; 748 int avail = len - offset - sizeof(*hdr); 749 void *rsc = (void *)hdr + sizeof(*hdr); 750 751 /* make sure table isn't truncated */ 752 if (avail < 0) { 753 dev_err(dev, "rsc table is truncated\n"); 754 return -EINVAL; 755 } 756 757 dev_dbg(dev, "rsc: type %d\n", hdr->type); 758 759 if (hdr->type >= RSC_LAST) { 760 dev_warn(dev, "unsupported resource %d\n", hdr->type); 761 continue; 762 } 763 764 handler = rproc_handle_rsc[hdr->type]; 765 if (!handler) 766 continue; 767 768 ret = handler(rproc, rsc, avail); 769 if (ret) 770 break; 771 } 772 773 return ret; 774} 775 776/* handle firmware resource entries while registering the remote processor */ 777static int 778rproc_handle_virtio_rsc(struct rproc *rproc, struct resource_table *table, int len) 779{ 780 struct device *dev = rproc->dev; 781 int ret = 0, i; 782 783 for (i = 0; i < table->num; i++) { 784 int offset = table->offset[i]; 785 struct fw_rsc_hdr *hdr = (void *)table + offset; 786 int avail = len - offset - sizeof(*hdr); 787 788 /* make sure table isn't truncated */ 789 if (avail < 0) { 790 dev_err(dev, "rsc table is truncated\n"); 791 return -EINVAL; 792 } 793 794 dev_dbg(dev, "%s: rsc type %d\n", __func__, hdr->type); 795 796 if (hdr->type == RSC_VDEV) { 797 struct fw_rsc_vdev 
*vrsc = 798 (struct fw_rsc_vdev *)hdr->data; 799 ret = rproc_handle_early_vdev(rproc, vrsc, avail); 800 break; 801 } 802 } 803 804 return ret; 805} 806 807/** 808 * rproc_handle_resources() - find and handle the resource table 809 * @rproc: the rproc handle 810 * @elf_data: the content of the ELF firmware image 811 * @len: firmware size (in bytes) 812 * @handler: function that should be used to handle the resource table 813 * 814 * This function finds the resource table inside the remote processor's 815 * firmware, and invoke a user-supplied handler with it (we have two 816 * possible handlers: one is invoked upon registration of @rproc, 817 * in order to register the supported virito devices, and the other is 818 * invoked when @rproc is actually booted). 819 * 820 * Currently this function fails if a resource table doesn't exist. 821 * This restriction will be removed when we'll start supporting remote 822 * processors that don't need a resource table. 823 */ 824static int rproc_handle_resources(struct rproc *rproc, const u8 *elf_data, 825 size_t len, rproc_handle_resources_t handler) 826 827{ 828 struct elf32_hdr *ehdr; 829 struct elf32_shdr *shdr; 830 const char *name_table; 831 struct device *dev = rproc->dev; 832 int i, ret = -EINVAL; 833 struct resource_table *table; 834 835 ehdr = (struct elf32_hdr *)elf_data; 836 shdr = (struct elf32_shdr *)(elf_data + ehdr->e_shoff); 837 name_table = elf_data + shdr[ehdr->e_shstrndx].sh_offset; 838 839 /* look for the resource table and handle it */ 840 for (i = 0; i < ehdr->e_shnum; i++, shdr++) { 841 int size = shdr->sh_size; 842 int offset = shdr->sh_offset; 843 844 if (strcmp(name_table + shdr->sh_name, ".resource_table")) 845 continue; 846 847 table = (struct resource_table *)(elf_data + offset); 848 849 /* make sure we have the entire table */ 850 if (offset + size > len) { 851 dev_err(dev, "resource table truncated\n"); 852 return -EINVAL; 853 } 854 855 /* make sure table has at least the header */ 856 if 
(sizeof(struct resource_table) > size) { 857 dev_err(dev, "header-less resource table\n"); 858 return -EINVAL; 859 } 860 861 /* we don't support any version beyond the first */ 862 if (table->ver != 1) { 863 dev_err(dev, "unsupported fw ver: %d\n", table->ver); 864 return -EINVAL; 865 } 866 867 /* make sure reserved bytes are zeroes */ 868 if (table->reserved[0] || table->reserved[1]) { 869 dev_err(dev, "non zero reserved bytes\n"); 870 return -EINVAL; 871 } 872 873 /* make sure the offsets array isn't truncated */ 874 if (table->num * sizeof(table->offset[0]) + 875 sizeof(struct resource_table) > size) { 876 dev_err(dev, "resource table incomplete\n"); 877 return -EINVAL; 878 } 879 880 ret = handler(rproc, table, shdr->sh_size); 881 break; 882 } 883 884 return ret; 885} 886 887/** 888 * rproc_resource_cleanup() - clean up and free all acquired resources 889 * @rproc: rproc handle 890 * 891 * This function will free all resources acquired for @rproc, and it 892 * is called when @rproc shuts down, or just failed booting. 893 */ 894static void rproc_resource_cleanup(struct rproc *rproc) 895{ 896 struct rproc_mem_entry *entry, *tmp; 897 struct device *dev = rproc->dev; 898 struct rproc_vdev *rvdev = rproc->rvdev; 899 int i; 900 901 /* clean up debugfs trace entries */ 902 list_for_each_entry_safe(entry, tmp, &rproc->traces, node) { 903 rproc_remove_trace_file(entry->priv); 904 rproc->num_traces--; 905 list_del(&entry->node); 906 kfree(entry); 907 } 908 909 /* free the coherent memory allocated for the vrings */ 910 for (i = 0; rvdev && i < ARRAY_SIZE(rvdev->vring); i++) { 911 int qsz = rvdev->vring[i].len; 912 void *va = rvdev->vring[i].va; 913 int dma = rvdev->vring[i].dma; 914 915 /* virtqueue size is expressed in number of buffers supported */ 916 if (qsz) { 917 /* how many bytes does this vring really occupy ? 
*/ 918 int size = PAGE_ALIGN(vring_size(qsz, AMP_VRING_ALIGN)); 919 920 dma_free_coherent(rproc->dev, size, va, dma); 921 922 rvdev->vring[i].len = 0; 923 } 924 } 925 926 /* clean up carveout allocations */ 927 list_for_each_entry_safe(entry, tmp, &rproc->carveouts, node) { 928 dma_free_coherent(dev, entry->len, entry->va, entry->dma); 929 list_del(&entry->node); 930 kfree(entry); 931 } 932 933 /* clean up iommu mapping entries */ 934 list_for_each_entry_safe(entry, tmp, &rproc->mappings, node) { 935 size_t unmapped; 936 937 unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); 938 if (unmapped != entry->len) { 939 /* nothing much to do besides complaining */ 940 dev_err(dev, "failed to unmap %u/%u\n", entry->len, 941 unmapped); 942 } 943 944 list_del(&entry->node); 945 kfree(entry); 946 } 947} 948 949/* make sure this fw image is sane */ 950static int rproc_fw_sanity_check(struct rproc *rproc, const struct firmware *fw) 951{ 952 const char *name = rproc->firmware; 953 struct device *dev = rproc->dev; 954 struct elf32_hdr *ehdr; 955 char class; 956 957 if (!fw) { 958 dev_err(dev, "failed to load %s\n", name); 959 return -EINVAL; 960 } 961 962 if (fw->size < sizeof(struct elf32_hdr)) { 963 dev_err(dev, "Image is too small\n"); 964 return -EINVAL; 965 } 966 967 ehdr = (struct elf32_hdr *)fw->data; 968 969 /* We only support ELF32 at this point */ 970 class = ehdr->e_ident[EI_CLASS]; 971 if (class != ELFCLASS32) { 972 dev_err(dev, "Unsupported class: %d\n", class); 973 return -EINVAL; 974 } 975 976 /* We assume the firmware has the same endianess as the host */ 977# ifdef __LITTLE_ENDIAN 978 if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB) { 979# else /* BIG ENDIAN */ 980 if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB) { 981# endif 982 dev_err(dev, "Unsupported firmware endianess\n"); 983 return -EINVAL; 984 } 985 986 if (fw->size < ehdr->e_shoff + sizeof(struct elf32_shdr)) { 987 dev_err(dev, "Image is too small\n"); 988 return -EINVAL; 989 } 990 991 if 
(memcmp(ehdr->e_ident, ELFMAG, SELFMAG)) { 992 dev_err(dev, "Image is corrupted (bad magic)\n"); 993 return -EINVAL; 994 } 995 996 if (ehdr->e_phnum == 0) { 997 dev_err(dev, "No loadable segments\n"); 998 return -EINVAL; 999 } 1000 1001 if (ehdr->e_phoff > fw->size) { 1002 dev_err(dev, "Firmware size is too small\n"); 1003 return -EINVAL; 1004 } 1005 1006 return 0; 1007} 1008 1009/* 1010 * take a firmware and boot a remote processor with it. 1011 */ 1012static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw) 1013{ 1014 struct device *dev = rproc->dev; 1015 const char *name = rproc->firmware; 1016 struct elf32_hdr *ehdr; 1017 int ret; 1018 1019 ret = rproc_fw_sanity_check(rproc, fw); 1020 if (ret) 1021 return ret; 1022 1023 ehdr = (struct elf32_hdr *)fw->data; 1024 1025 dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size); 1026 1027 /* 1028 * if enabling an IOMMU isn't relevant for this rproc, this is 1029 * just a nop 1030 */ 1031 ret = rproc_enable_iommu(rproc); 1032 if (ret) { 1033 dev_err(dev, "can't enable iommu: %d\n", ret); 1034 return ret; 1035 } 1036 1037 /* 1038 * The ELF entry point is the rproc's boot addr (though this is not 1039 * a configurable property of all remote processors: some will always 1040 * boot at a specific hardcoded address). 
 */
	rproc->bootaddr = ehdr->e_entry;

	/* handle fw resources which are required to boot rproc */
	ret = rproc_handle_resources(rproc, fw->data, fw->size,
					rproc_handle_boot_rsc);
	if (ret) {
		dev_err(dev, "Failed to process resources: %d\n", ret);
		goto clean_up;
	}

	/* load the ELF segments to memory */
	ret = rproc_load_segments(rproc, fw->data, fw->size);
	if (ret) {
		dev_err(dev, "Failed to load program segments: %d\n", ret);
		goto clean_up;
	}

	/* power up the remote processor */
	ret = rproc->ops->start(rproc);
	if (ret) {
		dev_err(dev, "can't start rproc %s: %d\n", rproc->name, ret);
		goto clean_up;
	}

	rproc->state = RPROC_RUNNING;

	dev_info(dev, "remote processor %s is now up\n", rproc->name);

	return 0;

clean_up:
	/* undo the resource allocations and iommu setup done above */
	rproc_resource_cleanup(rproc);
	rproc_disable_iommu(rproc);
	return ret;
}

/*
 * take a firmware and look for virtio devices to register.
 *
 * Note: this function is called asynchronously upon registration of the
 * remote processor (so we must wait until it completes before we try
 * to unregister the device. one other option is just to use kref here,
 * that might be cleaner).
 */
static void rproc_fw_config_virtio(const struct firmware *fw, void *context)
{
	/* context is the rproc handle passed to request_firmware_nowait() */
	struct rproc *rproc = context;
	struct device *dev = rproc->dev;
	int ret;

	if (rproc_fw_sanity_check(rproc, fw) < 0)
		goto out;

	/* does the fw support any virtio devices ?
 */
	ret = rproc_handle_resources(rproc, fw->data, fw->size,
					rproc_handle_virtio_rsc);
	if (ret) {
		dev_info(dev, "No fw virtio device was found\n");
		goto out;
	}

	/* add the virtio device (currently only rpmsg vdevs are supported) */
	ret = rproc_add_rpmsg_vdev(rproc);
	if (ret)
		goto out;

out:
	/* fw may be NULL if the firmware wasn't found (see sanity check) */
	if (fw)
		release_firmware(fw);
	/* allow rproc_unregister() contexts, if any, to proceed */
	complete_all(&rproc->firmware_loading_complete);
}

/**
 * rproc_boot() - boot a remote processor
 * @rproc: handle of a remote processor
 *
 * Boot a remote processor (i.e. load its firmware, power it on, ...).
 *
 * If the remote processor is already powered on, this function immediately
 * returns (successfully).
 *
 * Returns 0 on success, and an appropriate error value otherwise.
 */
int rproc_boot(struct rproc *rproc)
{
	const struct firmware *firmware_p;
	struct device *dev;
	int ret;

	if (!rproc) {
		pr_err("invalid rproc handle\n");
		return -EINVAL;
	}

	dev = rproc->dev;

	/* serialize against concurrent boot/shutdown of the same rproc */
	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return ret;
	}

	/* loading a firmware is required */
	if (!rproc->firmware) {
		dev_err(dev, "%s: no firmware to load\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/* prevent underlying implementation from being removed */
	if (!try_module_get(dev->driver->owner)) {
		dev_err(dev, "%s: can't get owner\n", __func__);
		ret = -EINVAL;
		goto unlock_mutex;
	}

	/*
	 * skip the boot process if rproc is already powered up.
	 * Note: the module reference taken above is kept on this path too;
	 * it is dropped by the matching rproc_shutdown() call.
	 */
	if (atomic_inc_return(&rproc->power) > 1) {
		ret = 0;
		goto unlock_mutex;
	}

	dev_info(dev, "powering up %s\n", rproc->name);

	/* load firmware */
	ret =
request_firmware(&firmware_p, rproc->firmware, dev);
	if (ret < 0) {
		dev_err(dev, "request_firmware failed: %d\n", ret);
		goto downref_rproc;
	}

	ret = rproc_fw_boot(rproc, firmware_p);

	/* whether boot succeeded or not, the fw image is no longer needed */
	release_firmware(firmware_p);

downref_rproc:
	/* on failure, undo both the power upref and the module reference */
	if (ret) {
		module_put(dev->driver->owner);
		atomic_dec(&rproc->power);
	}
unlock_mutex:
	mutex_unlock(&rproc->lock);
	return ret;
}
EXPORT_SYMBOL(rproc_boot);

/**
 * rproc_shutdown() - power off the remote processor
 * @rproc: the remote processor
 *
 * Power off a remote processor (previously booted with rproc_boot()).
 *
 * In case @rproc is still being used by an additional user(s), then
 * this function will just decrement the power refcount and exit,
 * without really powering off the device.
 *
 * Every call to rproc_boot() must (eventually) be accompanied by a call
 * to rproc_shutdown(). Calling rproc_shutdown() redundantly is a bug.
 *
 * Notes:
 * - we're not decrementing the rproc's refcount, only the power refcount.
 *   which means that the @rproc handle stays valid even after rproc_shutdown()
 *   returns, and users can still use it with a subsequent rproc_boot(), if
 *   needed.
 * - don't call rproc_shutdown() to unroll rproc_get_by_name(), exactly
 *   because rproc_shutdown() _does not_ decrement the refcount of @rproc.
 *   To decrement the refcount of @rproc, use rproc_put() (but _only_ if
 *   you acquired @rproc using rproc_get_by_name()).
 */
void rproc_shutdown(struct rproc *rproc)
{
	struct device *dev = rproc->dev;
	int ret;

	/*
	 * NOTE(review): if a signal interrupts the lock acquisition we
	 * return without decrementing the power refcount, i.e. this
	 * shutdown request is silently skipped.
	 */
	ret = mutex_lock_interruptible(&rproc->lock);
	if (ret) {
		dev_err(dev, "can't lock rproc %s: %d\n", rproc->name, ret);
		return;
	}

	/* if the remote proc is still needed, bail out */
	if (!atomic_dec_and_test(&rproc->power))
		goto out;

	/* power off the remote processor */
	ret = rproc->ops->stop(rproc);
	if (ret) {
		/* restore the power refcount: the rproc is still running */
		atomic_inc(&rproc->power);
		dev_err(dev, "can't stop rproc: %d\n", ret);
		goto out;
	}

	/* clean up all acquired resources */
	rproc_resource_cleanup(rproc);

	rproc_disable_iommu(rproc);

	rproc->state = RPROC_OFFLINE;

	dev_info(dev, "stopped remote processor %s\n", rproc->name);

out:
	mutex_unlock(&rproc->lock);
	/*
	 * ret is 0 both on the "still needed" path and on full success,
	 * so every effective shutdown drops the module reference taken
	 * by the matching rproc_boot() call.
	 */
	if (!ret)
		module_put(dev->driver->owner);
}
EXPORT_SYMBOL(rproc_shutdown);

/**
 * rproc_release() - completely deletes the existence of a remote processor
 * @kref: the rproc's kref
 *
 * This function should _never_ be called directly.
 *
 * The only reasonable location to use it is as an argument when kref_put'ing
 * @rproc's refcount.
 *
 * This way it will be called when no one holds a valid pointer to this @rproc
 * anymore (and obviously after it is removed from the rprocs klist).
 *
 * Note: this function is not static because rproc_vdev_release() needs it when
 * it decrements @rproc's refcount.
1265 */ 1266void rproc_release(struct kref *kref) 1267{ 1268 struct rproc *rproc = container_of(kref, struct rproc, refcount); 1269 1270 dev_info(rproc->dev, "removing %s\n", rproc->name); 1271 1272 rproc_delete_debug_dir(rproc); 1273 1274 /* at this point no one holds a reference to rproc anymore */ 1275 kfree(rproc); 1276} 1277 1278/* will be called when an rproc is added to the rprocs klist */ 1279static void klist_rproc_get(struct klist_node *n) 1280{ 1281 struct rproc *rproc = container_of(n, struct rproc, node); 1282 1283 kref_get(&rproc->refcount); 1284} 1285 1286/* will be called when an rproc is removed from the rprocs klist */ 1287static void klist_rproc_put(struct klist_node *n) 1288{ 1289 struct rproc *rproc = container_of(n, struct rproc, node); 1290 1291 kref_put(&rproc->refcount, rproc_release); 1292} 1293 1294static struct rproc *next_rproc(struct klist_iter *i) 1295{ 1296 struct klist_node *n; 1297 1298 n = klist_next(i); 1299 if (!n) 1300 return NULL; 1301 1302 return container_of(n, struct rproc, node); 1303} 1304 1305/** 1306 * rproc_get_by_name() - find a remote processor by name and boot it 1307 * @name: name of the remote processor 1308 * 1309 * Finds an rproc handle using the remote processor's name, and then 1310 * boot it. If it's already powered on, then just immediately return 1311 * (successfully). 1312 * 1313 * Returns the rproc handle on success, and NULL on failure. 1314 * 1315 * This function increments the remote processor's refcount, so always 1316 * use rproc_put() to decrement it back once rproc isn't needed anymore. 1317 * 1318 * Note: currently this function (and its counterpart rproc_put()) are not 1319 * used anymore by the rpmsg subsystem. We need to scrutinize the use cases 1320 * that still need them, and see if we can migrate them to use the non 1321 * name-based boot/shutdown interface. 
1322 */ 1323struct rproc *rproc_get_by_name(const char *name) 1324{ 1325 struct rproc *rproc; 1326 struct klist_iter i; 1327 int ret; 1328 1329 /* find the remote processor, and upref its refcount */ 1330 klist_iter_init(&rprocs, &i); 1331 while ((rproc = next_rproc(&i)) != NULL) 1332 if (!strcmp(rproc->name, name)) { 1333 kref_get(&rproc->refcount); 1334 break; 1335 } 1336 klist_iter_exit(&i); 1337 1338 /* can't find this rproc ? */ 1339 if (!rproc) { 1340 pr_err("can't find remote processor %s\n", name); 1341 return NULL; 1342 } 1343 1344 ret = rproc_boot(rproc); 1345 if (ret < 0) { 1346 kref_put(&rproc->refcount, rproc_release); 1347 return NULL; 1348 } 1349 1350 return rproc; 1351} 1352EXPORT_SYMBOL(rproc_get_by_name); 1353 1354/** 1355 * rproc_put() - decrement the refcount of a remote processor, and shut it down 1356 * @rproc: the remote processor 1357 * 1358 * This function tries to shutdown @rproc, and it then decrements its 1359 * refcount. 1360 * 1361 * After this function returns, @rproc may _not_ be used anymore, and its 1362 * handle should be considered invalid. 1363 * 1364 * This function should be called _iff_ the @rproc handle was grabbed by 1365 * calling rproc_get_by_name(). 1366 */ 1367void rproc_put(struct rproc *rproc) 1368{ 1369 /* try to power off the remote processor */ 1370 rproc_shutdown(rproc); 1371 1372 /* downref rproc's refcount */ 1373 kref_put(&rproc->refcount, rproc_release); 1374} 1375EXPORT_SYMBOL(rproc_put); 1376 1377/** 1378 * rproc_register() - register a remote processor 1379 * @rproc: the remote processor handle to register 1380 * 1381 * Registers @rproc with the remoteproc framework, after it has been 1382 * allocated with rproc_alloc(). 1383 * 1384 * This is called by the platform-specific rproc implementation, whenever 1385 * a new remote processor device is probed. 1386 * 1387 * Returns 0 on success and an appropriate error code otherwise. 
1388 * 1389 * Note: this function initiates an asynchronous firmware loading 1390 * context, which will look for virtio devices supported by the rproc's 1391 * firmware. 1392 * 1393 * If found, those virtio devices will be created and added, so as a result 1394 * of registering this remote processor, additional virtio drivers will be 1395 * probed. 1396 * 1397 * Currently, though, we only support a single RPMSG virtio vdev per remote 1398 * processor. 1399 */ 1400int rproc_register(struct rproc *rproc) 1401{ 1402 struct device *dev = rproc->dev; 1403 int ret = 0; 1404 1405 /* expose to rproc_get_by_name users */ 1406 klist_add_tail(&rproc->node, &rprocs); 1407 1408 dev_info(rproc->dev, "%s is available\n", rproc->name); 1409 1410 dev_info(dev, "Note: remoteproc is still under development and considered experimental.\n"); 1411 dev_info(dev, "THE BINARY FORMAT IS NOT YET FINALIZED, and backward compatibility isn't yet guaranteed.\n"); 1412 1413 /* create debugfs entries */ 1414 rproc_create_debug_dir(rproc); 1415 1416 /* rproc_unregister() calls must wait until async loader completes */ 1417 init_completion(&rproc->firmware_loading_complete); 1418 1419 /* 1420 * We must retrieve early virtio configuration info from 1421 * the firmware (e.g. whether to register a virtio rpmsg device, 1422 * what virtio features does it support, ...). 1423 * 1424 * We're initiating an asynchronous firmware loading, so we can 1425 * be built-in kernel code, without hanging the boot process. 
1426 */ 1427 ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG, 1428 rproc->firmware, dev, GFP_KERNEL, 1429 rproc, rproc_fw_config_virtio); 1430 if (ret < 0) { 1431 dev_err(dev, "request_firmware_nowait failed: %d\n", ret); 1432 complete_all(&rproc->firmware_loading_complete); 1433 klist_remove(&rproc->node); 1434 } 1435 1436 return ret; 1437} 1438EXPORT_SYMBOL(rproc_register); 1439 1440/** 1441 * rproc_alloc() - allocate a remote processor handle 1442 * @dev: the underlying device 1443 * @name: name of this remote processor 1444 * @ops: platform-specific handlers (mainly start/stop) 1445 * @firmware: name of firmware file to load 1446 * @len: length of private data needed by the rproc driver (in bytes) 1447 * 1448 * Allocates a new remote processor handle, but does not register 1449 * it yet. 1450 * 1451 * This function should be used by rproc implementations during initialization 1452 * of the remote processor. 1453 * 1454 * After creating an rproc handle using this function, and when ready, 1455 * implementations should then call rproc_register() to complete 1456 * the registration of the remote processor. 1457 * 1458 * On success the new rproc is returned, and on failure, NULL. 1459 * 1460 * Note: _never_ directly deallocate @rproc, even if it was not registered 1461 * yet. Instead, if you just need to unroll rproc_alloc(), use rproc_free(). 
1462 */ 1463struct rproc *rproc_alloc(struct device *dev, const char *name, 1464 const struct rproc_ops *ops, 1465 const char *firmware, int len) 1466{ 1467 struct rproc *rproc; 1468 1469 if (!dev || !name || !ops) 1470 return NULL; 1471 1472 rproc = kzalloc(sizeof(struct rproc) + len, GFP_KERNEL); 1473 if (!rproc) { 1474 dev_err(dev, "%s: kzalloc failed\n", __func__); 1475 return NULL; 1476 } 1477 1478 rproc->dev = dev; 1479 rproc->name = name; 1480 rproc->ops = ops; 1481 rproc->firmware = firmware; 1482 rproc->priv = &rproc[1]; 1483 1484 atomic_set(&rproc->power, 0); 1485 1486 kref_init(&rproc->refcount); 1487 1488 mutex_init(&rproc->lock); 1489 1490 INIT_LIST_HEAD(&rproc->carveouts); 1491 INIT_LIST_HEAD(&rproc->mappings); 1492 INIT_LIST_HEAD(&rproc->traces); 1493 1494 rproc->state = RPROC_OFFLINE; 1495 1496 return rproc; 1497} 1498EXPORT_SYMBOL(rproc_alloc); 1499 1500/** 1501 * rproc_free() - free an rproc handle that was allocated by rproc_alloc 1502 * @rproc: the remote processor handle 1503 * 1504 * This function should _only_ be used if @rproc was only allocated, 1505 * but not registered yet. 1506 * 1507 * If @rproc was already successfully registered (by calling rproc_register()), 1508 * then use rproc_unregister() instead. 1509 */ 1510void rproc_free(struct rproc *rproc) 1511{ 1512 kfree(rproc); 1513} 1514EXPORT_SYMBOL(rproc_free); 1515 1516/** 1517 * rproc_unregister() - unregister a remote processor 1518 * @rproc: rproc handle to unregister 1519 * 1520 * Unregisters a remote processor, and decrements its refcount. 1521 * If its refcount drops to zero, then @rproc will be freed. If not, 1522 * it will be freed later once the last reference is dropped. 1523 * 1524 * This function should be called when the platform specific rproc 1525 * implementation decides to remove the rproc device. it should 1526 * _only_ be called if a previous invocation of rproc_register() 1527 * has completed successfully. 
1528 * 1529 * After rproc_unregister() returns, @rproc is _not_ valid anymore and 1530 * it shouldn't be used. More specifically, don't call rproc_free() 1531 * or try to directly free @rproc after rproc_unregister() returns; 1532 * none of these are needed, and calling them is a bug. 1533 * 1534 * Returns 0 on success and -EINVAL if @rproc isn't valid. 1535 */ 1536int rproc_unregister(struct rproc *rproc) 1537{ 1538 if (!rproc) 1539 return -EINVAL; 1540 1541 /* if rproc is just being registered, wait */ 1542 wait_for_completion(&rproc->firmware_loading_complete); 1543 1544 /* was an rpmsg vdev created ? */ 1545 if (rproc->rvdev) 1546 rproc_remove_rpmsg_vdev(rproc); 1547 1548 klist_remove(&rproc->node); 1549 1550 kref_put(&rproc->refcount, rproc_release); 1551 1552 return 0; 1553} 1554EXPORT_SYMBOL(rproc_unregister); 1555 1556static int __init remoteproc_init(void) 1557{ 1558 rproc_init_debugfs(); 1559 return 0; 1560} 1561module_init(remoteproc_init); 1562 1563static void __exit remoteproc_exit(void) 1564{ 1565 rproc_exit_debugfs(); 1566} 1567module_exit(remoteproc_exit); 1568 1569MODULE_LICENSE("GPL v2"); 1570MODULE_DESCRIPTION("Generic Remote Processor Framework"); 1571