Lines Matching refs:slave

351  * This function unregisters and destroys all slave MTD objects which are
357 struct mtd_part *slave, *next;
361 list_for_each_entry_safe(slave, next, &mtd_partitions, list)
362 if (slave->master == master) {
363 ret = del_mtd_device(&slave->mtd);
368 list_del(&slave->list);
369 free_partition(slave);
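
The matches above come from del_mtd_partitions(), which walks the global mtd_partitions list and tears down every slave whose ->master pointer matches the given master. A minimal caller sketch, assuming a hypothetical driver remove path (my_flash_remove is an illustrative name, not part of this file):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static void my_flash_remove(struct mtd_info *master)
{
	/* Unregister and free every slave MTD that was carved out of master. */
	del_mtd_partitions(master);
}
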
380 struct mtd_part *slave;
384 slave = kzalloc(sizeof(*slave), GFP_KERNEL);
386 if (!name || !slave) {
390 kfree(slave);
395 slave->mtd.type = master->type;
396 slave->mtd.flags = master->flags & ~part->mask_flags;
397 slave->mtd.size = part->size;
398 slave->mtd.writesize = master->writesize;
399 slave->mtd.writebufsize = master->writebufsize;
400 slave->mtd.oobsize = master->oobsize;
401 slave->mtd.oobavail = master->oobavail;
402 slave->mtd.subpage_sft = master->subpage_sft;
404 slave->mtd.name = name;
405 slave->mtd.owner = master->owner;
406 slave->mtd.backing_dev_info = master->backing_dev_info;
411 slave->mtd.dev.parent = master->dev.parent;
413 slave->mtd.read = part_read;
414 slave->mtd.write = part_write;
417 slave->mtd.panic_write = part_panic_write;
420 slave->mtd.point = part_point;
421 slave->mtd.unpoint = part_unpoint;
425 slave->mtd.get_unmapped_area = part_get_unmapped_area;
427 slave->mtd.read_oob = part_read_oob;
429 slave->mtd.write_oob = part_write_oob;
431 slave->mtd.read_user_prot_reg = part_read_user_prot_reg;
433 slave->mtd.read_fact_prot_reg = part_read_fact_prot_reg;
435 slave->mtd.write_user_prot_reg = part_write_user_prot_reg;
437 slave->mtd.lock_user_prot_reg = part_lock_user_prot_reg;
439 slave->mtd.get_user_prot_info = part_get_user_prot_info;
441 slave->mtd.get_fact_prot_info = part_get_fact_prot_info;
443 slave->mtd.sync = part_sync;
445 slave->mtd.suspend = part_suspend;
446 slave->mtd.resume = part_resume;
449 slave->mtd.writev = part_writev;
451 slave->mtd.lock = part_lock;
453 slave->mtd.unlock = part_unlock;
455 slave->mtd.is_locked = part_is_locked;
457 slave->mtd.block_isbad = part_block_isbad;
459 slave->mtd.block_markbad = part_block_markbad;
460 slave->mtd.erase = part_erase;
461 slave->master = master;
462 slave->offset = part->offset;
464 if (slave->offset == MTDPART_OFS_APPEND)
465 slave->offset = cur_offset;
466 if (slave->offset == MTDPART_OFS_NXTBLK) {
467 slave->offset = cur_offset;
470 slave->offset = (mtd_div_by_eb(cur_offset, master) + 1) * master->erasesize;
473 (unsigned long long)cur_offset, (unsigned long long)slave->offset);
476 if (slave->offset == MTDPART_OFS_RETAIN) {
477 slave->offset = cur_offset;
478 if (master->size - slave->offset >= slave->mtd.size) {
479 slave->mtd.size = master->size - slave->offset
480 - slave->mtd.size;
483 part->name, master->size - slave->offset,
484 slave->mtd.size);
489 if (slave->mtd.size == MTDPART_SIZ_FULL)
490 slave->mtd.size = master->size - slave->offset;
492 printk(KERN_NOTICE "0x%012llx-0x%012llx : \"%s\"\n", (unsigned long long)slave->offset,
493 (unsigned long long)(slave->offset + slave->mtd.size), slave->mtd.name);
496 if (slave->offset >= master->size) {
498 slave->offset = 0;
499 slave->mtd.size = 0;
504 if (slave->offset + slave->mtd.size > master->size) {
505 slave->mtd.size = master->size - slave->offset;
507 part->name, master->name, (unsigned long long)slave->mtd.size);
512 u64 end = slave->offset + slave->mtd.size;
517 for (i = 0; i < max && regions[i].offset <= slave->offset; i++)
525 if (slave->mtd.erasesize < regions[i].erasesize) {
526 slave->mtd.erasesize = regions[i].erasesize;
529 BUG_ON(slave->mtd.erasesize == 0);
532 slave->mtd.erasesize = master->erasesize;
535 if ((slave->mtd.flags & MTD_WRITEABLE) &&
536 mtd_mod_by_eb(slave->offset, &slave->mtd)) {
540 slave->mtd.flags &= ~MTD_WRITEABLE;
544 if ((slave->mtd.flags & MTD_WRITEABLE) &&
545 mtd_mod_by_eb(slave->mtd.size, &slave->mtd)) {
546 slave->mtd.flags &= ~MTD_WRITEABLE;
551 slave->mtd.ecclayout = master->ecclayout;
555 while (offs < slave->mtd.size) {
556 if (mtd_block_isbad(master, offs + slave->offset))
557 slave->mtd.ecc_stats.badblocks++;
558 offs += slave->mtd.erasesize;
563 return slave;
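
allocate_partition() copies most characteristics from the master into the new slave's mtd_info, wires the part_* wrappers in as its operations, and then resolves the special offset and size markers handled above (MTDPART_OFS_APPEND, MTDPART_OFS_NXTBLK, MTDPART_OFS_RETAIN, MTDPART_SIZ_FULL). A hedged illustration of a partition table that exercises those markers; the names and sizes are made up for the example:

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static struct mtd_partition example_parts[] = {
	{
		.name       = "bootloader",
		.offset     = 0,
		.size       = 256 * 1024,
		.mask_flags = MTD_WRITEABLE,	/* cleared from master->flags: slave is read-only */
	},
	{
		.name   = "kernel",
		.offset = MTDPART_OFS_NXTBLK,	/* cur_offset rounded up to an erase-block boundary */
		.size   = 4 * 1024 * 1024,
	},
	{
		.name   = "rootfs",
		.offset = MTDPART_OFS_APPEND,	/* starts where the previous slave ends */
		.size   = MTDPART_SIZ_FULL,	/* master->size - slave->offset, i.e. the rest of the chip */
	},
};

MTDPART_OFS_RETAIN works the other way around: the partition starts at cur_offset and grows to fill the master, but leaves the requested .size bytes free at the end of the device.
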
625 struct mtd_part *slave, *next;
629 list_for_each_entry_safe(slave, next, &mtd_partitions, list)
630 if ((slave->master == master) &&
631 (slave->mtd.index == partno)) {
632 ret = del_mtd_device(&slave->mtd);
636 list_del(&slave->list);
637 free_partition(slave);
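
mtd_del_partition() is the single-partition counterpart of del_mtd_partitions(): it removes only the slave whose MTD index equals partno and which belongs to the given master, returning a negative errno when no such slave exists or the device cannot be removed. A caller sketch with an assumed helper name:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Hypothetical helper: drop one slave, identified by its MTD index, from master. */
static int drop_one_partition(struct mtd_info *master, int partno)
{
	int err = mtd_del_partition(master, partno);

	if (err)
		pr_warn("could not delete partition %d: %d\n", partno, err);
	return err;
}
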
648 * and registers slave MTD objects which are bound to the master according to
659 struct mtd_part *slave;
666 slave = allocate_partition(master, parts + i, i, cur_offset);
667 if (IS_ERR(slave))
668 return PTR_ERR(slave);
671 list_add(&slave->list, &mtd_partitions);
674 add_mtd_device(&slave->mtd);
676 cur_offset = slave->offset + slave->mtd.size;
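
add_mtd_partitions() is the registration path the remaining matches belong to: for each entry in the parts array it calls allocate_partition(), links the resulting slave into mtd_partitions, registers it with add_mtd_device(), and advances cur_offset so that MTDPART_OFS_APPEND/MTDPART_OFS_NXTBLK in the next entry resolve correctly. A probe-time sketch, reusing the hypothetical example_parts table shown earlier:

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

static int my_flash_add_partitions(struct mtd_info *master)
{
	/* Carve the example layout out of the master device. */
	return add_mtd_partitions(master, example_parts,
				  ARRAY_SIZE(example_parts));
}

In kernels of this vintage, drivers usually reach this function indirectly through mtd_device_register(), which falls back to add_mtd_device() on the whole master when no partition table is supplied.
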