Lines Matching refs:sg

560 	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
561 		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
562 			le32_to_cpu(srbcmd->sg.count)));
567 ((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
568 actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
574 "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
576 actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
582 if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
589 struct user_sgmap64* upsg = (struct user_sgmap64*)&user_srbcmd->sg;
590 struct sgmap64* psg = (struct sgmap64*)&srbcmd->sg;
600 if (upsg->sg[i].count >
609 p = kmalloc(upsg->sg[i].count,GFP_KERNEL|__GFP_DMA);
612 upsg->sg[i].count,i,upsg->count));
616 addr = (u64)upsg->sg[i].addr[0];
617 addr += ((u64)upsg->sg[i].addr[1]) << 32;
623 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
624 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
629 addr = pci_map_single(dev->pdev, p, upsg->sg[i].count, data_dir);
631 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
632 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
633 byte_count += upsg->sg[i].count;
634 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
652 if (usg->sg[i].count >
662 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
665 usg->sg[i].count,i,usg->count));
670 sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
675 if(copy_from_user(p,sg_user[i],upsg->sg[i].count)){
677 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
682 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
684 psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
685 psg->sg[i].addr[1] = cpu_to_le32(addr>>32);
686 byte_count += usg->sg[i].count;
687 psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
695 struct user_sgmap* upsg = &user_srbcmd->sg;
696 struct sgmap* psg = &srbcmd->sg;
703 if (usg->sg[i].count >
712 p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
715 usg->sg[i].count,i,usg->count));
719 addr = (u64)usg->sg[i].addr[0];
720 addr += ((u64)usg->sg[i].addr[1]) << 32;
726 if(copy_from_user(p,sg_user[i],usg->sg[i].count)){
727 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
732 addr = pci_map_single(dev->pdev, p, usg->sg[i].count, data_dir);
734 psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
735 byte_count += usg->sg[i].count;
736 psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
742 if (upsg->sg[i].count >
750 p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
753 upsg->sg[i].count, i, upsg->count));
757 sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
763 upsg->sg[i].count)) {
764 dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
770 upsg->sg[i].count, data_dir);
772 psg->sg[i].addr = cpu_to_le32(addr);
773 byte_count += upsg->sg[i].count;
774 psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
796 ? ((struct sgmap64*)&srbcmd->sg)->sg[i].count
797 : srbcmd->sg.sg[i].count);
799 dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
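
All of the matches above appear to come from the user-issued SRB path (aac_send_raw_srb() in drivers/scsi/aacraid/commctrl.c): after validating user_srbcmd->sg.count against sg_list and the expected fib size, the driver walks the user scatter/gather list and, for each entry, allocates a kernel bounce buffer, copies the user data in for data-out requests, DMA-maps the buffer with pci_map_single(), and writes the mapped address and length into the FIB's sg list while accumulating byte_count. The sketch below condenses that per-entry pattern for the 64-bit sg variant. It is illustrative only: the struct layouts are simplified, the helper name fill_user_sg64_entry() is hypothetical, and keying the copy_from_user() off data_dir (rather than the SRB flags, which the matched lines do not show) is an assumption.

#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/*
 * Illustrative sketch only -- simplified layouts of the 64-bit sg entries;
 * the real definitions live in drivers/scsi/aacraid/aacraid.h.
 */
struct user_sgentry64 { u32 addr[2]; u32 count; };	/* entry supplied by userspace */
struct sgentry64 { __le32 addr[2]; __le32 count; };	/* entry handed to the adapter */

/*
 * Hypothetical helper showing the per-entry pattern matched above:
 * bounce buffer, optional copy-in, DMA map, fill the adapter sg entry.
 * Unwinding of earlier entries on error is left to the caller, which is
 * what the real driver does via the sg_list[] array.
 */
static int fill_user_sg64_entry(struct pci_dev *pdev,
				const struct user_sgentry64 *usge,
				struct sgentry64 *psge,
				void __user **sg_user, void **sg_list, int i,
				int data_dir, u32 *byte_count)
{
	u64 addr;
	void *p;

	/* The real code first rejects entries larger than the adapter limit
	 * (the "if (...->sg[i].count > ...)" checks matched above). */
	p = kmalloc(usge->count, GFP_KERNEL | __GFP_DMA);
	if (!p)
		return -ENOMEM;

	addr = (u64)usge->addr[0] | ((u64)usge->addr[1] << 32);
	sg_user[i] = (void __user *)(uintptr_t)addr;
	sg_list[i] = p;		/* saved so the caller can kfree() it later */

	/* Assumption: copy in only when data flows toward the device. */
	if (data_dir == DMA_TO_DEVICE || data_dir == DMA_BIDIRECTIONAL) {
		if (copy_from_user(p, sg_user[i], usge->count))
			return -EFAULT;
	}

	addr = pci_map_single(pdev, p, usge->count, data_dir);
	psge->addr[0] = cpu_to_le32(addr & 0xffffffff);
	psge->addr[1] = cpu_to_le32(addr >> 32);
	psge->count = cpu_to_le32(usge->count);
	*byte_count += usge->count;
	return 0;
}

The 32-bit sg variant matched above (734-774) follows the same sequence but stores only a single 32-bit address word per entry (psg->sg[i].addr = cpu_to_le32(addr)), and the copy-back to userspace at 796-799 uses the per-entry counts recorded here.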