/*
 * Wireless Host Controller (WHC) initialization.
 */
#include <linux/kernel.h>
#include <linux/gfp.h>
#include <linux/dma-mapping.h>
#include <linux/uwb/umc.h>

#include "whcd.h"

/*
 * Reset the host controller.
 */
static void whc_hw_reset(struct whc *whc)
{
        le_writel(WUSBCMD_WHCRESET, whc->base + WUSBCMD);
        /* Wait (up to 100 ms) for the reset bit to self-clear. */
        whci_wait_for(&whc->umc->dev, whc->base + WUSBCMD, WUSBCMD_WHCRESET, 0,
                      100, "reset");
}
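
/*
 * Initialize the Device Information (DI) buffer: every entry starts
 * out disabled, then the hardware is given the buffer's DMA address.
 */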
static void whc_hw_init_di_buf(struct whc *whc)
{
        int d;

        for (d = 0; d < whc->n_devices; d++)
                whc->di_buf[d].addr_sec_info = WHC_DI_DISABLE;

        le_writeq(whc->di_buf_dma, whc->base + WUSBDEVICEINFOADDR);
}
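
/*
 * Initialize the Device Notification (DN) buffer: zeroing it clears
 * the valid bit in each entry before the hardware is pointed at it.
 */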
static void whc_hw_init_dn_buf(struct whc *whc)
{
        memset(whc->dn_buf, 0, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES);

        le_writeq(whc->dn_buf_dma, whc->base + WUSBDNTSBUFADDR);
}
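
/*
 * One-time initialization of the WHC: set up locking, work items and
 * lists, map and reset the hardware, then allocate the DMA buffers the
 * controller needs.  On failure, whc_clean_up() releases whatever was
 * already acquired.
 */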
int whc_init(struct whc *whc)
{
        u32 whcsparams;
        int ret, i;
        resource_size_t start, len;

        spin_lock_init(&whc->lock);
        mutex_init(&whc->mutex);
        init_waitqueue_head(&whc->cmd_wq);
        init_waitqueue_head(&whc->async_list_wq);
        init_waitqueue_head(&whc->periodic_list_wq);
        whc->workqueue = create_singlethread_workqueue(dev_name(&whc->umc->dev));
        if (whc->workqueue == NULL) {
                ret = -ENOMEM;
                goto error;
        }
        INIT_WORK(&whc->dn_work, whc_dn_work);

        INIT_WORK(&whc->async_work, scan_async_work);
        INIT_LIST_HEAD(&whc->async_list);
        INIT_LIST_HEAD(&whc->async_removed_list);

        INIT_WORK(&whc->periodic_work, scan_periodic_work);
        /* One periodic list per supported qset period. */
        for (i = 0; i < 5; i++)
                INIT_LIST_HEAD(&whc->periodic_list[i]);
        INIT_LIST_HEAD(&whc->periodic_removed_list);

        /* Map HC registers. */
        start = whc->umc->resource.start;
        len = whc->umc->resource.end - start + 1;
        if (!request_mem_region(start, len, "whci-hc")) {
                dev_err(&whc->umc->dev, "can't request HC region\n");
                ret = -EBUSY;
                goto error;
        }
        whc->base_phys = start;
        whc->base = ioremap(start, len);
        if (!whc->base) {
                dev_err(&whc->umc->dev, "ioremap failed\n");
                ret = -ENOMEM;
                goto error;
        }

        whc_hw_reset(whc);

        /* Read the maximum number of devices, keys and MMC IEs from
         * the capability register. */
        whcsparams = le_readl(whc->base + WHCSPARAMS);
        whc->n_devices = WHCSPARAMS_TO_N_DEVICES(whcsparams);
        whc->n_keys = WHCSPARAMS_TO_N_KEYS(whcsparams);
        whc->n_mmc_ies = WHCSPARAMS_TO_N_MMC_IES(whcsparams);

        dev_dbg(&whc->umc->dev, "N_DEVICES = %d, N_KEYS = %d, N_MMC_IES = %d\n",
                whc->n_devices, whc->n_keys, whc->n_mmc_ies);

        /* qsets are allocated 64-byte aligned for the hardware. */
        whc->qset_pool = dma_pool_create("qset", &whc->umc->dev,
                                         sizeof(struct whc_qset), 64, 0);
        if (whc->qset_pool == NULL) {
                ret = -ENOMEM;
                goto error;
        }

        ret = asl_init(whc);
        if (ret < 0)
                goto error;
        ret = pzl_init(whc);
        if (ret < 0)
                goto error;

        /* Allocate the generic command buffer, the Device Notification
         * buffer and the Device Information buffer. */
        whc->gen_cmd_buf = dma_alloc_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
                                              &whc->gen_cmd_buf_dma, GFP_KERNEL);
        if (whc->gen_cmd_buf == NULL) {
                ret = -ENOMEM;
                goto error;
        }

        whc->dn_buf = dma_alloc_coherent(&whc->umc->dev,
                                         sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
                                         &whc->dn_buf_dma, GFP_KERNEL);
        if (!whc->dn_buf) {
                ret = -ENOMEM;
                goto error;
        }
        whc_hw_init_dn_buf(whc);

        whc->di_buf = dma_alloc_coherent(&whc->umc->dev,
                                         sizeof(struct di_buf_entry) * whc->n_devices,
                                         &whc->di_buf_dma, GFP_KERNEL);
        if (!whc->di_buf) {
                ret = -ENOMEM;
                goto error;
        }
        whc_hw_init_di_buf(whc);

        return 0;

error:
        whc_clean_up(whc);
        return ret;
}
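
/*
 * Release everything whc_init() acquired.  Each pointer is tested
 * before being freed, so this is safe to call from any point in the
 * whc_init() error path.
 */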
void whc_clean_up(struct whc *whc)
{
        resource_size_t len;

        if (whc->di_buf)
                dma_free_coherent(&whc->umc->dev, sizeof(struct di_buf_entry) * whc->n_devices,
                                  whc->di_buf, whc->di_buf_dma);
        if (whc->dn_buf)
                dma_free_coherent(&whc->umc->dev, sizeof(struct dn_buf_entry) * WHC_N_DN_ENTRIES,
                                  whc->dn_buf, whc->dn_buf_dma);
        if (whc->gen_cmd_buf)
                dma_free_coherent(&whc->umc->dev, WHC_GEN_CMD_DATA_LEN,
                                  whc->gen_cmd_buf, whc->gen_cmd_buf_dma);

        pzl_clean_up(whc);
        asl_clean_up(whc);

        if (whc->qset_pool)
                dma_pool_destroy(whc->qset_pool);

        len = resource_size(&whc->umc->resource);
        if (whc->base)
                iounmap(whc->base);
        if (whc->base_phys)
                release_mem_region(whc->base_phys, len);

        if (whc->workqueue)
                destroy_workqueue(whc->workqueue);
}
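
/*
 * Typical use (an illustrative sketch; the probe context is assumed,
 * not shown in this file): the UMC probe routine allocates the struct
 * whc, sets whc->umc, and then calls whc_init().  Since whc_init()
 * invokes whc_clean_up() on its own error path, callers only need to
 * propagate the error code:
 *
 *        ret = whc_init(whc);
 *        if (ret)
 *                return ret;
 *
 * On driver removal, whc_clean_up() is the matching teardown call.
 */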