1/*
2 *  linux/drivers/mmc/core/mmc.c
3 *
4 *  Copyright (C) 2003-2004 Russell King, All Rights Reserved.
5 *  Copyright (C) 2005-2007 Pierre Ossman, All Rights Reserved.
6 *  MMCv4 support Copyright (C) 2006 Philip Langdale, All Rights Reserved.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/err.h>
14#include <linux/slab.h>
15#include <linux/stat.h>
16#include <linux/pm_runtime.h>
17
18#include <linux/mmc/host.h>
19#include <linux/mmc/card.h>
20#include <linux/mmc/mmc.h>
21
22#include "core.h"
23#include "bus.h"
24#include "mmc_ops.h"
25#include "sd_ops.h"
26
/*
 * Lookup tables for decoding the CSD TRAN_SPEED field into a maximum
 * data transfer rate (see mmc_decode_csd()): transfer-rate unit
 * (exponent) and time-value multiplier (mantissa, scaled by 10).
 */
static const unsigned int tran_exp[] = {
	10000,		100000,		1000000,	10000000,
	0,		0,		0,		0
};

static const unsigned char tran_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};

/*
 * Lookup tables for decoding the CSD TAAC (data access time) field:
 * time unit (exponent) and multiplier (mantissa, scaled by 10 —
 * mmc_decode_csd() divides the product back down, rounding up).
 */
static const unsigned int tacc_exp[] = {
	1,	10,	100,	1000,	10000,	100000,	1000000, 10000000,
};

static const unsigned int tacc_mant[] = {
	0,	10,	12,	13,	15,	20,	25,	30,
	35,	40,	45,	50,	55,	60,	70,	80,
};
45
/*
 * UNSTUFF_BITS - extract a bit field from a 128-bit card response.
 * @resp:  array of four u32 words holding the response, most significant
 *         word first (resp[0] = bits 127:96, resp[3] = bits 31:0).
 * @start: bit position of the field's least significant bit (0..127).
 * @size:  width of the field in bits (up to 32).
 *
 * A field may straddle two adjacent response words; the high-order part
 * is OR-ed in from the previous (more significant) word.  The "% 32"
 * avoids an undefined shift by 32 when __shft happens to be zero.
 */
#define UNSTUFF_BITS(resp,start,size)					\
	({								\
		const int __size = size;				\
		const u32 __mask = (__size < 32 ? 1 << __size : 0) - 1;	\
		const int __off = 3 - ((start) / 32);			\
		const int __shft = (start) & 31;			\
		u32 __res;						\
									\
		__res = resp[__off] >> __shft;				\
		if (__size + __shft > 32)				\
			__res |= resp[__off-1] << ((32 - __shft) % 32);	\
		__res & __mask;						\
	})
59
60/*
61 * Given the decoded CSD structure, decode the raw CID to our CID structure.
62 */
63static int mmc_decode_cid(struct mmc_card *card)
64{
65	u32 *resp = card->raw_cid;
66
67	/*
68	 * The selection of the format here is based upon published
69	 * specs from sandisk and from what people have reported.
70	 */
71	switch (card->csd.mmca_vsn) {
72	case 0: /* MMC v1.0 - v1.2 */
73	case 1: /* MMC v1.4 */
74		card->cid.manfid	= UNSTUFF_BITS(resp, 104, 24);
75		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
76		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
77		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
78		card->cid.prod_name[3]	= UNSTUFF_BITS(resp, 72, 8);
79		card->cid.prod_name[4]	= UNSTUFF_BITS(resp, 64, 8);
80		card->cid.prod_name[5]	= UNSTUFF_BITS(resp, 56, 8);
81		card->cid.prod_name[6]	= UNSTUFF_BITS(resp, 48, 8);
82		card->cid.hwrev		= UNSTUFF_BITS(resp, 44, 4);
83		card->cid.fwrev		= UNSTUFF_BITS(resp, 40, 4);
84		card->cid.serial	= UNSTUFF_BITS(resp, 16, 24);
85		card->cid.month		= UNSTUFF_BITS(resp, 12, 4);
86		card->cid.year		= UNSTUFF_BITS(resp, 8, 4) + 1997;
87		break;
88
89	case 2: /* MMC v2.0 - v2.2 */
90	case 3: /* MMC v3.1 - v3.3 */
91	case 4: /* MMC v4 */
92		card->cid.manfid	= UNSTUFF_BITS(resp, 120, 8);
93		card->cid.oemid		= UNSTUFF_BITS(resp, 104, 16);
94		card->cid.prod_name[0]	= UNSTUFF_BITS(resp, 96, 8);
95		card->cid.prod_name[1]	= UNSTUFF_BITS(resp, 88, 8);
96		card->cid.prod_name[2]	= UNSTUFF_BITS(resp, 80, 8);
97		card->cid.prod_name[3]	= UNSTUFF_BITS(resp, 72, 8);
98		card->cid.prod_name[4]	= UNSTUFF_BITS(resp, 64, 8);
99		card->cid.prod_name[5]	= UNSTUFF_BITS(resp, 56, 8);
100		card->cid.prv		= UNSTUFF_BITS(resp, 48, 8);
101		card->cid.serial	= UNSTUFF_BITS(resp, 16, 32);
102		card->cid.month		= UNSTUFF_BITS(resp, 12, 4);
103		card->cid.year		= UNSTUFF_BITS(resp, 8, 4) + 1997;
104		break;
105
106	default:
107		pr_err("%s: card has unknown MMCA version %d\n",
108			mmc_hostname(card->host), card->csd.mmca_vsn);
109		return -EINVAL;
110	}
111
112	return 0;
113}
114
115static void mmc_set_erase_size(struct mmc_card *card)
116{
117	if (card->ext_csd.erase_group_def & 1)
118		card->erase_size = card->ext_csd.hc_erase_size;
119	else
120		card->erase_size = card->csd.erase_size;
121
122	mmc_init_erase(card);
123}
124
125/*
126 * Given a 128-bit response, decode to our card CSD structure.
127 */
static int mmc_decode_csd(struct mmc_card *card)
{
	struct mmc_csd *csd = &card->csd;
	unsigned int e, m, a, b;	/* scratch: exponents/mantissas/sizes */
	u32 *resp = card->raw_csd;

	/*
	 * We only understand CSD structure v1.1 and v1.2.
	 * v1.2 has extra information in bits 15, 11 and 10.
	 * We also support eMMC v4.4 & v4.41.
	 */
	csd->structure = UNSTUFF_BITS(resp, 126, 2);
	if (csd->structure == 0) {
		pr_err("%s: unrecognised CSD structure version %d\n",
			mmc_hostname(card->host), csd->structure);
		return -EINVAL;
	}

	/* Spec version, data access time (TAAC/NSAC) and max transfer rate */
	csd->mmca_vsn	 = UNSTUFF_BITS(resp, 122, 4);
	m = UNSTUFF_BITS(resp, 115, 4);
	e = UNSTUFF_BITS(resp, 112, 3);
	/* tacc_mant is scaled by 10; round up when dividing back out */
	csd->tacc_ns	 = (tacc_exp[e] * tacc_mant[m] + 9) / 10;
	csd->tacc_clks	 = UNSTUFF_BITS(resp, 104, 8) * 100;

	m = UNSTUFF_BITS(resp, 99, 4);
	e = UNSTUFF_BITS(resp, 96, 3);
	csd->max_dtr	  = tran_exp[e] * tran_mant[m];
	csd->cmdclass	  = UNSTUFF_BITS(resp, 84, 12);

	/* Capacity in read blocks: (C_SIZE + 1) << (C_SIZE_MULT + 2) */
	e = UNSTUFF_BITS(resp, 47, 3);
	m = UNSTUFF_BITS(resp, 62, 12);
	csd->capacity	  = (1 + m) << (e + 2);

	/* Block size limits and alignment/implementation flags */
	csd->read_blkbits = UNSTUFF_BITS(resp, 80, 4);
	csd->read_partial = UNSTUFF_BITS(resp, 79, 1);
	csd->write_misalign = UNSTUFF_BITS(resp, 78, 1);
	csd->read_misalign = UNSTUFF_BITS(resp, 77, 1);
	csd->dsr_imp = UNSTUFF_BITS(resp, 76, 1);
	csd->r2w_factor = UNSTUFF_BITS(resp, 26, 3);
	csd->write_blkbits = UNSTUFF_BITS(resp, 22, 4);
	csd->write_partial = UNSTUFF_BITS(resp, 21, 1);

	/*
	 * Legacy erase group size, normalised to 512-byte sectors when
	 * the write block is larger than 512 bytes.  Left at zero for
	 * sub-512-byte write blocks.
	 */
	if (csd->write_blkbits >= 9) {
		a = UNSTUFF_BITS(resp, 42, 5);
		b = UNSTUFF_BITS(resp, 37, 5);
		csd->erase_size = (a + 1) * (b + 1);
		csd->erase_size <<= csd->write_blkbits - 9;
	}

	return 0;
}
179
180/*
181 * Read extended CSD.
182 */
183static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
184{
185	int err;
186	u8 *ext_csd;
187
188	BUG_ON(!card);
189	BUG_ON(!new_ext_csd);
190
191	*new_ext_csd = NULL;
192
193	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
194		return 0;
195
196	/*
197	 * As the ext_csd is so large and mostly unused, we don't store the
198	 * raw block in mmc_card.
199	 */
200	ext_csd = kmalloc(512, GFP_KERNEL);
201	if (!ext_csd) {
202		pr_err("%s: could not allocate a buffer to "
203			"receive the ext_csd.\n", mmc_hostname(card->host));
204		return -ENOMEM;
205	}
206
207	err = mmc_send_ext_csd(card, ext_csd);
208	if (err) {
209		kfree(ext_csd);
210		*new_ext_csd = NULL;
211
212		/* If the host or the card can't do the switch,
213		 * fail more gracefully. */
214		if ((err != -EINVAL)
215		 && (err != -ENOSYS)
216		 && (err != -EFAULT))
217			return err;
218
219		/*
220		 * High capacity cards should have this "magic" size
221		 * stored in their CSD.
222		 */
223		if (card->csd.capacity == (4096 * 512)) {
224			pr_err("%s: unable to read EXT_CSD "
225				"on a possible high capacity card. "
226				"Card will be ignored.\n",
227				mmc_hostname(card->host));
228		} else {
229			pr_warn("%s: unable to read EXT_CSD, performance might suffer\n",
230				mmc_hostname(card->host));
231			err = 0;
232		}
233	} else
234		*new_ext_csd = ext_csd;
235
236	return err;
237}
238
/*
 * Intersect the timing modes advertised by the card (EXT_CSD CARD_TYPE)
 * with the host's capability flags, and record both the set of usable
 * modes (card->mmc_avail_type) and the fastest data rate for the
 * high-speed and HS200 classes.
 */
static void mmc_select_card_type(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u8 card_type = card->ext_csd.raw_card_type;
	u32 caps = host->caps, caps2 = host->caps2;
	unsigned int hs_max_dtr = 0, hs200_max_dtr = 0;
	unsigned int avail_type = 0;

	/* Checks are ordered slowest-first so the fastest mode wins. */
	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_26) {
		hs_max_dtr = MMC_HIGH_26_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_26;
	}

	if (caps & MMC_CAP_MMC_HIGHSPEED &&
	    card_type & EXT_CSD_CARD_TYPE_HS_52) {
		hs_max_dtr = MMC_HIGH_52_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS_52;
	}

	if (caps & MMC_CAP_1_8V_DDR &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_8V;
	}

	if (caps & MMC_CAP_1_2V_DDR &&
	    card_type & EXT_CSD_CARD_TYPE_DDR_1_2V) {
		hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_DDR_1_2V;
	}

	if (caps2 & MMC_CAP2_HS200_1_8V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_8V;
	}

	if (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
	    card_type & EXT_CSD_CARD_TYPE_HS200_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS200_1_2V;
	}

	/*
	 * NOTE(review): the HS400 branches record their rate in
	 * hs200_max_dtr — presumably because HS400 is reached through
	 * an HS200 step; confirm against the mode-switch code.
	 */
	if (caps2 & MMC_CAP2_HS400_1_8V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_8V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_8V;
	}

	if (caps2 & MMC_CAP2_HS400_1_2V &&
	    card_type & EXT_CSD_CARD_TYPE_HS400_1_2V) {
		hs200_max_dtr = MMC_HS200_MAX_DTR;
		avail_type |= EXT_CSD_CARD_TYPE_HS400_1_2V;
	}

	card->ext_csd.hs_max_dtr = hs_max_dtr;
	card->ext_csd.hs200_max_dtr = hs200_max_dtr;
	card->mmc_avail_type = avail_type;
}
299
/*
 * Decode the enhanced user data area parameters from EXT_CSD into
 * card->ext_csd.  The offset/size are left at -EINVAL when the card
 * does not support or has not enabled the enhanced area.
 */
static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
{
	u8 hc_erase_grp_sz, hc_wp_grp_sz;

	/*
	 * Disable these attributes by default
	 */
	card->ext_csd.enhanced_area_offset = -EINVAL;
	card->ext_csd.enhanced_area_size = -EINVAL;

	/*
	 * Enhanced area feature support -- check whether the eMMC
	 * card has the Enhanced area enabled.  If so, export enhanced
	 * area offset and size to user by adding sysfs interface.
	 */
	if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) &&
	    (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) {
		if (card->ext_csd.partition_setting_completed) {
			hc_erase_grp_sz =
				ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
			hc_wp_grp_sz =
				ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

			/*
			 * calculate the enhanced data area offset, in bytes
			 * (little-endian 32-bit value in EXT_CSD bytes
			 * 136..139; sector-addressed cards store it in
			 * 512-byte sectors, hence the << 9)
			 */
			card->ext_csd.enhanced_area_offset =
				(ext_csd[139] << 24) + (ext_csd[138] << 16) +
				(ext_csd[137] << 8) + ext_csd[136];
			if (mmc_card_blockaddr(card))
				card->ext_csd.enhanced_area_offset <<= 9;
			/*
			 * calculate the enhanced data area size, in kilobytes
			 * (24-bit multiplier in EXT_CSD bytes 140..142,
			 * scaled by the erase-group and WP-group sizes)
			 */
			card->ext_csd.enhanced_area_size =
				(ext_csd[142] << 16) + (ext_csd[141] << 8) +
				ext_csd[140];
			card->ext_csd.enhanced_area_size *=
				(size_t)(hc_erase_grp_sz * hc_wp_grp_sz);
			card->ext_csd.enhanced_area_size <<= 9;
		} else {
			pr_warn("%s: defines enhanced area without partition setting complete\n",
				mmc_hostname(card->host));
		}
	}
}
346
/*
 * Register the card's general purpose partitions, if any.  Each of the
 * MMC_NUM_GP_PARTITION slots has a 3-byte size multiplier in EXT_CSD;
 * a zero multiplier means the slot is unused.
 */
static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
{
	int idx;
	u8 hc_erase_grp_sz, hc_wp_grp_sz;
	unsigned int part_size;

	/*
	 * General purpose partition feature support --
	 * If ext_csd has the size of general purpose partitions,
	 * set size, part_cfg, partition name in mmc_part.
	 */
	if (ext_csd[EXT_CSD_PARTITION_SUPPORT] &
	    EXT_CSD_PART_SUPPORT_PART_EN) {
		hc_erase_grp_sz =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
		hc_wp_grp_sz =
			ext_csd[EXT_CSD_HC_WP_GRP_SIZE];

		for (idx = 0; idx < MMC_NUM_GP_PARTITION; idx++) {
			/* all three size bytes zero: slot not provisioned */
			if (!ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3] &&
			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1] &&
			    !ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2])
				continue;
			if (card->ext_csd.partition_setting_completed == 0) {
				pr_warn("%s: has partition size defined without partition complete\n",
					mmc_hostname(card->host));
				break;
			}
			/* 24-bit little-endian size multiplier */
			part_size =
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 2]
				<< 16) +
				(ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
				<< 8) +
				ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
			part_size *= (size_t)(hc_erase_grp_sz *
				hc_wp_grp_sz);
			/* << 19: groups are in 512 KiB units, size in bytes */
			mmc_part_add(card, part_size << 19,
				EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
				"gp%d", idx, false,
				MMC_BLK_DATA_AREA_GP);
		}
	}
}
390
391/*
392 * Decode extended CSD.
393 */
/*
 * Decode the EXT_CSD buffer (as obtained by mmc_get_ext_csd()) into
 * card->ext_csd.  Fields are decoded progressively, gated on the
 * EXT_CSD_REV revision byte so that older cards only have the fields
 * their spec revision defines.  A NULL buffer (pre-v4 card) is a no-op.
 *
 * Returns 0 on success or -EINVAL for an unrecognised EXT_CSD structure.
 */
static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
{
	int err = 0, idx;
	unsigned int part_size;

	BUG_ON(!card);

	if (!ext_csd)
		return 0;

	/* Version is coded in the CSD_STRUCTURE byte in the EXT_CSD register */
	card->ext_csd.raw_ext_csd_structure = ext_csd[EXT_CSD_STRUCTURE];
	if (card->csd.structure == 3) {
		if (card->ext_csd.raw_ext_csd_structure > 2) {
			pr_err("%s: unrecognised EXT_CSD structure "
				"version %d\n", mmc_hostname(card->host),
					card->ext_csd.raw_ext_csd_structure);
			err = -EINVAL;
			goto out;
		}
	}

	/*
	 * The EXT_CSD format is meant to be forward compatible. As long
	 * as CSD_STRUCTURE does not change, all values for EXT_CSD_REV
	 * are authorized, see JEDEC JESD84-B50 section B.8.
	 */
	card->ext_csd.rev = ext_csd[EXT_CSD_REV];

	/* Raw sector-count bytes are kept for the bus-width comparison. */
	card->ext_csd.raw_sectors[0] = ext_csd[EXT_CSD_SEC_CNT + 0];
	card->ext_csd.raw_sectors[1] = ext_csd[EXT_CSD_SEC_CNT + 1];
	card->ext_csd.raw_sectors[2] = ext_csd[EXT_CSD_SEC_CNT + 2];
	card->ext_csd.raw_sectors[3] = ext_csd[EXT_CSD_SEC_CNT + 3];
	if (card->ext_csd.rev >= 2) {
		/* SEC_COUNT: 32-bit little-endian sector count */
		card->ext_csd.sectors =
			ext_csd[EXT_CSD_SEC_CNT + 0] << 0 |
			ext_csd[EXT_CSD_SEC_CNT + 1] << 8 |
			ext_csd[EXT_CSD_SEC_CNT + 2] << 16 |
			ext_csd[EXT_CSD_SEC_CNT + 3] << 24;

		/* Cards with density > 2GiB are sector addressed */
		if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
			mmc_card_set_blockaddr(card);
	}

	card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
	mmc_select_card_type(card);

	card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
	card->ext_csd.raw_erase_timeout_mult =
		ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
	card->ext_csd.raw_hc_erase_grp_size =
		ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE];
	if (card->ext_csd.rev >= 3) {
		u8 sa_shift = ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.part_config = ext_csd[EXT_CSD_PART_CONFIG];

		/* EXT_CSD value is in units of 10ms, but we store in ms */
		card->ext_csd.part_time = 10 * ext_csd[EXT_CSD_PART_SWITCH_TIME];

		/* Sleep / awake timeout in 100ns units */
		if (sa_shift > 0 && sa_shift <= 0x17)
			card->ext_csd.sa_timeout =
					1 << ext_csd[EXT_CSD_S_A_TIMEOUT];
		card->ext_csd.erase_group_def =
			ext_csd[EXT_CSD_ERASE_GROUP_DEF];
		/* erase timeout is stored in ms: 300ms per timeout unit */
		card->ext_csd.hc_erase_timeout = 300 *
			ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT];
		/* high-capacity erase group size, in 512-byte sectors */
		card->ext_csd.hc_erase_size =
			ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE] << 10;

		card->ext_csd.rel_sectors = ext_csd[EXT_CSD_REL_WR_SEC_C];

		/*
		 * There are two boot regions of equal size, defined in
		 * multiples of 128K.
		 */
		if (ext_csd[EXT_CSD_BOOT_MULT] && mmc_boot_partition_access(card->host)) {
			for (idx = 0; idx < MMC_NUM_BOOT_PARTITION; idx++) {
				part_size = ext_csd[EXT_CSD_BOOT_MULT] << 17;
				mmc_part_add(card, part_size,
					EXT_CSD_PART_CONFIG_ACC_BOOT0 + idx,
					"boot%d", idx, true,
					MMC_BLK_DATA_AREA_BOOT);
			}
		}
	}

	card->ext_csd.raw_hc_erase_gap_size =
		ext_csd[EXT_CSD_HC_WP_GRP_SIZE];
	card->ext_csd.raw_sec_trim_mult =
		ext_csd[EXT_CSD_SEC_TRIM_MULT];
	card->ext_csd.raw_sec_erase_mult =
		ext_csd[EXT_CSD_SEC_ERASE_MULT];
	card->ext_csd.raw_sec_feature_support =
		ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
	card->ext_csd.raw_trim_mult =
		ext_csd[EXT_CSD_TRIM_MULT];
	card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT];
	/* Revision 4+: partitioning, power classes, secure erase/trim */
	if (card->ext_csd.rev >= 4) {
		if (ext_csd[EXT_CSD_PARTITION_SETTING_COMPLETED] &
		    EXT_CSD_PART_SETTING_COMPLETED)
			card->ext_csd.partition_setting_completed = 1;
		else
			card->ext_csd.partition_setting_completed = 0;

		mmc_manage_enhanced_area(card, ext_csd);

		mmc_manage_gp_partitions(card, ext_csd);

		card->ext_csd.sec_trim_mult =
			ext_csd[EXT_CSD_SEC_TRIM_MULT];
		card->ext_csd.sec_erase_mult =
			ext_csd[EXT_CSD_SEC_ERASE_MULT];
		card->ext_csd.sec_feature_support =
			ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT];
		card->ext_csd.trim_timeout = 300 *
			ext_csd[EXT_CSD_TRIM_MULT];

		/*
		 * Note that the call to mmc_part_add above defaults to read
		 * only. If this default assumption is changed, the call must
		 * take into account the value of boot_locked below.
		 */
		card->ext_csd.boot_ro_lock = ext_csd[EXT_CSD_BOOT_WP];
		card->ext_csd.boot_ro_lockable = true;

		/* Save power class values */
		card->ext_csd.raw_pwr_cl_52_195 =
			ext_csd[EXT_CSD_PWR_CL_52_195];
		card->ext_csd.raw_pwr_cl_26_195 =
			ext_csd[EXT_CSD_PWR_CL_26_195];
		card->ext_csd.raw_pwr_cl_52_360 =
			ext_csd[EXT_CSD_PWR_CL_52_360];
		card->ext_csd.raw_pwr_cl_26_360 =
			ext_csd[EXT_CSD_PWR_CL_26_360];
		card->ext_csd.raw_pwr_cl_200_195 =
			ext_csd[EXT_CSD_PWR_CL_200_195];
		card->ext_csd.raw_pwr_cl_200_360 =
			ext_csd[EXT_CSD_PWR_CL_200_360];
		card->ext_csd.raw_pwr_cl_ddr_52_195 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_195];
		card->ext_csd.raw_pwr_cl_ddr_52_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_52_360];
		card->ext_csd.raw_pwr_cl_ddr_200_360 =
			ext_csd[EXT_CSD_PWR_CL_DDR_200_360];
	}

	/* Revision 5+: BKOPS, HPI, reliable write, RPMB */
	if (card->ext_csd.rev >= 5) {
		/* Adjust production date as per JEDEC JESD84-B451 */
		if (card->cid.year < 2010)
			card->cid.year += 16;

		/* check whether the eMMC card supports BKOPS */
		if (ext_csd[EXT_CSD_BKOPS_SUPPORT] & 0x1) {
			card->ext_csd.bkops = 1;
			card->ext_csd.bkops_en = ext_csd[EXT_CSD_BKOPS_EN];
			card->ext_csd.raw_bkops_status =
				ext_csd[EXT_CSD_BKOPS_STATUS];
			if (!card->ext_csd.bkops_en)
				pr_info("%s: BKOPS_EN bit is not set\n",
					mmc_hostname(card->host));
		}

		/* check whether the eMMC card supports HPI */
		if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x1) {
			card->ext_csd.hpi = 1;
			/* bit 1 selects CMD12 vs CMD13 as the HPI command */
			if (ext_csd[EXT_CSD_HPI_FEATURES] & 0x2)
				card->ext_csd.hpi_cmd =	MMC_STOP_TRANSMISSION;
			else
				card->ext_csd.hpi_cmd = MMC_SEND_STATUS;
			/*
			 * Indicate the maximum timeout to close
			 * a command interrupted by HPI
			 */
			card->ext_csd.out_of_int_time =
				ext_csd[EXT_CSD_OUT_OF_INTERRUPT_TIME] * 10;
		}

		card->ext_csd.rel_param = ext_csd[EXT_CSD_WR_REL_PARAM];
		card->ext_csd.rst_n_function = ext_csd[EXT_CSD_RST_N_FUNCTION];

		/*
		 * RPMB regions are defined in multiples of 128K.
		 */
		card->ext_csd.raw_rpmb_size_mult = ext_csd[EXT_CSD_RPMB_MULT];
		if (ext_csd[EXT_CSD_RPMB_MULT] && mmc_host_cmd23(card->host)) {
			mmc_part_add(card, ext_csd[EXT_CSD_RPMB_MULT] << 17,
				EXT_CSD_PART_CONFIG_ACC_RPMB,
				"rpmb", 0, false,
				MMC_BLK_DATA_AREA_RPMB);
		}
	}

	/* Pattern returned by reads of erased regions (0xFF or 0x00) */
	card->ext_csd.raw_erased_mem_count = ext_csd[EXT_CSD_ERASED_MEM_CONT];
	if (ext_csd[EXT_CSD_ERASED_MEM_CONT])
		card->erased_byte = 0xFF;
	else
		card->erased_byte = 0x0;

	/* eMMC v4.5 or later */
	if (card->ext_csd.rev >= 6) {
		card->ext_csd.feature_support |= MMC_DISCARD_FEATURE;

		/* switch timeouts are in 10ms units; store in ms */
		card->ext_csd.generic_cmd6_time = 10 *
			ext_csd[EXT_CSD_GENERIC_CMD6_TIME];
		card->ext_csd.power_off_longtime = 10 *
			ext_csd[EXT_CSD_POWER_OFF_LONG_TIME];

		/* 32-bit little-endian cache size */
		card->ext_csd.cache_size =
			ext_csd[EXT_CSD_CACHE_SIZE + 0] << 0 |
			ext_csd[EXT_CSD_CACHE_SIZE + 1] << 8 |
			ext_csd[EXT_CSD_CACHE_SIZE + 2] << 16 |
			ext_csd[EXT_CSD_CACHE_SIZE + 3] << 24;

		if (ext_csd[EXT_CSD_DATA_SECTOR_SIZE] == 1)
			card->ext_csd.data_sector_size = 4096;
		else
			card->ext_csd.data_sector_size = 512;

		if ((ext_csd[EXT_CSD_DATA_TAG_SUPPORT] & 1) &&
		    (ext_csd[EXT_CSD_TAG_UNIT_SIZE] <= 8)) {
			card->ext_csd.data_tag_unit_size =
			((unsigned int) 1 << ext_csd[EXT_CSD_TAG_UNIT_SIZE]) *
			(card->ext_csd.data_sector_size);
		} else {
			card->ext_csd.data_tag_unit_size = 0;
		}

		card->ext_csd.max_packed_writes =
			ext_csd[EXT_CSD_MAX_PACKED_WRITES];
		card->ext_csd.max_packed_reads =
			ext_csd[EXT_CSD_MAX_PACKED_READS];
	} else {
		/* pre-v4.5 cards always use 512-byte data sectors */
		card->ext_csd.data_sector_size = 512;
	}

out:
	return err;
}
634
/* Release a buffer obtained from mmc_get_ext_csd(); NULL is a no-op. */
static inline void mmc_free_ext_csd(u8 *ext_csd)
{
	kfree(ext_csd);
}
639
640
/*
 * Re-read EXT_CSD at the new bus width and compare its read-only fields
 * against the values cached from the initial 1-bit read.  A mismatch
 * indicates the wider bus is not transferring data correctly.
 *
 * Returns 0 if the fields match (or for a 1-bit bus, which needs no
 * validation); -EINVAL on any read failure or mismatch.
 */
static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
{
	u8 *bw_ext_csd;
	int err;

	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	err = mmc_get_ext_csd(card, &bw_ext_csd);

	if (err || bw_ext_csd == NULL) {
		err = -EINVAL;
		goto out;
	}

	/* only compare read only fields */
	err = !((card->ext_csd.raw_partition_support ==
			bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
		(card->ext_csd.raw_erased_mem_count ==
			bw_ext_csd[EXT_CSD_ERASED_MEM_CONT]) &&
		(card->ext_csd.rev ==
			bw_ext_csd[EXT_CSD_REV]) &&
		(card->ext_csd.raw_ext_csd_structure ==
			bw_ext_csd[EXT_CSD_STRUCTURE]) &&
		(card->ext_csd.raw_card_type ==
			bw_ext_csd[EXT_CSD_CARD_TYPE]) &&
		(card->ext_csd.raw_s_a_timeout ==
			bw_ext_csd[EXT_CSD_S_A_TIMEOUT]) &&
		(card->ext_csd.raw_hc_erase_gap_size ==
			bw_ext_csd[EXT_CSD_HC_WP_GRP_SIZE]) &&
		(card->ext_csd.raw_erase_timeout_mult ==
			bw_ext_csd[EXT_CSD_ERASE_TIMEOUT_MULT]) &&
		(card->ext_csd.raw_hc_erase_grp_size ==
			bw_ext_csd[EXT_CSD_HC_ERASE_GRP_SIZE]) &&
		(card->ext_csd.raw_sec_trim_mult ==
			bw_ext_csd[EXT_CSD_SEC_TRIM_MULT]) &&
		(card->ext_csd.raw_sec_erase_mult ==
			bw_ext_csd[EXT_CSD_SEC_ERASE_MULT]) &&
		(card->ext_csd.raw_sec_feature_support ==
			bw_ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]) &&
		(card->ext_csd.raw_trim_mult ==
			bw_ext_csd[EXT_CSD_TRIM_MULT]) &&
		(card->ext_csd.raw_sectors[0] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 0]) &&
		(card->ext_csd.raw_sectors[1] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 1]) &&
		(card->ext_csd.raw_sectors[2] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 2]) &&
		(card->ext_csd.raw_sectors[3] ==
			bw_ext_csd[EXT_CSD_SEC_CNT + 3]) &&
		(card->ext_csd.raw_pwr_cl_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_195]) &&
		(card->ext_csd.raw_pwr_cl_26_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_195]) &&
		(card->ext_csd.raw_pwr_cl_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_52_360]) &&
		(card->ext_csd.raw_pwr_cl_26_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_26_360]) &&
		(card->ext_csd.raw_pwr_cl_200_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_195]) &&
		(card->ext_csd.raw_pwr_cl_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_200_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_195 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_195]) &&
		(card->ext_csd.raw_pwr_cl_ddr_52_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_52_360]) &&
		(card->ext_csd.raw_pwr_cl_ddr_200_360 ==
			bw_ext_csd[EXT_CSD_PWR_CL_DDR_200_360]));

	/* any mismatch above yields err == 1; map it to an errno */
	if (err)
		err = -EINVAL;

out:
	mmc_free_ext_csd(bw_ext_csd);
	return err;
}
717
/* Read-only sysfs attributes exported for every MMC card device. */
MMC_DEV_ATTR(cid, "%08x%08x%08x%08x\n", card->raw_cid[0], card->raw_cid[1],
	card->raw_cid[2], card->raw_cid[3]);
MMC_DEV_ATTR(csd, "%08x%08x%08x%08x\n", card->raw_csd[0], card->raw_csd[1],
	card->raw_csd[2], card->raw_csd[3]);
MMC_DEV_ATTR(date, "%02d/%04d\n", card->cid.month, card->cid.year);
MMC_DEV_ATTR(erase_size, "%u\n", card->erase_size << 9);
MMC_DEV_ATTR(preferred_erase_size, "%u\n", card->pref_erase << 9);
MMC_DEV_ATTR(fwrev, "0x%x\n", card->cid.fwrev);
MMC_DEV_ATTR(hwrev, "0x%x\n", card->cid.hwrev);
MMC_DEV_ATTR(manfid, "0x%06x\n", card->cid.manfid);
MMC_DEV_ATTR(name, "%s\n", card->cid.prod_name);
MMC_DEV_ATTR(oemid, "0x%04x\n", card->cid.oemid);
MMC_DEV_ATTR(prv, "0x%x\n", card->cid.prv);
MMC_DEV_ATTR(serial, "0x%08x\n", card->cid.serial);
MMC_DEV_ATTR(enhanced_area_offset, "%llu\n",
		card->ext_csd.enhanced_area_offset);
MMC_DEV_ATTR(enhanced_area_size, "%u\n", card->ext_csd.enhanced_area_size);
MMC_DEV_ATTR(raw_rpmb_size_mult, "%#x\n", card->ext_csd.raw_rpmb_size_mult);
MMC_DEV_ATTR(rel_sectors, "%#x\n", card->ext_csd.rel_sectors);

static struct attribute *mmc_std_attrs[] = {
	&dev_attr_cid.attr,
	&dev_attr_csd.attr,
	&dev_attr_date.attr,
	&dev_attr_erase_size.attr,
	&dev_attr_preferred_erase_size.attr,
	&dev_attr_fwrev.attr,
	&dev_attr_hwrev.attr,
	&dev_attr_manfid.attr,
	&dev_attr_name.attr,
	&dev_attr_oemid.attr,
	&dev_attr_prv.attr,
	&dev_attr_serial.attr,
	&dev_attr_enhanced_area_offset.attr,
	&dev_attr_enhanced_area_size.attr,
	&dev_attr_raw_rpmb_size_mult.attr,
	&dev_attr_rel_sectors.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mmc_std);

/* Device type used when registering MMC cards on the mmc bus. */
static struct device_type mmc_type = {
	.groups = mmc_std_groups,
};
762
763/*
764 * Select the PowerClass for the current bus width
765 * If power class is defined for 4/8 bit bus in the
766 * extended CSD register, select it by executing the
767 * mmc_switch command.
768 */
static int __mmc_select_powerclass(struct mmc_card *card,
				   unsigned int bus_width)
{
	struct mmc_host *host = card->host;
	struct mmc_ext_csd *ext_csd = &card->ext_csd;
	unsigned int pwrclass_val = 0;
	int err = 0;

	/* Power class selection is supported for versions >= 4.0 */
	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
		return 0;

	/* Power class values are defined only for 4/8 bit bus */
	if (bus_width == EXT_CSD_BUS_WIDTH_1)
		return 0;

	/*
	 * Pick the raw power-class byte for the current supply voltage
	 * and clock; host->ios.vdd is a bit index, so "1 << vdd" maps it
	 * onto the MMC_VDD_* mask values.
	 */
	switch (1 << host->ios.vdd) {
	case MMC_VDD_165_195:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_195;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			/* SDR widths use the SDR class, DDR widths the DDR one */
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_195 :
				ext_csd->raw_pwr_cl_ddr_52_195;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_200_195;
		break;
	case MMC_VDD_27_28:
	case MMC_VDD_28_29:
	case MMC_VDD_29_30:
	case MMC_VDD_30_31:
	case MMC_VDD_31_32:
	case MMC_VDD_32_33:
	case MMC_VDD_33_34:
	case MMC_VDD_34_35:
	case MMC_VDD_35_36:
		if (host->ios.clock <= MMC_HIGH_26_MAX_DTR)
			pwrclass_val = ext_csd->raw_pwr_cl_26_360;
		else if (host->ios.clock <= MMC_HIGH_52_MAX_DTR)
			pwrclass_val = (bus_width <= EXT_CSD_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_52_360 :
				ext_csd->raw_pwr_cl_ddr_52_360;
		else if (host->ios.clock <= MMC_HS200_MAX_DTR)
			pwrclass_val = (bus_width == EXT_CSD_DDR_BUS_WIDTH_8) ?
				ext_csd->raw_pwr_cl_ddr_200_360 :
				ext_csd->raw_pwr_cl_200_360;
		break;
	default:
		pr_warn("%s: Voltage range not supported for power class\n",
			mmc_hostname(host));
		return -EINVAL;
	}

	/* Each byte carries two nibbles: low = 4-bit bus, high = 8-bit bus */
	if (bus_width & (EXT_CSD_BUS_WIDTH_8 | EXT_CSD_DDR_BUS_WIDTH_8))
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_8BIT_MASK) >>
				EXT_CSD_PWR_CL_8BIT_SHIFT;
	else
		pwrclass_val = (pwrclass_val & EXT_CSD_PWR_CL_4BIT_MASK) >>
				EXT_CSD_PWR_CL_4BIT_SHIFT;

	/* If the power class is different from the default value */
	if (pwrclass_val > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_CLASS,
				 pwrclass_val,
				 card->ext_csd.generic_cmd6_time);
	}

	return err;
}
839
840static int mmc_select_powerclass(struct mmc_card *card)
841{
842	struct mmc_host *host = card->host;
843	u32 bus_width, ext_csd_bits;
844	int err, ddr;
845
846	/* Power class selection is supported for versions >= 4.0 */
847	if (card->csd.mmca_vsn < CSD_SPEC_VER_4)
848		return 0;
849
850	bus_width = host->ios.bus_width;
851	/* Power class values are defined only for 4/8 bit bus */
852	if (bus_width == MMC_BUS_WIDTH_1)
853		return 0;
854
855	ddr = card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52;
856	if (ddr)
857		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
858			EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;
859	else
860		ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
861			EXT_CSD_BUS_WIDTH_8 :  EXT_CSD_BUS_WIDTH_4;
862
863	err = __mmc_select_powerclass(card, ext_csd_bits);
864	if (err)
865		pr_warn("%s: power class selection to bus width %d ddr %d failed\n",
866			mmc_hostname(host), 1 << bus_width, ddr);
867
868	return err;
869}
870
871/*
872 * Set the bus speed for the selected speed mode.
873 */
static void mmc_set_bus_speed(struct mmc_card *card)
{
	/* Start from "no limit" and clamp to the active mode's maximum. */
	unsigned int max_dtr = (unsigned int)-1;

	if ((mmc_card_hs200(card) || mmc_card_hs400(card)) &&
	     max_dtr > card->ext_csd.hs200_max_dtr)
		max_dtr = card->ext_csd.hs200_max_dtr;
	else if (mmc_card_hs(card) && max_dtr > card->ext_csd.hs_max_dtr)
		max_dtr = card->ext_csd.hs_max_dtr;
	else if (max_dtr > card->csd.max_dtr)
		max_dtr = card->csd.max_dtr;	/* legacy rate from the CSD */

	mmc_set_clock(card->host, max_dtr);
}
888
889/*
 * Select the bus width among 4-bit and 8-bit(SDR).
891 * If the bus width is changed successfully, return the selected width value.
892 * Zero is returned instead of error value if the wide width is not supported.
893 */
static int mmc_select_bus_width(struct mmc_card *card)
{
	/* Candidate widths, tried widest-first; the two tables are parallel. */
	static unsigned ext_csd_bits[] = {
		EXT_CSD_BUS_WIDTH_8,
		EXT_CSD_BUS_WIDTH_4,
	};
	static unsigned bus_widths[] = {
		MMC_BUS_WIDTH_8,
		MMC_BUS_WIDTH_4,
	};
	struct mmc_host *host = card->host;
	unsigned idx, bus_width = 0;
	int err = 0;

	/* Wide bus needs a v4+ card and a host capable of 4- or 8-bit data. */
	if ((card->csd.mmca_vsn < CSD_SPEC_VER_4) &&
	    !(host->caps & (MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA)))
		return 0;

	/* Skip the 8-bit attempt when the host can only do 4 bits. */
	idx = (host->caps & MMC_CAP_8_BIT_DATA) ? 0 : 1;

	/*
	 * Unlike SD, MMC cards dont have a configuration register to notify
	 * supported bus width. So bus test command should be run to identify
	 * the supported bus width or compare the ext csd values of current
	 * bus width and ext csd values of 1 bit mode read earlier.
	 */
	for (; idx < ARRAY_SIZE(bus_widths); idx++) {
		/*
		 * Host is capable of 8bit transfer, then switch
		 * the device to work in 8bit transfer mode. If the
		 * mmc switch command returns error then switch to
		 * 4bit transfer mode. On success set the corresponding
		 * bus width on the host.
		 */
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_BUS_WIDTH,
				 ext_csd_bits[idx],
				 card->ext_csd.generic_cmd6_time);
		if (err)
			continue;

		bus_width = bus_widths[idx];
		mmc_set_bus_width(host, bus_width);

		/*
		 * If controller can't handle bus width test,
		 * compare ext_csd previously read in 1 bit mode
		 * against ext_csd at new bus width
		 */
		if (!(host->caps & MMC_CAP_BUS_WIDTH_TEST))
			err = mmc_compare_ext_csds(card, bus_width);
		else
			err = mmc_bus_test(card, bus_width);

		if (!err) {
			/* success: report the selected width to the caller */
			err = bus_width;
			break;
		} else {
			pr_warn("%s: switch to bus width %d failed\n",
				mmc_hostname(host), ext_csd_bits[idx]);
		}
	}

	return err;
}
959
960/*
961 * Switch to the high-speed mode
962 */
963static int mmc_select_hs(struct mmc_card *card)
964{
965	int err;
966
967	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
968			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
969			   card->ext_csd.generic_cmd6_time,
970			   true, true, true);
971	if (!err)
972		mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
973
974	return err;
975}
976
977/*
978 * Activate wide bus and DDR if supported.
979 */
static int mmc_select_hs_ddr(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	u32 bus_width, ext_csd_bits;
	int err = 0;

	/* Nothing to do unless card and host agreed on DDR-52 earlier. */
	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_52))
		return 0;

	/* DDR is only defined for 4- and 8-bit buses. */
	bus_width = host->ios.bus_width;
	if (bus_width == MMC_BUS_WIDTH_1)
		return 0;

	ext_csd_bits = (bus_width == MMC_BUS_WIDTH_8) ?
		EXT_CSD_DDR_BUS_WIDTH_8 : EXT_CSD_DDR_BUS_WIDTH_4;

	/* Switch the card's BUS_WIDTH byte to the DDR encoding. */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			EXT_CSD_BUS_WIDTH,
			ext_csd_bits,
			card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_warn("%s: switch to bus width %d ddr failed\n",
			mmc_hostname(host), 1 << bus_width);
		return err;
	}

	/*
	 * eMMC cards can support 3.3V to 1.2V i/o (vccq)
	 * signaling.
	 *
	 * EXT_CSD_CARD_TYPE_DDR_1_8V means 3.3V or 1.8V vccq.
	 *
	 * 1.8V vccq at 3.3V core voltage (vcc) is not required
	 * in the JEDEC spec for DDR.
	 *
	 * Even (e)MMC card can support 3.3v to 1.2v vccq, but not all
	 * host controller can support this, like some of the SDHCI
	 * controller which connect to an eMMC device. Some of these
	 * host controller still needs to use 1.8v vccq for supporting
	 * DDR mode.
	 *
	 * So the sequence will be:
	 * if (host and device can both support 1.2v IO)
	 *	use 1.2v IO;
	 * else if (host and device can both support 1.8v IO)
	 *	use 1.8v IO;
	 * so if host and device can only support 3.3v IO, this is the
	 * last choice.
	 *
	 * WARNING: eMMC rules are NOT the same as SD DDR
	 */
	err = -EINVAL;
	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_2V)
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);

	if (err && (card->mmc_avail_type & EXT_CSD_CARD_TYPE_DDR_1_8V))
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);

	/* make sure vccq is 3.3v after switching disaster */
	if (err)
		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_330);

	/* Only adopt DDR timing once a usable vccq has been established. */
	if (!err)
		mmc_set_timing(host, MMC_TIMING_MMC_DDR52);

	return err;
}
1047
/*
 * Switch to HS400 mode, if the card supports it and the host is already
 * running an 8-bit bus.  Sequence: drop from HS200 to HS timing, switch
 * the card to DDR 8-bit bus width, then enable HS400 timing.
 * Returns 0 when HS400 is not applicable, or a negative error code.
 */
static int mmc_select_hs400(struct mmc_card *card)
{
	struct mmc_host *host = card->host;
	int err = 0;

	/*
	 * HS400 mode requires 8-bit bus width
	 */
	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
	      host->ios.bus_width == MMC_BUS_WIDTH_8))
		return 0;

	/*
	 * Before switching to dual data rate operation for HS400,
	 * it is required to convert from HS200 mode to HS mode.
	 */
	mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
	mmc_set_bus_speed(card);

	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS,
			   card->ext_csd.generic_cmd6_time,
			   true, true, true);
	if (err) {
		pr_warn("%s: switch to high-speed from hs200 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* With the card in HS timing, select the DDR 8-bit bus width. */
	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			 EXT_CSD_BUS_WIDTH,
			 EXT_CSD_DDR_BUS_WIDTH_8,
			 card->ext_csd.generic_cmd6_time);
	if (err) {
		pr_warn("%s: switch to bus width for hs400 failed, err:%d\n",
			mmc_hostname(host), err);
		return err;
	}

	/* Finally enable HS400 timing on the card ... */
	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
			   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS400,
			   card->ext_csd.generic_cmd6_time,
			   true, true, true);
	if (err) {
		pr_warn("%s: switch to hs400 failed, err:%d\n",
			 mmc_hostname(host), err);
		return err;
	}

	/* ... and mirror the new timing/clock on the host side. */
	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
	mmc_set_bus_speed(card);

	return 0;
}
1102
1103/*
1104 * For device supporting HS200 mode, the following sequence
1105 * should be done before executing the tuning process.
1106 * 1. set the desired bus width(4-bit or 8-bit, 1-bit is not supported)
1107 * 2. switch to HS200 mode
1108 * 3. set the clock to > 52Mhz and <=200MHz
1109 */
1110static int mmc_select_hs200(struct mmc_card *card)
1111{
1112	struct mmc_host *host = card->host;
1113	int err = -EINVAL;
1114
1115	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_2V)
1116		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120);
1117
1118	if (err && card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200_1_8V)
1119		err = __mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180);
1120
1121	/* If fails try again during next card power cycle */
1122	if (err)
1123		goto err;
1124
1125	/*
1126	 * Set the bus width(4 or 8) with host's support and
1127	 * switch to HS200 mode if bus width is set successfully.
1128	 */
1129	err = mmc_select_bus_width(card);
1130	if (!IS_ERR_VALUE(err)) {
1131		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1132				   EXT_CSD_HS_TIMING, EXT_CSD_TIMING_HS200,
1133				   card->ext_csd.generic_cmd6_time,
1134				   true, true, true);
1135		if (!err)
1136			mmc_set_timing(host, MMC_TIMING_MMC_HS200);
1137	}
1138err:
1139	return err;
1140}
1141
1142/*
1143 * Activate High Speed or HS200 mode if supported.
1144 */
1145static int mmc_select_timing(struct mmc_card *card)
1146{
1147	int err = 0;
1148
1149	if ((card->csd.mmca_vsn < CSD_SPEC_VER_4 &&
1150	     card->ext_csd.hs_max_dtr == 0))
1151		goto bus_speed;
1152
1153	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS200)
1154		err = mmc_select_hs200(card);
1155	else if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS)
1156		err = mmc_select_hs(card);
1157
1158	if (err && err != -EBADMSG)
1159		return err;
1160
1161	if (err) {
1162		pr_warn("%s: switch to %s failed\n",
1163			mmc_card_hs(card) ? "high-speed" :
1164			(mmc_card_hs200(card) ? "hs200" : ""),
1165			mmc_hostname(card->host));
1166		err = 0;
1167	}
1168
1169bus_speed:
1170	/*
1171	 * Set the bus speed to the selected bus timing.
1172	 * If timing is not selected, backward compatible is the default.
1173	 */
1174	mmc_set_bus_speed(card);
1175	return err;
1176}
1177
/*
 * Reference data block sent by the card in response to the tuning
 * command (CMD21) on a 4-bit bus; hosts compare received data against
 * this pattern when searching for a good sampling point.
 */
const u8 tuning_blk_pattern_4bit[MMC_TUNING_BLK_PATTERN_4BIT_SIZE] = {
	0xff, 0x0f, 0xff, 0x00, 0xff, 0xcc, 0xc3, 0xcc,
	0xc3, 0x3c, 0xcc, 0xff, 0xfe, 0xff, 0xfe, 0xef,
	0xff, 0xdf, 0xff, 0xdd, 0xff, 0xfb, 0xff, 0xfb,
	0xbf, 0xff, 0x7f, 0xff, 0x77, 0xf7, 0xbd, 0xef,
	0xff, 0xf0, 0xff, 0xf0, 0x0f, 0xfc, 0xcc, 0x3c,
	0xcc, 0x33, 0xcc, 0xcf, 0xff, 0xef, 0xff, 0xee,
	0xff, 0xfd, 0xff, 0xfd, 0xdf, 0xff, 0xbf, 0xff,
	0xbb, 0xff, 0xf7, 0xff, 0xf7, 0x7f, 0x7b, 0xde,
};
EXPORT_SYMBOL(tuning_blk_pattern_4bit);
1189
/*
 * Reference data block for the tuning command (CMD21) on an 8-bit bus;
 * see tuning_blk_pattern_4bit above for its role in tuning.
 */
const u8 tuning_blk_pattern_8bit[MMC_TUNING_BLK_PATTERN_8BIT_SIZE] = {
	0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00, 0x00,
	0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc, 0xcc,
	0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff, 0xff,
	0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee, 0xff,
	0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd, 0xdd,
	0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff, 0xbb,
	0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff, 0xff,
	0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee, 0xff,
	0xff, 0xff, 0xff, 0x00, 0xff, 0xff, 0xff, 0x00,
	0x00, 0xff, 0xff, 0xcc, 0xcc, 0xcc, 0x33, 0xcc,
	0xcc, 0xcc, 0x33, 0x33, 0xcc, 0xcc, 0xcc, 0xff,
	0xff, 0xff, 0xee, 0xff, 0xff, 0xff, 0xee, 0xee,
	0xff, 0xff, 0xff, 0xdd, 0xff, 0xff, 0xff, 0xdd,
	0xdd, 0xff, 0xff, 0xff, 0xbb, 0xff, 0xff, 0xff,
	0xbb, 0xbb, 0xff, 0xff, 0xff, 0x77, 0xff, 0xff,
	0xff, 0x77, 0x77, 0xff, 0x77, 0xbb, 0xdd, 0xee,
};
EXPORT_SYMBOL(tuning_blk_pattern_8bit);
1209
1210/*
1211 * Execute tuning sequence to seek the proper bus operating
1212 * conditions for HS200 and HS400, which sends CMD21 to the device.
1213 */
1214static int mmc_hs200_tuning(struct mmc_card *card)
1215{
1216	struct mmc_host *host = card->host;
1217	int err = 0;
1218
1219	/*
1220	 * Timing should be adjusted to the HS400 target
1221	 * operation frequency for tuning process
1222	 */
1223	if (card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
1224	    host->ios.bus_width == MMC_BUS_WIDTH_8)
1225		if (host->ops->prepare_hs400_tuning)
1226			host->ops->prepare_hs400_tuning(host, &host->ios);
1227
1228	if (host->ops->execute_tuning) {
1229		mmc_host_clk_hold(host);
1230		err = host->ops->execute_tuning(host,
1231				MMC_SEND_TUNING_BLOCK_HS200);
1232		mmc_host_clk_release(host);
1233
1234		if (err)
1235			pr_warn("%s: tuning execution failed\n",
1236				mmc_hostname(host));
1237	}
1238
1239	return err;
1240}
1241
1242/*
1243 * Handle the detection and initialisation of a card.
1244 *
1245 * In the case of a resume, "oldcard" will contain the card
1246 * we're trying to reinitialise.
1247 */
static int mmc_init_card(struct mmc_host *host, u32 ocr,
	struct mmc_card *oldcard)
{
	struct mmc_card *card;
	int err;
	u32 cid[4];
	u32 rocr;
	u8 *ext_csd = NULL;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting init */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/*
	 * Since we're changing the OCR value, we seem to
	 * need to tell some cards to go back to the idle
	 * state.  We wait 1ms to give cards time to
	 * respond.
	 * mmc_go_idle is needed for eMMC that are asleep
	 */
	mmc_go_idle(host);

	/* The extra bit indicates that we support high capacity */
	err = mmc_send_op_cond(host, ocr | (1 << 30), &rocr);
	if (err)
		goto err;

	/*
	 * For SPI, enable CRC as appropriate.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_set_crc(host, use_spi_crc);
		if (err)
			goto err;
	}

	/*
	 * Fetch CID from card.
	 */
	if (mmc_host_is_spi(host))
		err = mmc_send_cid(host, cid);
	else
		err = mmc_all_send_cid(host, cid);
	if (err)
		goto err;

	/* On resume, the CID must match the card we suspended with. */
	if (oldcard) {
		if (memcmp(cid, oldcard->raw_cid, sizeof(cid)) != 0) {
			err = -ENOENT;
			goto err;
		}

		card = oldcard;
	} else {
		/*
		 * Allocate card structure.
		 */
		card = mmc_alloc_card(host, &mmc_type);
		if (IS_ERR(card)) {
			err = PTR_ERR(card);
			goto err;
		}

		card->ocr = ocr;
		card->type = MMC_TYPE_MMC;
		/* RCA we will assign to the card via CMD3 below */
		card->rca = 1;
		memcpy(card->raw_cid, cid, sizeof(card->raw_cid));
	}

	/*
	 * For native busses:  set card RCA and quit open drain mode.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_set_relative_addr(card);
		if (err)
			goto free_card;

		mmc_set_bus_mode(host, MMC_BUSMODE_PUSHPULL);
	}

	if (!oldcard) {
		/*
		 * Fetch CSD from card.
		 */
		err = mmc_send_csd(card, card->raw_csd);
		if (err)
			goto free_card;

		err = mmc_decode_csd(card);
		if (err)
			goto free_card;
		err = mmc_decode_cid(card);
		if (err)
			goto free_card;
	}

	/*
	 * handling only for cards supporting DSR and hosts requesting
	 * DSR configuration
	 */
	if (card->csd.dsr_imp && host->dsr_req)
		mmc_set_dsr(host);

	/*
	 * Select card, as all following commands rely on that.
	 */
	if (!mmc_host_is_spi(host)) {
		err = mmc_select_card(card);
		if (err)
			goto free_card;
	}

	if (!oldcard) {
		/*
		 * Fetch and process extended CSD.
		 */

		err = mmc_get_ext_csd(card, &ext_csd);
		if (err)
			goto free_card;
		err = mmc_read_ext_csd(card, ext_csd);
		if (err)
			goto free_card;

		/* If doing byte addressing, check if required to do sector
		 * addressing.  Handle the case of <2GB cards needing sector
		 * addressing.  See section 8.1 JEDEC Standard JED84-A441;
		 * ocr register has bit 30 set for sector addressing.
		 */
		if (!(mmc_card_blockaddr(card)) && (rocr & (1<<30)))
			mmc_card_set_blockaddr(card);

		/* Erase size depends on CSD and Extended CSD */
		mmc_set_erase_size(card);
	}

	/*
	 * If enhanced_area_en is TRUE, host needs to enable ERASE_GRP_DEF
	 * bit.  This bit will be lost every time after a reset or power off.
	 */
	if (card->ext_csd.partition_setting_completed ||
	    (card->ext_csd.rev >= 3 && (host->caps2 & MMC_CAP2_HC_ERASE_SZ))) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_ERASE_GROUP_DEF, 1,
				 card->ext_csd.generic_cmd6_time);

		if (err && err != -EBADMSG)
			goto free_card;

		if (err) {
			err = 0;
			/*
			 * Just disable enhanced area off & sz
			 * will try to enable ERASE_GROUP_DEF
			 * during next time reinit
			 */
			card->ext_csd.enhanced_area_offset = -EINVAL;
			card->ext_csd.enhanced_area_size = -EINVAL;
		} else {
			card->ext_csd.erase_group_def = 1;
			/*
			 * enable ERASE_GRP_DEF successfully.
			 * This will affect the erase size, so
			 * here need to reset erase size
			 */
			mmc_set_erase_size(card);
		}
	}

	/*
	 * Ensure eMMC user default partition is enabled
	 */
	if (card->ext_csd.part_config & EXT_CSD_PART_CONFIG_ACC_MASK) {
		card->ext_csd.part_config &= ~EXT_CSD_PART_CONFIG_ACC_MASK;
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, EXT_CSD_PART_CONFIG,
				 card->ext_csd.part_config,
				 card->ext_csd.part_time);
		if (err && err != -EBADMSG)
			goto free_card;
	}

	/*
	 * Enable power_off_notification byte in the ext_csd register
	 */
	if (card->ext_csd.rev >= 6) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				 EXT_CSD_POWER_OFF_NOTIFICATION,
				 EXT_CSD_POWER_ON,
				 card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * The err can be -EBADMSG or 0,
		 * so check for success and update the flag
		 */
		if (!err)
			card->ext_csd.power_off_notification = EXT_CSD_POWER_ON;
	}

	/*
	 * Select timing interface
	 */
	err = mmc_select_timing(card);
	if (err)
		goto free_card;

	/*
	 * HS200 requires tuning before use; a tuned HS200 card may then
	 * be promoted to HS400.  Plain HS cards instead get the widest
	 * bus and, optionally, DDR.
	 */
	if (mmc_card_hs200(card)) {
		err = mmc_hs200_tuning(card);
		if (err)
			goto err;

		err = mmc_select_hs400(card);
		if (err)
			goto err;
	} else if (mmc_card_hs(card)) {
		/* Select the desired bus width optionally */
		err = mmc_select_bus_width(card);
		if (!IS_ERR_VALUE(err)) {
			err = mmc_select_hs_ddr(card);
			if (err)
				goto err;
		}
	}

	/*
	 * Choose the power class with selected bus interface
	 */
	mmc_select_powerclass(card);

	/*
	 * Enable HPI feature (if supported)
	 */
	if (card->ext_csd.hpi) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_HPI_MGMT, 1,
				card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warn("%s: Enabling HPI failed\n",
				mmc_hostname(card->host));
			err = 0;
		} else
			card->ext_csd.hpi_en = 1;
	}

	/*
	 * If cache size is higher than 0, this indicates
	 * the existence of cache and it can be turned on.
	 */
	if (card->ext_csd.cache_size > 0) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_CACHE_CTRL, 1,
				card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;

		/*
		 * Only if no error, cache is turned on successfully.
		 */
		if (err) {
			pr_warn("%s: Cache is supported, but failed to turn on (%d)\n",
				mmc_hostname(card->host), err);
			card->ext_csd.cache_ctrl = 0;
			err = 0;
		} else {
			card->ext_csd.cache_ctrl = 1;
		}
	}

	/*
	 * The mandatory minimum values are defined for packed command.
	 * read: 5, write: 3
	 */
	if (card->ext_csd.max_packed_writes >= 3 &&
	    card->ext_csd.max_packed_reads >= 5 &&
	    host->caps2 & MMC_CAP2_PACKED_CMD) {
		err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
				EXT_CSD_EXP_EVENTS_CTRL,
				EXT_CSD_PACKED_EVENT_EN,
				card->ext_csd.generic_cmd6_time);
		if (err && err != -EBADMSG)
			goto free_card;
		if (err) {
			pr_warn("%s: Enabling packed event failed\n",
				mmc_hostname(card->host));
			card->ext_csd.packed_event_en = 0;
			err = 0;
		} else {
			card->ext_csd.packed_event_en = 1;
		}
	}

	if (!oldcard)
		host->card = card;

	mmc_free_ext_csd(ext_csd);
	return 0;

free_card:
	/* Only free a card we allocated here, never a resumed oldcard. */
	if (!oldcard)
		mmc_remove_card(card);
err:
	mmc_free_ext_csd(ext_csd);

	return err;
}
1559
1560static int mmc_can_sleep(struct mmc_card *card)
1561{
1562	return (card && card->ext_csd.rev >= 3);
1563}
1564
1565static int mmc_sleep(struct mmc_host *host)
1566{
1567	struct mmc_command cmd = {0};
1568	struct mmc_card *card = host->card;
1569	unsigned int timeout_ms = DIV_ROUND_UP(card->ext_csd.sa_timeout, 10000);
1570	int err;
1571
1572	err = mmc_deselect_cards(host);
1573	if (err)
1574		return err;
1575
1576	cmd.opcode = MMC_SLEEP_AWAKE;
1577	cmd.arg = card->rca << 16;
1578	cmd.arg |= 1 << 15;
1579
1580	/*
1581	 * If the max_busy_timeout of the host is specified, validate it against
1582	 * the sleep cmd timeout. A failure means we need to prevent the host
1583	 * from doing hw busy detection, which is done by converting to a R1
1584	 * response instead of a R1B.
1585	 */
1586	if (host->max_busy_timeout && (timeout_ms > host->max_busy_timeout)) {
1587		cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;
1588	} else {
1589		cmd.flags = MMC_RSP_R1B | MMC_CMD_AC;
1590		cmd.busy_timeout = timeout_ms;
1591	}
1592
1593	err = mmc_wait_for_cmd(host, &cmd, 0);
1594	if (err)
1595		return err;
1596
1597	/*
1598	 * If the host does not wait while the card signals busy, then we will
1599	 * will have to wait the sleep/awake timeout.  Note, we cannot use the
1600	 * SEND_STATUS command to poll the status because that command (and most
1601	 * others) is invalid while the card sleeps.
1602	 */
1603	if (!cmd.busy_timeout || !(host->caps & MMC_CAP_WAIT_WHILE_BUSY))
1604		mmc_delay(timeout_ms);
1605
1606	return err;
1607}
1608
1609static int mmc_can_poweroff_notify(const struct mmc_card *card)
1610{
1611	return card &&
1612		mmc_card_mmc(card) &&
1613		(card->ext_csd.power_off_notification == EXT_CSD_POWER_ON);
1614}
1615
1616static int mmc_poweroff_notify(struct mmc_card *card, unsigned int notify_type)
1617{
1618	unsigned int timeout = card->ext_csd.generic_cmd6_time;
1619	int err;
1620
1621	/* Use EXT_CSD_POWER_OFF_SHORT as default notification type. */
1622	if (notify_type == EXT_CSD_POWER_OFF_LONG)
1623		timeout = card->ext_csd.power_off_longtime;
1624
1625	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1626			EXT_CSD_POWER_OFF_NOTIFICATION,
1627			notify_type, timeout, true, false, false);
1628	if (err)
1629		pr_err("%s: Power Off Notification timed out, %u\n",
1630		       mmc_hostname(card->host), timeout);
1631
1632	/* Disable the power off notification after the switch operation. */
1633	card->ext_csd.power_off_notification = EXT_CSD_NO_POWER_NOTIFICATION;
1634
1635	return err;
1636}
1637
1638/*
1639 * Host is being removed. Free up the current card.
1640 */
static void mmc_remove(struct mmc_host *host)
{
	BUG_ON(!host);
	BUG_ON(!host->card);

	/* Unregister the card and drop the host's reference to it. */
	mmc_remove_card(host->card);
	host->card = NULL;
}
1649
1650/*
1651 * Card detection - card is alive.
1652 */
static int mmc_alive(struct mmc_host *host)
{
	/* CMD13 succeeds (returns 0) only if the card still responds. */
	return mmc_send_status(host->card, NULL);
}
1657
1658/*
1659 * Card detection callback from host.
1660 */
1661static void mmc_detect(struct mmc_host *host)
1662{
1663	int err;
1664
1665	BUG_ON(!host);
1666	BUG_ON(!host->card);
1667
1668	mmc_get_card(host->card);
1669
1670	/*
1671	 * Just check if our card has been removed.
1672	 */
1673	err = _mmc_detect_card_removed(host);
1674
1675	mmc_put_card(host->card);
1676
1677	if (err) {
1678		mmc_remove(host);
1679
1680		mmc_claim_host(host);
1681		mmc_detach_bus(host);
1682		mmc_power_off(host);
1683		mmc_release_host(host);
1684	}
1685}
1686
/*
 * Common suspend path: stop background operations, flush the card's
 * cache, then power the card down via the best available mechanism
 * (power-off notification, sleep, or a plain deselect).
 *
 * @is_suspend: true selects the short power-off notification (suspend);
 *              false selects the long one (shutdown).
 */
static int _mmc_suspend(struct mmc_host *host, bool is_suspend)
{
	int err = 0;
	unsigned int notify_type = is_suspend ? EXT_CSD_POWER_OFF_SHORT :
					EXT_CSD_POWER_OFF_LONG;

	BUG_ON(!host);
	BUG_ON(!host->card);

	mmc_claim_host(host);

	/* Already suspended: nothing to do. */
	if (mmc_card_suspended(host->card))
		goto out;

	/* Background operations must be stopped before powering down. */
	if (mmc_card_doing_bkops(host->card)) {
		err = mmc_stop_bkops(host->card);
		if (err)
			goto out;
	}

	/* Flush the card's volatile cache so no data is lost at power off. */
	err = mmc_flush_cache(host->card);
	if (err)
		goto out;

	/*
	 * Prefer power-off notification when usable (requires a full power
	 * cycle capable host, or a shutdown); otherwise sleep; otherwise
	 * just deselect on non-SPI hosts.
	 */
	if (mmc_can_poweroff_notify(host->card) &&
		((host->caps2 & MMC_CAP2_FULL_PWR_CYCLE) || !is_suspend))
		err = mmc_poweroff_notify(host->card, notify_type);
	else if (mmc_can_sleep(host->card))
		err = mmc_sleep(host);
	else if (!mmc_host_is_spi(host))
		err = mmc_deselect_cards(host);

	/* Only mark suspended once the card has been quiesced successfully. */
	if (!err) {
		mmc_power_off(host);
		mmc_card_set_suspended(host->card);
	}
out:
	mmc_release_host(host);
	return err;
}
1727
1728/*
1729 * Suspend callback
1730 */
1731static int mmc_suspend(struct mmc_host *host)
1732{
1733	int err;
1734
1735	err = _mmc_suspend(host, true);
1736	if (!err) {
1737		pm_runtime_disable(&host->card->dev);
1738		pm_runtime_set_suspended(&host->card->dev);
1739	}
1740
1741	return err;
1742}
1743
1744/*
1745 * This function tries to determine if the same card is still present
1746 * and, if so, restore all state to it.
1747 */
1748static int _mmc_resume(struct mmc_host *host)
1749{
1750	int err = 0;
1751
1752	BUG_ON(!host);
1753	BUG_ON(!host->card);
1754
1755	mmc_claim_host(host);
1756
1757	if (!mmc_card_suspended(host->card))
1758		goto out;
1759
1760	mmc_power_up(host, host->card->ocr);
1761	err = mmc_init_card(host, host->card->ocr, host->card);
1762	mmc_card_clr_suspended(host->card);
1763
1764out:
1765	mmc_release_host(host);
1766	return err;
1767}
1768
1769/*
1770 * Shutdown callback
1771 */
1772static int mmc_shutdown(struct mmc_host *host)
1773{
1774	int err = 0;
1775
1776	/*
1777	 * In a specific case for poweroff notify, we need to resume the card
1778	 * before we can shutdown it properly.
1779	 */
1780	if (mmc_can_poweroff_notify(host->card) &&
1781		!(host->caps2 & MMC_CAP2_FULL_PWR_CYCLE))
1782		err = _mmc_resume(host);
1783
1784	if (!err)
1785		err = _mmc_suspend(host, false);
1786
1787	return err;
1788}
1789
1790/*
1791 * Callback for resume.
1792 */
1793static int mmc_resume(struct mmc_host *host)
1794{
1795	int err = 0;
1796
1797	if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
1798		err = _mmc_resume(host);
1799		pm_runtime_set_active(&host->card->dev);
1800		pm_runtime_mark_last_busy(&host->card->dev);
1801	}
1802	pm_runtime_enable(&host->card->dev);
1803
1804	return err;
1805}
1806
1807/*
1808 * Callback for runtime_suspend.
1809 */
1810static int mmc_runtime_suspend(struct mmc_host *host)
1811{
1812	int err;
1813
1814	if (!(host->caps & MMC_CAP_AGGRESSIVE_PM))
1815		return 0;
1816
1817	err = _mmc_suspend(host, true);
1818	if (err)
1819		pr_err("%s: error %d doing aggessive suspend\n",
1820			mmc_hostname(host), err);
1821
1822	return err;
1823}
1824
1825/*
1826 * Callback for runtime_resume.
1827 */
1828static int mmc_runtime_resume(struct mmc_host *host)
1829{
1830	int err;
1831
1832	if (!(host->caps & (MMC_CAP_AGGRESSIVE_PM | MMC_CAP_RUNTIME_RESUME)))
1833		return 0;
1834
1835	err = _mmc_resume(host);
1836	if (err)
1837		pr_err("%s: error %d doing aggessive resume\n",
1838			mmc_hostname(host), err);
1839
1840	return 0;
1841}
1842
1843static int mmc_power_restore(struct mmc_host *host)
1844{
1845	int ret;
1846
1847	mmc_claim_host(host);
1848	ret = mmc_init_card(host, host->card->ocr, host->card);
1849	mmc_release_host(host);
1850
1851	return ret;
1852}
1853
/* Bus operations the MMC core invokes for (e)MMC cards on this host. */
static const struct mmc_bus_ops mmc_ops = {
	.remove = mmc_remove,
	.detect = mmc_detect,
	.suspend = mmc_suspend,
	.resume = mmc_resume,
	.runtime_suspend = mmc_runtime_suspend,
	.runtime_resume = mmc_runtime_resume,
	.power_restore = mmc_power_restore,
	.alive = mmc_alive,
	.shutdown = mmc_shutdown,
};
1865
1866/*
1867 * Starting point for MMC card init.
1868 */
int mmc_attach_mmc(struct mmc_host *host)
{
	int err;
	u32 ocr, rocr;

	BUG_ON(!host);
	WARN_ON(!host->claimed);

	/* Set correct bus mode for MMC before attempting attach */
	if (!mmc_host_is_spi(host))
		mmc_set_bus_mode(host, MMC_BUSMODE_OPENDRAIN);

	/* Probe for a card: CMD1 with zero OCR queries its voltage window. */
	err = mmc_send_op_cond(host, 0, &ocr);
	if (err)
		return err;

	mmc_attach_bus(host, &mmc_ops);
	if (host->ocr_avail_mmc)
		host->ocr_avail = host->ocr_avail_mmc;

	/*
	 * We need to get OCR a different way for SPI.
	 */
	if (mmc_host_is_spi(host)) {
		err = mmc_spi_read_ocr(host, 1, &ocr);
		if (err)
			goto err;
	}

	rocr = mmc_select_voltage(host, ocr);

	/*
	 * Can we support the voltage of the card?
	 */
	if (!rocr) {
		err = -EINVAL;
		goto err;
	}

	/*
	 * Detect and init the card.
	 */
	err = mmc_init_card(host, rocr, NULL);
	if (err)
		goto err;

	/*
	 * Register the card with the driver model; this is done with
	 * the host released and reclaimed afterwards.
	 */
	mmc_release_host(host);
	err = mmc_add_card(host->card);
	mmc_claim_host(host);
	if (err)
		goto remove_card;

	return 0;

remove_card:
	/* Registration failed: drop the card, again without the claim held. */
	mmc_release_host(host);
	mmc_remove_card(host->card);
	mmc_claim_host(host);
	host->card = NULL;
err:
	mmc_detach_bus(host);

	pr_err("%s: error %d whilst initialising MMC card\n",
		mmc_hostname(host), err);

	return err;
}
1936